hexsha
stringlengths
40
40
size
int64
22
2.4M
ext
stringclasses
5 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
3
260
max_stars_repo_name
stringlengths
5
109
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
listlengths
1
9
max_stars_count
float64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
3
260
max_issues_repo_name
stringlengths
5
109
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
9
max_issues_count
float64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
3
260
max_forks_repo_name
stringlengths
5
109
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
9
max_forks_count
float64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
22
2.4M
avg_line_length
float64
5
169k
max_line_length
int64
5
786k
alphanum_fraction
float64
0.06
0.95
matches
listlengths
1
11
ecd5016bdd3f0205b58fa76d74d11f78baa46099
1,306
h
C
include/Euclid/MeshUtil/MeshDefs.h
unclejimbo/euclid
e118abdcdf51b6bc05cf5aa056bf228e052cf501
[ "MIT" ]
31
2017-05-02T07:04:40.000Z
2022-01-31T10:00:01.000Z
include/Euclid/MeshUtil/MeshDefs.h
unclejimbo/euclid
e118abdcdf51b6bc05cf5aa056bf228e052cf501
[ "MIT" ]
null
null
null
include/Euclid/MeshUtil/MeshDefs.h
unclejimbo/euclid
e118abdcdf51b6bc05cf5aa056bf228e052cf501
[ "MIT" ]
4
2018-07-02T17:59:35.000Z
2020-03-18T07:01:17.000Z
/**Type helpers for mesh related types. * * @defgroup PkgMeshDefs MeshDefs * @ingroup PkgMeshUtil */ #pragma once #include <boost/graph/graph_traits.hpp> #include <CGAL/boost/graph/properties.h> #include <CGAL/Kernel_traits.h> namespace Euclid { /** @{*/ template<typename Mesh> using vpmap_t = typename boost::property_map<Mesh, CGAL::vertex_point_t>::type; template<typename Mesh> using vimap_t = typename boost::property_map<Mesh, CGAL::vertex_index_t>::type; template<typename PMap> using pmap_vt = typename boost::property_traits<PMap>::value_type; template<typename Mesh> using Kernel_t = typename CGAL::Kernel_traits<pmap_vt<vpmap_t<Mesh>>>::Kernel; template<typename Mesh> using Point_3_t = typename Kernel_t<Mesh>::Point_3; template<typename Mesh> using Vector_3_t = typename Kernel_t<Mesh>::Vector_3; template<typename Mesh> using FT_t = typename Kernel_t<Mesh>::FT; template<typename Mesh> using vertex_t = typename boost::graph_traits<Mesh>::vertex_descriptor; template<typename Mesh> using halfedge_t = typename boost::graph_traits<Mesh>::halfedge_descriptor; template<typename Mesh> using edge_t = typename boost::graph_traits<Mesh>::edge_descriptor; template<typename Mesh> using face_t = typename boost::graph_traits<Mesh>::face_descriptor; /** @}*/ } // namespace Euclid
25.607843
79
0.775651
[ "mesh" ]
ecd6d8b839e21984eead1338da90d582987abfd0
20,374
c
C
src/gtx/curseswindows.c
jakllsch/openafs
e739eaa650ee30dcce54d05908b062839eafbf73
[ "BSD-3-Clause" ]
null
null
null
src/gtx/curseswindows.c
jakllsch/openafs
e739eaa650ee30dcce54d05908b062839eafbf73
[ "BSD-3-Clause" ]
null
null
null
src/gtx/curseswindows.c
jakllsch/openafs
e739eaa650ee30dcce54d05908b062839eafbf73
[ "BSD-3-Clause" ]
null
null
null
/* * Copyright 2000, International Business Machines Corporation and others. * All Rights Reserved. * * This software has been released under the terms of the IBM Public * License. For details, see the LICENSE file in the top-level source * directory or online at http://www.openafs.org/dl/license10.html */ /* * gator_curseswindows.c * * Description: * Implementation of the gator curses window facility. * *------------------------------------------------------------------------*/ #include <afsconfig.h> #include <afs/param.h> #include <roken.h> #if !defined(AFS_SUN5_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_FBSD80_ENV) #include <sgtty.h> #endif #include <lwp.h> #include "gtxcurseswin.h" /*Interface definition */ #include "gtxobjects.h" #include "gtxframe.h" int curses_debug; /*Is debugging turned on? */ static char mn[] = "gator_curseswindows"; /*Module name */ /* * Version of standard operations for a curses window. */ struct gwinops curses_gwinops = { gator_cursesgwin_box, gator_cursesgwin_clear, gator_cursesgwin_destroy, gator_cursesgwin_display, gator_cursesgwin_drawline, gator_cursesgwin_drawrectangle, gator_cursesgwin_drawchar, gator_cursesgwin_drawstring, gator_cursesgwin_invert, gator_cursesgwin_getchar, gator_cursesgwin_getdimensions, gator_cursesgwin_wait, }; struct gwinbaseops gator_curses_gwinbops = { gator_cursesgwin_create, gator_cursesgwin_cleanup, }; /* * Macros to map pixel positions to row & column positions. * (Note: for now, they are the identity function!!) */ #define GATOR_MAP_X_TO_COL(w, x) (x) #define GATOR_MAP_Y_TO_LINE(w, y) (y) /*------------------------------------------------------------------------ * gator_cursesgwin_init * * Description: * Initialize the curses window package. * * Arguments: * int adebug: Is debugging turned on? * * Returns: * 0 on success, * Error value otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. 
*------------------------------------------------------------------------*/ int gator_cursesgwin_init(int adebug) { /*gator_cursesgwin_init */ static char rn[] = "gator_cursesgwin_init"; /*Routine name */ struct gator_cursesgwin *c_data; /*Ptr to curses-specific data */ /* * Remember if we'll be doing debugging, then init the curses package. */ curses_debug = adebug; if (curses_debug) fprintf(stderr, "[%s:%s] Calling initscr()\n", mn, rn); initscr(); /* * Fill out the base window structure for curses. */ if (curses_debug) fprintf(stderr, "[%s:%s] Allocating %" AFS_SIZET_FMT " bytes for curses window private space in base window\n", mn, rn, sizeof(struct gator_cursesgwin)); c_data = malloc(sizeof(struct gator_cursesgwin)); if (c_data == (struct gator_cursesgwin *)0) { fprintf(stderr, "[%s:%s] Can't allocate %" AFS_SIZET_FMT " bytes for curses window private space in base window\n", mn, rn, sizeof(struct gator_cursesgwin)); return (-1); } /* * Fill in the curses-specific base window info. We assume that chars are 8x13. */ c_data->wp = stdscr; c_data->charwidth = 8; c_data->charheight = 13; c_data->box_vertchar = '|'; c_data->box_horizchar = '-'; /* * Fill in the generic base window info. */ gator_basegwin.w_type = GATOR_WIN_CURSES; gator_basegwin.w_x = 0; gator_basegwin.w_y = 0; gator_basegwin.w_width = c_data->charwidth * COLS; gator_basegwin.w_height = c_data->charheight * LINES; gator_basegwin.w_changed = 0; gator_basegwin.w_op = &curses_gwinops; gator_basegwin.w_parent = NULL; /* * Plug the private data into the generic part of the base window. */ gator_basegwin.w_data = (int *)c_data; /* * Now, set the terminal into the right mode for handling input */ raw(); /* curses raw mode */ /* init the frame */ gator_basegwin.w_frame = gtxframe_Create(); /* * Clear out the screen and return the good news. 
*/ wclear(((struct gator_cursesgwin *)(gator_basegwin.w_data))->wp); return (0); } /*gator_cursesgwin_init */ /*------------------------------------------------------------------------ * gator_cursesgwin_create * * Description: * Create a curses window (incorrectly). * * Arguments: * struct gator_cursesgwin_params *params : Ptr to creation parameters. * * Returns: * Ptr to the created curses window if successful, * Null ptr otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. *------------------------------------------------------------------------*/ struct gwin * gator_cursesgwin_create(void * rock) { static char rn[] = "gator_cursesgwin_create"; /*Routine name */ struct gator_cursesgwin_params *params = (struct gator_cursesgwin_params *)rock; struct gwin *newgwin; /*Ptr to new curses window */ struct gator_cursesgwin *c_data; /*Ptr to curses-specific data */ WINDOW *newcursgwin; /*Ptr to new curses window */ if (curses_debug) fprintf(stderr, "[%s:%s] Allocating %" AFS_SIZET_FMT " bytes for new gwin structure\n", mn, rn, sizeof(struct gwin)); newgwin = malloc(sizeof(struct gwin)); if (newgwin == NULL) { fprintf(stderr, "[%s:%s] Can't malloc() %" AFS_SIZET_FMT " bytes for new gwin structure: Errno is %d\n", mn, rn, sizeof(struct gwin), errno); return (NULL); } newgwin->w_type = GATOR_WIN_CURSES; newgwin->w_x = params->gwin_params.cr_x; newgwin->w_y = params->gwin_params.cr_y; newgwin->w_width = params->gwin_params.cr_width; newgwin->w_height = params->gwin_params.cr_height; newgwin->w_changed = 1; newgwin->w_op = &curses_gwinops; newgwin->w_parent = params->gwin_params.cr_parentwin; if (curses_debug) fprintf(stderr, "[%s:%s] Allocating %" AFS_SIZET_FMT " bytes for curses window private space\n", mn, rn, sizeof(struct gator_cursesgwin)); c_data = malloc(sizeof(struct gator_cursesgwin)); if (c_data == (struct gator_cursesgwin *)0) { fprintf(stderr, "[%s:%s] Can't allocate %" AFS_SIZET_FMT " bytes for curses window private space\n", 
mn, rn, sizeof(struct gator_cursesgwin)); free(newgwin); return (NULL); } newcursgwin = newwin(newgwin->w_height, /*Number of lines */ newgwin->w_width, /*Number of columns */ newgwin->w_y, /*Beginning y value */ newgwin->w_x); /*Beginning x value */ if (newcursgwin == (WINDOW *) 0) { fprintf(stderr, "[%s:%s] Failed to create curses window via newwin()\n", mn, rn); free(newgwin); free(c_data); return (NULL); } /* * Now, fill in the curses-specific window info. */ c_data->wp = newcursgwin; c_data->charwidth = params->charwidth; c_data->charheight = params->charheight; c_data->box_vertchar = params->box_vertchar; c_data->box_horizchar = params->box_horizchar; /* * Plug in a frame at the top-level. */ newgwin->w_frame = gtxframe_Create(); /* * Plug the curses private data into the generic window object, then * return the new window's info. */ newgwin->w_data = (int *)c_data; return (newgwin); } /*gator_cursesgwin_create */ /*------------------------------------------------------------------------ * gator_cursesgwin_cleanup * * Description: * Clean up, probably right before the caller exits. * * Arguments: * struct gwin *gwp : Ptr to base window. * * Returns: * 0 on success, * Error value otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. *------------------------------------------------------------------------*/ int gator_cursesgwin_cleanup(struct gwin *gwp) { /*gator_cursesgwin_cleanup */ static char rn[] = "gator_cursesgwin_cleanup"; /*Routine name */ struct gator_cursesgwin *cwp; /*Curses private area ptr */ cwp = (struct gator_cursesgwin *)(gwp->w_data); /* * Cleaning up in curses is extremely easy - one simple call. We also * want to clear the screen before we go. */ if (curses_debug) fprintf(stderr, "[%s:%s] Calling wclear() on window at %p\n", mn, rn, cwp->wp); wclear(cwp->wp); wrefresh(cwp->wp); /* * Now, set the terminal back into normal mode. 
*/ noraw(); if (curses_debug) fprintf(stderr, "[%s:%s] Calling endwin()\n", mn, rn); endwin(); return (0); } /*gator_cursesgwin_cleanup */ /*------------------------------------------------------------------------ * gator_cursesgwin_box * * Description: * Draw a box around the given curses window. * * Arguments: * struct gwin *gwp : Ptr to the curses window to draw * a box around. * * Returns: * 0 on success, * Error value otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. *------------------------------------------------------------------------*/ int gator_cursesgwin_box(struct gwin *gwp) { /*gator_cursesgwin_box */ static char rn[] = "gator_cursesgwin_box"; /*Routine name */ struct gator_cursesgwin *cwp; /*Ptr to curses private area */ cwp = (struct gator_cursesgwin *)(gwp->w_data); if (curses_debug) fprintf(stderr, "[%s:%s] Calling box() on window at %p\n", mn, rn, cwp->wp); box(cwp->wp, cwp->box_vertchar, cwp->box_horizchar); return (0); } /*gator_cursesgwin_box */ /*------------------------------------------------------------------------ * gator_cursesgwin_clear * * Description: * Clear out the given curses window. * * Arguments: * struct gwin *gwp : Ptr to the curses window to clear out. * * Returns: * 0 on success, * Error value otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. *------------------------------------------------------------------------*/ int gator_cursesgwin_clear(struct gwin *gwp) { /*gator_cursesgwin_clear */ static char rn[] = "gator_cursesgwin_clear"; /*Routine name */ struct gator_cursesgwin *cwp; /*Ptr to curses private area */ /* * Clearing windows is very easy in curses; just one call will do it. 
*/ cwp = (struct gator_cursesgwin *)(gwp->w_data); if (curses_debug) fprintf(stderr, "[%s:%s] Calling wclear() on window at %p\n", mn, rn, cwp->wp); wclear(cwp->wp); return (0); } /*gator_cursesgwin_clear */ /*------------------------------------------------------------------------ * gator_cursesgwin_destroy * * Description: * Destroy the given curses window. * * Arguments: * struct gwin *gwp : Ptr to the curses window to destroy. * * Returns: * 0 on success, * Error value otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. *------------------------------------------------------------------------*/ int gator_cursesgwin_destroy(struct gwin *gwp) { /*gator_cursesgwin_destroy */ static char rn[] = "gator_cursesgwin_destroy"; /*Routine name */ struct gator_cursesgwin *cwp; /*Ptr to curses private area */ cwp = (struct gator_cursesgwin *)(gwp->w_data); if (curses_debug) fprintf(stderr, "[%s:%s] Calling delwin() on window at %p\n", mn, rn, cwp->wp); delwin(cwp->wp); return (0); } /*gator_cursesgwin_destroy */ /*------------------------------------------------------------------------ * gator_cursesgwin_display * * Description: * Display/redraw the given curses window. * * Arguments: * struct gwin *gwp : Ptr to the curses window to draw. * * Returns: * 0 on success, * Error value otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. 
*------------------------------------------------------------------------*/ int gator_cursesgwin_display(struct gwin *gwp) { /*gator_cursesgwin_display */ struct gator_cursesgwin *cwp; /*Curses private area ptr */ cwp = (struct gator_cursesgwin *)(gwp->w_data); wclear(cwp->wp); /* clear screen */ gtxframe_Display(gwp->w_frame, gwp); /* display the frame */ wrefresh(cwp->wp); /* redraw the guy */ return (0); } /*gator_cursesgwin_display */ /*------------------------------------------------------------------------ * gator_cursesgwin_drawline * * Description: * Draw a line between two points in the given curses * window. * * Arguments: * struct gwin *gwp : Ptr to the curses window in which * the line is to be drawn. * struct gwin_lineparams *params : Ptr to other params. * * Returns: * 0 on success, * Error value otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. *------------------------------------------------------------------------*/ int gator_cursesgwin_drawline(struct gwin *gwp, struct gwin_lineparams *params) { /*gator_cursesgwin_drawline */ static char rn[] = "gator_cursesgwin_drawline"; /*Routine name */ if (curses_debug) fprintf(stderr, "[%s:%s] This routine is currently a no-op\n", mn, rn); return (0); } /*gator_cursesgwin_drawline */ /*------------------------------------------------------------------------ * gator_cursesgwin_drawrectangle * * Description: * Draw a rectangle in the given curses window. * * Arguments: * struct gwin *gwp : Ptr to the curses window in which * the rectangle is to be drawn. * struct gwin_rectparams *params : Ptr to other params. * * Returns: * 0 on success, * Error value otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. 
*------------------------------------------------------------------------*/ int gator_cursesgwin_drawrectangle(struct gwin *gwp, struct gwin_rectparams *params) { /*gator_cursesgwin_drawrectangle */ static char rn[] = "gator_cursesgwin_drawrectangle"; /*Routine name */ if (curses_debug) fprintf(stderr, "[%s:%s] This routine is currently a no-op\n", mn, rn); return (0); } /*gator_cursesgwin_drawrectangle */ /*------------------------------------------------------------------------ * gator_cursesgwin_drawchar * * Description: * Draw a character in the given curses window. * * Arguments: * struct gwin *gwp : Ptr to the curses window in which * the character is to be drawn. * struct gwin_charparams *params : Ptr to other params. * * Returns: * 0 on success, * Error value otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. *------------------------------------------------------------------------*/ int gator_cursesgwin_drawchar(struct gwin *gwp, struct gwin_charparams *params) { /*gator_cursesgwin_drawchar */ static char rn[] = "gator_cursesgwin_drawchar"; /*Routine name */ struct gator_cursesgwin *cwp; /*Ptr to curses private area */ int curses_x, curses_y; /*Mapped x,y positions */ int code=0; cwp = (struct gator_cursesgwin *)(gwp->w_data); curses_x = GATOR_MAP_X_TO_COL(cwp, params->x); curses_y = GATOR_MAP_Y_TO_LINE(cwp, params->y); if (curses_debug) fprintf(stderr, "[%s:%s] Drawing char '%c' on window at %p at (%d, %d) [line %d, column %d]%s\n", mn, rn, params->c, cwp->wp, params->x, params->y, curses_y, curses_x, (params->highlight ? 
", using standout mode" : "")); wmove(cwp->wp, curses_y, curses_x); if (params->highlight) { code=wstandout(cwp->wp); if (code) return (code); } waddch(cwp->wp, params->c); if (params->highlight) { code=wstandend(cwp->wp); if (code) return (code); } return (0); } /*gator_cursesgwin_drawchar */ /*------------------------------------------------------------------------ * gator_cursesgwin_drawstring * * Description: * Draw a string in the given curses window. * * Arguments: * struct gwin *gwp : Ptr to the curses window in which * the string is to be drawn. * struct gwin_strparams *params : Ptr to other params. * * Returns: * 0 on success, * Error value otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. *------------------------------------------------------------------------*/ int gator_cursesgwin_drawstring(struct gwin *gwp, struct gwin_strparams *params) { /*gator_cursesgwin_drawstring */ static char rn[] = "gator_cursesgwin_drawstring"; /*Routine name */ struct gator_cursesgwin *cwp; /*Ptr to curses private area */ int curses_x, curses_y; /*Mapped x,y positions */ int code=0; cwp = (struct gator_cursesgwin *)(gwp->w_data); curses_x = GATOR_MAP_X_TO_COL(cwp, params->x); curses_y = GATOR_MAP_Y_TO_LINE(cwp, params->y); if (curses_debug) fprintf(stderr, "[%s:%s] Drawing string '%s' on window at %p at (%d, %d) [line %d, column %d]%s\n", mn, rn, params->s, cwp->wp, params->x, params->y, curses_y, curses_x, (params->highlight ? ", using standout mode" : "")); wmove(cwp->wp, curses_y, curses_x); if (params->highlight) { code=wstandout(cwp->wp); if (code) return (code); } waddstr(cwp->wp, params->s); if (params->highlight) { code=wstandend(cwp->wp); if (code) return (code); } return (code); } /*gator_cursesgwin_drawstring */ /*------------------------------------------------------------------------ * gator_cursesgwin_invert * * Description: * Invert a region in the given curses window. 
* * Arguments: * struct gwin *gwp : Ptr to the curses window in which * the inverted region lies. * struct gwin_invparams *params : Ptr to other params. * * Returns: * 0 on success, * Error value otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. *------------------------------------------------------------------------*/ int gator_cursesgwin_invert(struct gwin *gwp, struct gwin_invparams *params) { /*gator_cursesgwin_invert */ static char rn[] = "gator_cursesgwin_invert"; /*Routine name */ if (curses_debug) fprintf(stderr, "[%s:%s] This routine is currently a no-op\n", mn, rn); return (0); } /*gator_cursesgwin_invert */ /*------------------------------------------------------------------------ * gator_cursesgwin_getchar * * Description: * Pick up a character from the given window. * * Arguments: * struct gwin *gwp : Ptr to the curses window to listen to. * * Returns: * Value of the character read, * -1 otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. *------------------------------------------------------------------------*/ int gator_cursesgwin_getchar(struct gwin *gwp) { /*gator_cursesgwin_getchar */ return (getc(stdin)); } /*gator_cursesgwin_getchar */ /*------------------------------------------------------------------------ * gator_cursesgwin_wait * * Description: * Wait until input is available. * * Arguments: * struct gwin *gwp : Ptr to the curses window to wait on. * * Returns: * 0 on success, * Error value otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. *------------------------------------------------------------------------*/ int gator_cursesgwin_wait(struct gwin *gwp) { /*gator_cursesgwin_wait */ while (!LWP_WaitForKeystroke(-1)); return (0); } /*gator_cursesgwin_wait */ /*------------------------------------------------------------------------ * gator_cursesgwin_getdimensions * * Description: * Get the window's X,Y dimensions. 
* * Arguments: * struct gwin *gwp : Ptr to the curses window to examine. * struct gwin_sizeparams *params : Ptr to the size params to set. * * Returns: * 0 on success, * Error value otherwise. * * Environment: * Nothing interesting. * * Side Effects: * As advertised. *------------------------------------------------------------------------*/ int gator_cursesgwin_getdimensions(struct gwin *gwp, struct gwin_sizeparams *aparms) { /*gator_cursesgwin_getdimensions */ struct gator_cursesgwin *cwp; /*Curses-specific data */ cwp = (struct gator_cursesgwin *)(gwp->w_data); getmaxyx(cwp->wp, aparms->maxy, aparms->maxx); return (0); } /*gator_cursesgwin_getdimensions */
26.35705
101
0.58663
[ "object" ]
ecd8b41dc33635e7199570ce7d977a79d16615bb
54,118
c
C
src/mulMats.c
Bnel13/FastHenry-ACA-
dffeea9b4203c365e3f6f1e7a7715e39ee0202e8
[ "MIT" ]
17
2019-03-03T10:48:13.000Z
2022-01-25T14:20:00.000Z
src/mulMats.c
Bnel13/FastHenry-ACA-
dffeea9b4203c365e3f6f1e7a7715e39ee0202e8
[ "MIT" ]
null
null
null
src/mulMats.c
Bnel13/FastHenry-ACA-
dffeea9b4203c365e3f6f1e7a7715e39ee0202e8
[ "MIT" ]
4
2019-02-01T15:51:36.000Z
2021-06-30T13:06:28.000Z
/*!\page LICENSE LICENSE Copyright (C) 2003 by the Board of Trustees of Massachusetts Institute of Technology, hereafter designated as the Copyright Owners. License to use, copy, modify, sell and/or distribute this software and its documentation for any purpose is hereby granted without royalty, subject to the following terms and conditions: 1. The above copyright notice and this permission notice must appear in all copies of the software and related documentation. 2. The names of the Copyright Owners may not be used in advertising or publicity pertaining to distribution of the software without the specific, prior written permission of the Copyright Owners. 3. THE SOFTWARE IS PROVIDED "AS-IS" AND THE COPYRIGHT OWNERS MAKE NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED, BY WAY OF EXAMPLE, BUT NOT LIMITATION. THE COPYRIGHT OWNERS MAKE NO REPRESENTATIONS OR WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT INFRINGE ANY PATENTS, COPYRIGHTS TRADEMARKS OR OTHER RIGHTS. THE COPYRIGHT OWNERS SHALL NOT BE LIABLE FOR ANY LIABILITY OR DAMAGES WITH RESPECT TO ANY CLAIM BY LICENSEE OR ANY THIRD PARTY ON ACCOUNT OF, OR ARISING FROM THE LICENSE, OR ANY SUBLICENSE OR USE OF THE SOFTWARE OR ANY SERVICE OR SUPPORT. LICENSEE shall indemnify, hold harmless and defend the Copyright Owners and their trustees, officers, employees, students and agents against any and all claims arising out of the exercise of any rights under this Agreement, including, without limiting the generality of the foregoing, against any damages, losses or liabilities whatsoever with respect to death or injury to person or damage to property arising from or out of the possession, use, or operation of Software or Licensed Program(s) by LICENSEE or its customers. 
*/ /* # ***** sort to /src/main # ***** */ //#include "mulGlobal.h" #include "induct.h"//BAPN change int *localcnt, *multicnt, *evalcnt; /* counts of builds done by level */ int **Q2Mcnt, **Q2Lcnt, **Q2Pcnt, **L2Lcnt; /* counts of xformation mats */ int **M2Mcnt, **M2Lcnt, **M2Pcnt, **L2Pcnt, **Q2PDcnt; /* SRW */ void mulMatDirect(ssystem*); void bdmulMatPrecond(ssystem*); void olmulMatPrecond(ssystem*); void find_flux_density_row(double**, double**, int, int, int, int, int, charge**, charge**, int*, int*); void mulMatUp(ssystem*); void mulMatEval(ssystem*); void mulMatDown(ssystem*); long double mutualfill33(FILAMENT *fil1, FILAMENT *fil2) { long double inductance = 0; long double volume1 = 0; long double volume2 = 0; long double distance = 0; long double disx = 0.0; long double disy = 0; long double disz = 0; long double permeability = 0; long double combo = 0; disx = (fil1->x[0] + fil1->x[1]) / 2 - (fil2->x[0] + fil2->x[1]) / 2;// ^ 2; disy = (fil1->y[0] + fil1->y[1]) / 2 - (fil2->y[0] + fil2->y[1]) / 2;// ^ 2; disz = (fil1->z[0] + fil1->z[1]) / 2 - (fil2->z[0] + fil2->z[1]) / 2;// ^ 2); combo = disx * disx + disy * disy + disz * disz; long double a1 = 0; long double a2 = 0; a1 = fil1->area; a2 = fil2->area; volume1 = fil1->area*fil1->length; volume2 = fil2->area*fil2->length; distance = sqrt(combo); inductance = (MU0 / (4 * PI *a1*a2))*((volume1*volume2) / (distance)); return inductance; } void MLACA(int depth, int *number_of_groups_level, int **number_of_interactions_group, int ***interactions, int **num_elements, int ***elements, charge **filaments, double **total,ssystem *ssys) { //ssys->number_groups_level = (int *)calloc(depth,sizeof(int)); int hshshshshss = 0; //int nummm = 0; /* double **total = (double **)malloc(1643*sizeof(double*)); for (int i = 0; i < 1643; i++) { total[i] = (double *)malloc(1643*sizeof(double)); } */ int count = 0; for (int i = 0; i < depth - 1; i++) { for (int j = 0; j < number_of_groups_level[i + 1]; j++) { for (int k = 0; k < 
number_of_interactions_group[i][j]; k++) { ++count; } } } //ssys->k = (int*)malloc(count * sizeof(int)); long double ***UU = (long double ***)calloc(count, sizeof(long double **)); //ssys->U = (long double ***)calloc(count, sizeof(long double **)); long double ***VV = (long double ***)calloc(count, sizeof(long double **)); //ssys->V = (long double ***)calloc(count, sizeof(long double **)); /* int count1 = 0; for (int i = 0; i < depth - 1; i++) { for (int j = 0; j < number_of_groups_level[i + 1]; j++) { for (int k = 0; k < number_of_interactions_group[i][j]; k++) { UU[count1] = (long double **)calloc(num_elements[i + 1][j], sizeof(long double*)); VV[count1] = (long double **)calloc(num_elements[i + 1][interactions[i][j][k]], sizeof(long double*));//k for (int q = 0; q < num_elements[i + 1][j]; q++) { UU[count1][q] = (long double *)calloc(num_elements[i + 1][j], sizeof(long double));//k } for (int p = 0; p < num_elements[i + 1][interactions[i][j][k]]; p++)//k { VV[count1][p] = (long double *)calloc(num_elements[i + 1][interactions[i][j][k]], sizeof(long double)); } ++count1; } } } *///End working version //for (int i = 0; i<1; i++) //U[i] = (double *)malloc( sizeof(double)); //for (int i = 0; i<1; i++) //V[i] = (double *)malloc(sizeof(double)); int *KK = (int *)malloc(count * sizeof(int)); int count2 = 0; //unsigned long int memory = 0; int memory = 0; //ssys->number_of_interactions_groups = (int **)malloc(depth * sizeof(int*)); //ssys->interact = (int***)calloc(depth , sizeof(int**)); for (int i = 0; i < depth - 1; i++) { //ssys->interact[i] = (int**)calloc(number_of_groups_level[i + 1], sizeof(int*)); //ssys->number_of_interactions_groups[i] = (int *)malloc(number_of_groups_level[i]* sizeof(int)); for (int j = 0; j < number_of_groups_level[i + 1]; j++) { //ssys->interact[i][j] = (int**)calloc(number_of_groups_level[i + 1], sizeof(int*)); //++ssys->number_groups_level[i + 1]; //ssys->number_of_interactions_groups[i][j] = number_of_interactions_group[i][j]; for (int 
k = 0; k < number_of_interactions_group[i][j]; k++) { //ssys->interact[i][j][k] = interactions[i][j][k]; //Start memory allocation change UU[count2] = (long double **)calloc(num_elements[i + 1][j], sizeof(long double*)); //ssys->U[count2] = (long double **)calloc(num_elements[i + 1][j], sizeof(long double*)); VV[count2] = (long double **)calloc(num_elements[i + 1][interactions[i][j][k]], sizeof(long double*));//k //ssys->V[count2] = (long double **)calloc(num_elements[i + 1][interactions[i][j][k]], sizeof(long double*));//k for (int q = 0; q < num_elements[i + 1][j]; q++) { UU[count2][q] = (long double *)calloc(num_elements[i + 1][j], sizeof(long double));//k //ssys->U[count2][q] = (long double *)calloc(num_elements[i + 1][j], sizeof(long double));//k } for (int p = 0; p < num_elements[i + 1][interactions[i][j][k]]; p++)//k { VV[count2][p] = (long double *)calloc(num_elements[i + 1][interactions[i][j][k]], sizeof(long double)); //ssys->V[count2][p] = (long double *)calloc(num_elements[i + 1][interactions[i][j][k]], sizeof(long double)); } //end memory allocation change //fprintf(matrix, "%d\t", memory); /* double **U = (double **)malloc(num_elements[i + 1][j] * sizeof(double *)); for (int i = 0; i<num_elements[i + 1][j]; i++) U[i] = (double *)malloc(num_elements[i + 1][interactions[i][j][k]] / 2 * sizeof(double)); double **V = (double **)malloc(num_elements[i + 1][j] / 2 * sizeof(double *)); for (int i = 0; i<num_elements[i + 1][j] / 2; i++) V[i] = (double *)malloc(num_elements[i + 1][interactions[i][j][k]] * sizeof(double)); */ //memory = ACA_new(num_elements[i + 1][j], num_elements[i + 1][interactions[i][j][k]], elements[i + 1][j], elements[i + 1][interactions[i][j][k]], filaments,UU[count2],VV[count2]); KK[count2] = memory; //ssys->k[count2] = memory; //total[elements[a + 1][b][i] - 1][elements[a + 1][interactions[a][b][k]][j] - 1] /* FILE *matrix = fopen("matrix.txt", "w"); */ /* for (int q = 0; q < num_elements[i + 1][j]; q++) { for (int p = 0; p < 
num_elements[i + 1][interactions[i][j][k]]; p++) { double sum = 0; for (int i = 0; i < memory; i++) { sum += UU[count2][q][i] * VV[count2][i][p]; } //total[elements[i + 1][j][q] - 1][elements[i + 1][interactions[i][j][k]][p] - 1] = sum; //fprintf(matrix, "%e\t", calcp(filaments[elements[a + 1][b][i] - 1], filaments[elements[a + 1][interactions[a][b][k]][j] - 1], NULL) - sum); } //fprintf(matrix, "\n"); } */ /* fclose(matrix); */ for (int q = 0; q < num_elements[i + 1][j]; q++) { UU[count2][q] = (double *)realloc(UU[count2][q], memory * sizeof(double)); //ssys->U[count2][q] = (double *)realloc(UU[count2][q], memory * sizeof(double)); } for (int p = memory; p < num_elements[i + 1][interactions[i][j][k]]; p++)//k { free(VV[count2][p]); //free(ssys->V[count2][p]); } ++count2; /* UU = (double ***)realloc(UU, ka * sizeof(double**)); UU[ka - 1] = (double **)malloc(num_elements[i + 1][j] * sizeof(double *)); for (int j = 0; j < num_elements[i + 1][j]; j++) { UU[ka - 1][j] = (double *)malloc(kk * sizeof(double)); memcpy(&(UU[ka - 1][j][0]), &(U[j][0]), kk * sizeof(double)); } VV = (double ***)realloc(VV, ka * sizeof(double**)); VV[ka - 1] = (double **)malloc(kk * sizeof(double *)); for (int j = 0; j < kk; j++) { VV[ka - 1][j] = (double *)malloc(num_elements[i + 1][interactions[i][j][k]] * sizeof(double)); memcpy(&(VV[ka - 1][j][0]), &(V[j][0]), num_elements[i + 1][interactions[i][j][k]] * sizeof(double)); } */ //memory += num_elements[i+1][j] * num_elements[i+1][interactions[i][j][k]]; } } } //for (a = 0, nextc = sys->directlist; nextc != NULL; nextc = //nextc->dnext) int see = 0; int track = 0; for (int a = 0; a < depth - 1; a++) { for (int b = 0; b < number_of_groups_level[a + 1]; b++) { for (int k = 0; k < number_of_interactions_group[a][b]; k++) { // FILE *matrix = fopen("matrix.txt", "w"); for (int i = 0; i < num_elements[a + 1][b]; i++) { for (int j = 0; j < num_elements[a + 1][interactions[a][b][k]]; j++) { long double sum = 0; for (int nn = 0; nn < KK[track]; nn++) 
{ sum += UU[track][i][nn] * VV[track][nn][j]; } long double or = (filaments[elements[a + 1][b][i] - 1]->fil->lenvect[0] / filaments[elements[a + 1][b][i] - 1]->fil->length)*(filaments[elements[a + 1][interactions[a][b][k]][j] - 1]->fil->lenvect[0] / filaments[elements[a + 1][interactions[a][b][k]][j] - 1]->fil->length); or += (filaments[elements[a + 1][b][i] - 1]->fil->lenvect[1] / filaments[elements[a + 1][b][i] - 1]->fil->length)*(filaments[elements[a + 1][interactions[a][b][k]][j] - 1]->fil->lenvect[1] / filaments[elements[a + 1][interactions[a][b][k]][j] - 1]->fil->length); or += (filaments[elements[a + 1][b][i] - 1]->fil->lenvect[2] / filaments[elements[a + 1][b][i] - 1]->fil->length)*(filaments[elements[a + 1][interactions[a][b][k]][j] - 1]->fil->lenvect[2] / filaments[elements[a + 1][interactions[a][b][k]][j] - 1]->fil->length); //if(((calcp(filaments[elements[a + 1][b][i] - 1], filaments[elements[a + 1][interactions[a][b][k]][j] - 1], NULL) - sum* or)/ calcp(filaments[elements[a + 1][b][i] - 1], filaments[elements[a + 1][interactions[a][b][k]][j] - 1], NULL))>0.1) total[elements[a + 1][b][i] - 1][elements[a + 1][interactions[a][b][k]][j] - 1] = total[elements[a + 1][interactions[a][b][k]][j] - 1][elements[a + 1][b][i] - 1] = sum * or; //(mutualfill33(filaments[elements[a + 1][b][i] - 1]->fil, filaments[elements[a + 1][interactions[a][b][k]][j] - 1]->fil))* or;// calcp(filaments[elements[a + 1][b][i] - 1], filaments[elements[a + 1][interactions[a][b][k]][j] - 1], NULL);//sum* or ; //mutualfill3(filaments[elements[a + 1][b][i] - 1], filaments[elements[a + 1][interactions[a][b][k]][j] - 1]); //total[elements[a + 1][interactions[a][b][k]][j] - 1][elements[a + 1][b][i] - 1] = //sum* or ; //fprintf(matrix, "%e\t", calcp(filaments[elements[a + 1][b][i] - 1], filaments[elements[a + 1][interactions[a][b][k]][j] - 1], NULL) - sum*or); see += 2; //++nummm; if (((mutualfill33(filaments[elements[a + 1][b][i] - 1]->fil, filaments[elements[a + 1][interactions[a][b][k]][j] 
- 1]->fil)) - sum)/ mutualfill33(filaments[elements[a + 1][b][i] - 1]->fil, filaments[elements[a + 1][interactions[a][b][k]][j] - 1]->fil) > 0.0001) ++hshshshshss;
                    }
                    //fprintf(matrix, "\n");
                }
                // fclose(matrix);
                ++track;
            }
        }
    }
    //ssys->count = track;
    /* FILE *matrix = fopen("matrix.txt", "w"); for (int i = 0; i < 1643; i++) { for (int j = 0; j < 1643; j++) { fprintf(matrix, "%e\t", total[i][j]); } fprintf(matrix, "\n"); } fclose(matrix); */
}
//End BAP

//Start BAP
/*
 * branch: recursive descent over the group hierarchy.
 *
 * For group `group_number` on `level`, every candidate source group listed in
 * unknown_back[0..previous_unknowns-1] is classified by center-to-center
 * distance (component-wise, against 2*distance):
 *   - well separated  -> appended to interaction[level-1][group_number] and
 *                        counted in interaction_number[level-1][group_number];
 *   - near, at leaf   -> appended to still_to_do[group_number] (direct pairs);
 *   - near, not leaf  -> kept in `unknown`, expanded into children and passed
 *                        one level down by the recursive calls at the bottom.
 * Siblings of `group_number` (all but the first child) are also pushed into
 * `unknown_next` so each pair is visited once.
 *
 * NOTE(review): `unknown` and `unknown_next` are malloc()ed on every call and
 * never freed -- leaked per invocation; confirm intent before heavy use.
 * NOTE(review): `parent_index` and `memoryyy` are effectively unused here
 * (memoryyy only appears in commented-out accounting).
 */
void branch(int ***Group_kids, double ***Center, int *paren_index, int **kids_index, int level, int group_number, double distance, int previous_unknowns, int *unknown_back, int ***interaction, int parent_index, int depth, int **interaction_number, int *memoryyy, int **num_elements, int **still_to_do, int *number_still_to_do)
{
    int unknowns = 0;
    int *unknown = (int *)malloc(paren_index[level] * sizeof(int));
    for (int i = 0; i < paren_index[level]; i++)
    {
        unknown[i] = 0;
    }
    int unknown_nexts = 0;
    int *unknown_next = (int *)malloc(paren_index[level + 1] * sizeof(int));
    for (int i = 0; i < paren_index[level + 1]; i++)
    {
        unknown_next[i] = 0;
    }
    int index = 0;
    //write code to place the unknowns from the same group that are still able to exploit symetry
    //int max_kids = kids_index[level - 1][parent_index];
    //int j = 0;
    //while (group_number != Group_kids[level - 1][parent_index][max_kids - j - 1])//goes down symetric branches
    //{
    //    unknown[unknowns] = Group_kids[level - 1][parent_index][max_kids - j - 1];
    //    ++j;
    //    ++unknowns;
    //}
    interaction_number[level - 1][group_number] = 0;
    for (int i = 0; i < previous_unknowns; i++)//Go through all possible interactions on the level
    {
        int source = unknown_back[i];
        //for (int j = 0; j < kids_index[level - 1][uncover_unknown]; j++)
        //{
        //int source = Group_kids[level - 1][uncover_unknown][j];
        double distance_x = fabs(Center[level][group_number][0] - Center[level][source][0]);
        double distance_y = fabs(Center[level][group_number][1] - Center[level][source][1]);
        double distance_z = fabs(Center[level][group_number][2] - Center[level][source][2]);
        if (distance_x > distance * 2 || distance_y > distance * 2 || distance_z > distance * 2)
        {
            //this is where the interactions are determained and stored in no particular order
            interaction[level - 1][group_number][index] = source;
            ++index;
            ++interaction_number[level - 1][group_number];
        }
        else if (depth == level + 1)
        {
            /* leaf level: near pair, must be handled directly by the caller */
            still_to_do[group_number][number_still_to_do[group_number]] = source;
            ++number_still_to_do[group_number];
        }
        else
        {
            //This is where all the pices that need to still be investigated on the next level should be stored.
            unknown[unknowns] = source;
            ++unknowns;
        }
        //}
    }
    //function compairing start_observation with all the possible interactions.
    /* expand every still-near group into its children for the next level */
    for (int i = 0; i < unknowns; i++)
    {
        for (int j = 0; j < kids_index[level][unknown[i]]; j++)
        {
            unknown_next[unknown_nexts] = Group_kids[level][unknown[i]][j];
            ++unknown_nexts;
        }
    }
    for (int i = 0; i < kids_index[level][group_number] - 1; i++)//call each time for every child go down all posible brances
    {
        unknown_next[unknown_nexts] = Group_kids[level][group_number][kids_index[level][group_number] - i - 1];;
        ++unknown_nexts;
    }
    if (depth == level + 1)
    {
        for (int i = 0; i < unknown_nexts; i++)
        {
            //memoryyy[1] += num_elements[5][group_number] * num_elements[5][unknown_next[i]];
        }
        //memoryyy[0] += num_elements[5][group_number] * num_elements[5][group_number];
    }
    if (depth > level - 1)
    {
        for (int i = 0; i < kids_index[level][group_number]; i++)//call each time for every child go down all posible brances
        {
            branch(Group_kids, Center, paren_index, kids_index, level + 1, Group_kids[level][group_number][i], distance / 2, unknown_nexts - i, unknown_next, interaction, group_number, depth, interaction_number, memoryyy, num_elements, still_to_do, number_still_to_do);
        }
    }
}
//End Bap

//Start Bap
/*
 * top_levels: entry point of the interaction-list construction.
 *
 * Allocates the per-level interaction tables, seeds the level-1 search by
 * calling branch() once per level-1 group, then:
 *   1. fills the near-field (still_to_do / `extra`) entries of the dense
 *      matrix `total` directly and symmetrically with calcp();
 *   2. fills each group's self block of `total` with calcp();
 *   3. hands the far-field interaction lists to MLACA() for compression.
 *
 * NOTE(review): `number_fils` is unused; `mem`/`mem2`/`track` are computed but
 * only consumed by commented-out diagnostics; the many malloc()ed work arrays
 * (memoryy, extra, number_extra, unknown, interaction, ...) are never freed.
 * NOTE(review): extra[i] is a fixed 100-entry buffer -- overflows silently if
 * a leaf group has more than 100 near neighbours; confirm bound.
 */
void top_levels(int ***Group_kids, int *paren_index, double ***Center, int **kids_index, double distance, int depth, charge **fill, int **number_els, int ***els, int number_groups, int number_fils, double **total, ssystem *syys)
{
    int **group_interaction_number;
    int mem = 0;
    int mem2 = 0;
    int *memoryy = (int*)malloc(2 * sizeof(int));
    memoryy[0] = 0;
    memoryy[1] = 0;
    group_interaction_number = (int **)malloc(depth * sizeof(int *));
    int **extra = (int **)malloc(number_groups * sizeof(int *));
    for (int i = 0; i < number_groups; i++)
    {
        extra[i] = (int *)malloc(100 * sizeof(int));
    }
    int *number_extra = (int *)malloc(number_groups * sizeof(int));
    for (int i = 0; i < number_groups; i++)
    {
        number_extra[i] = 0;
    }
    for (int i = 0; i < depth; i++)
    {
        group_interaction_number[i] = (int *)malloc(number_groups * sizeof(int));
    }
    int observation;
    distance *= (depth - 1);
    int unknowns = 0;
    int x = 0;
    int y = 0;
    int *unknown = (int *)malloc(paren_index[1] * sizeof(int));
    int ***interaction = (int ***)malloc(depth * sizeof(int**));
    for (int i = 0; i < depth - 1; i++)
    {
        interaction[i] = (int **)malloc(paren_index[i + 1] * sizeof(int *));
        for (int j = 0; j < paren_index[i + 1]; j++)
        {
            interaction[i][j] = (int *)malloc((paren_index[i + 1] - 1) * sizeof(int));
        }
    }
    /* one branch() descent per level-1 group; only pairs (observation, j>observation)
       are seeded so each pair is classified once */
    for (int i = 0; i < paren_index[1]; i++)
    {
        unknowns = 0;
        if (y == kids_index[0][x])
        {
            ++x;
            y = 0;
        }
        observation = Group_kids[0][x][y];
        ++y;
        for (int j = observation + 1; j < paren_index[1]; j++)
        {
            unknown[unknowns] = j;
            ++unknowns;
            mem += number_els[1][observation] * number_els[1][j];
        }
        mem2 += number_els[1][observation] * number_els[1][observation];
        branch(Group_kids, Center, paren_index, kids_index, 1, observation, distance*2, unknowns, unknown, interaction, x, depth, group_interaction_number, memoryy, number_els, extra, number_extra);
    }
    int track = 0;
    /* near-field leaf pairs: fill `total` directly (both symmetric entries) */
    for (int i = 0; i < number_groups; i++)
    {
        for (int j = 0; j < number_extra[i]; j++)
        {
            //memoryy[1] += number_els[6][i] * number_els[6][extra[i][j]];
            for (int y = 0; y < number_els[depth - 1][i]; y++)
            {
                for (int t = 0; t < number_els[depth - 1][extra[i][j]]; t++)
                {
                    //total[elements[a + 1][b][i] - 1][elements[a + 1][interactions[a][b][k]][j] - 1]
                    total[els[depth - 1][i][y] - 1][els[depth - 1][extra[i][j]][t] - 1] = calcp(fill[els[depth - 1][i][y] - 1], fill[els[depth - 1][extra[i][j]][t] - 1], NULL);
                    total[els[depth - 1][extra[i][j]][t] - 1][els[depth - 1][i][y] - 1] = calcp(fill[els[depth - 1][i][y] - 1], fill[els[depth - 1][extra[i][j]][t] - 1], NULL);
                    track += 2;
                }
            }
        }
    }
    /* self blocks: each leaf group against itself */
    for (int i = 0; i < number_groups; i++)
    {
        for (int y = 0; y < number_els[depth - 1][i]; y++)
        {
            for (int o = 0; o < number_els[depth - 1][i]; o++)
            {
                total[els[depth - 1][i][y] - 1][els[depth - 1][i][o] - 1] = calcp(fill[els[depth - 1][i][y] - 1], fill[els[depth - 1][i][o] - 1], NULL);
                ++track;
            }
        }
    }
    /* for (int i = 0; i < 1643; i++) { for (int y = 0; y < 1643; y++) { total[i][y] = calcp(fill[i], fill[y], NULL); } } */
    //FILE *matrix = fopen("matrix.txt", "w");
    //int a = 0, b = 0;
    //fprintf(matrix, "%d\t", level_count[4]);
    //fprintf(matrix, "%d\t", level_count[4]);
    //for (a = 0, nextc = sys->directlist; nextc != NULL; nextc = //nextc->dnext)
    //for (a = 0; a < 300; a++)
    //{
    //    for (b = 0; b < 2; b++)
    //    {
    //        fprintf(matrix, "%d\t", memoryy[b]);
    //        fprintf(matrix, "\n");
    //    }
    //    fprintf(matrix, "\n");
    //}
    //fclose(matrix);
    /* far-field interaction lists -> low-rank (ACA) compression */
    MLACA(depth, paren_index, group_interaction_number, interaction, number_els, els, fill,total,syys);
    /* FILE *matrix = fopen("matrix.txt", "w"); for (int i = 0; i < 1643; i++) { for (int j = 0; j < 1643; j++) { fprintf(matrix, "%e\t", total[i][j]- calcp(fill[i], fill[j], NULL)); } fprintf(matrix, "\n"); } fclose(matrix); */
}
//end Bap

/*
  MulMatDirect creates the matrices for the piece of the problem that is
  done directly exactly.
*/
void mulMatDirect(ssystem *sys)
{
    //sys->matrix = (double**)malloc(23226*sizeof(double*));
    //for (int i = 0; i < 23226; i++)
    //{
    //sys->matrix[i] = (double*)malloc(23226 * sizeof(double));
    //}
    /* NOTE(review): memoryyy/memory_off only tally matrix-entry counts for the
       commented-out memory reports at the bottom; otherwise unused. */
    int memoryyy = 0;
    int memory_off = 0;
    cube *nextc, *nextnbr;
    int i, nummats, **temp = NULL;
    extern double lutime, dirtime;
#if DIRSOL == ON || EXPGCR == ON
    extern double *trimat, *sqrmat; /* flattened triangular, square matrices */
    extern int up_size, eval_size;
    extern int *real_index; /* for map btwn condensed/expanded vectors */
#endif
    /* First count the number of matrices to be done directly. */
    for (nextc = sys->directlist; nextc != NULL; nextc = nextc->dnext)
    {
        for (nummats = 1, i = 0; i < nextc->numnbrs; i++)
        {
            nextnbr = nextc->nbrs[i];
            ASSERT(nextnbr->upnumvects > 0);
            nummats++;
        }
        /* Allocate space for the vects and mats. */
        nextc->directnumvects = nummats;
        if (nummats > 0)
        {
            CALLOC(nextc->directq, nummats, double*, ON, AMSC);
            CALLOC(temp, nummats, int*, ON, AMSC);
            CALLOC(nextc->directnumeles, nummats, int, ON, AMSC);
            CALLOC(nextc->directmats, nummats, double**, ON, AMSC);
            /* CALLOC(nextc->precondmats, nummats, double**, ON, AMSC); */
        }
        /* initialize the pointer from this cube to its part of dummy vector
           - save the self part found in indexkid() */
        temp[0] = nextc->nbr_is_dummy[0];
        nextc->nbr_is_dummy = temp;
    }
    /* Now place in the matrices. */
    for (nextc = sys->directlist; nextc != NULL; nextc = nextc->dnext)
    {
        nextc->directq[0] = nextc->upvects[0];
        nextc->directnumeles[0] = nextc->upnumeles[0];
        /* starttimer; */
#if DIRSOL == ON || EXPGCR == ON
        if(nextc == sys->directlist)
        {
            if(eval_size < MAXSIZ)
            {
                fprintf(stderr, "mulMatDirect: non-block direct methods not supported\n");
                exit(1);
                /* if this is going to work, need a special, condensing Q2P
                   as well as some way to use it in the framework of the GCR loop */
                nextc->directmats[0] = Q2P(nextc->chgs, eval_size, nextc->nbr_is_dummy[0], nextc->chgs, eval_size, TRUE);
            }
            else blkQ2Pfull(sys->directlist, up_size, eval_size, &trimat, &sqrmat, &real_index, sys->is_dummy);
        }
        else nextc->directmats[0] = Q2PDiag(nextc->chgs, nextc->upnumeles[0], nextc->nbr_is_dummy[0], TRUE);
#else
        /* self-interaction (diagonal) block for this cube */
        nextc->directmats[0] = Q2PDiag(nextc->chgs, nextc->upnumeles[0], nextc->nbr_is_dummy[0], TRUE);
        memoryyy += nextc->upnumeles[0] * nextc->upnumeles[0];
        /* nextc->precondmats[0] = Q2PDiag(nextc->chgs, nextc->upnumeles[0], nextc->nbr_is_dummy[0], FALSE); */
        /*dumpMatCor(nextc->directmats[0], (double *)NULL, nextc->upnumeles[0]);*/
#endif
        /* stoptimer; */
        dirtime += dtime;
#if DSQ2PD == ON
        dumpQ2PDiag(nextc);
#endif
#if DMTCNT == ON
        Q2PDcnt[nextc->level][nextc->level]++;
#endif
#if DIRSOL == ON
        /* transform A into LU */
        if(eval_size > MAXSIZ)
        {
            blkLUdecomp(sqrmat, trimat, up_size);
        }
        else if(nextc == sys->directlist)
        {
            /* starttimer; */
            nextc->directlu = ludecomp(nextc->directmats[0], eval_size, TRUE);
            /* stoptimer; */
            lutime += dtime;
        }
#endif
        /* starttimer; */
        /* off-diagonal blocks: one Q2P matrix per neighbor cube */
        for (nummats = 1, i = 0; i < nextc->numnbrs; i++)
        {
            nextnbr = nextc->nbrs[i];
            ASSERT(nextnbr->upnumvects > 0);
            nextc->directq[nummats] = nextnbr->upvects[0];
            nextc->nbr_is_dummy[nummats] = nextnbr->nbr_is_dummy[0];
            nextc->directnumeles[nummats] = nextnbr->upnumeles[0];
            nextc->directmats[nummats] = Q2P(nextnbr->chgs, nextnbr->upnumeles[0], nextnbr->nbr_is_dummy[0], nextc->chgs, nextc->upnumeles[0], TRUE);
            nummats++;
            memory_off += nextnbr->upnumeles[0] * nextc->upnumeles[0];
            /* nextc->precondmats[nummats++] = Q2P(nextnbr->chgs, nextnbr->upnumeles[0], nextnbr->nbr_is_dummy[0], nextc->chgs, nextc->upnumeles[0], FALSE); */
#if DMTCNT == ON
            Q2Pcnt[nextc->level][nextnbr->level]++;
#endif
        }
        /* stoptimer; */
        dirtime += dtime;
    }
    /* FILE * fpp; fpp = fopen("file_mlfma_direct.txt", "w+"); fprintf(fpp, " MLFMA direct memory: %d\n", memoryyy * 8); fclose(fpp); */
    /* FILE * fppp; fppp = fopen("file_mlfma_direct_off.txt", "w+"); fprintf(fppp, " MLFMA direct memory: %d\n", memory_off * 8); fclose(fppp); */
}

/*
  MulMatPrecond creates the preconditioner matrix.
  NOTE: the printf + exit(1) at the top make everything after them dead code
  in FastHenry -- this preconditioner is disabled.
*/
void bdmulMatPrecond(ssystem *sys)
{
    cube *nc, *kid, *kidnbr;
    double **mat, **nbrmat;
    int i, j, k, l, kidi;
    int kidsize, nbrsize, size, row, col, first, offset;
    double factor;
    charge *pc;
    surface *surf;
    printf("This Preconditioner is not used in FastHenry\n");
    exit(1);
    for (nc = sys->precondlist; nc != NULL; nc = nc->pnext)
    {
        /* find total number of charges in cube to dimension P. */
        for (size = 0, i = 0; i < nc->numkids; i++)
        {
            kid = nc->kids[i];
            if (kid != NULL)
            {
                ASSERT(kid->level == sys->depth);
                size += kid->directnumeles[0]; /* Equals number of charges. */
            }
        }
        /* allocate and zero a preconditioner matrix. */
        MALLOC(mat, size, double*, ON, AMSC);
        for (i = 0; i < size; i++)
        {
            MALLOC(mat[i], size, double, ON, AMSC);
        }
        for (i = 0; i < size; i++)
        {
            for (j = 0; j < size; j++)
            {
                mat[i][j] = 0.0;
            }
        }
        /* Chase through the kids to place in potential coeffs. */
        for (first = TRUE, row = 0, kidi = 0; kidi < nc->numkids; kidi++)
        {
            kid = nc->kids[kidi];
            if (kid != NULL)
            {
                /* Exploit the hierarchical charge numbering to get precond vector. */
                if (first == TRUE)
                {
                    first = FALSE;
                    nc->prevectq = kid->directq[0];
                    nc->prevectp = kid->eval;
                }
                /* Get the diagonal block of P^{-1}. */
                kidsize = kid->directnumeles[0];
                for (k = kidsize - 1; k >= 0; k--)
                {
                    for (l = kidsize - 1; l >= 0; l--)
                    {
                        mat[row + k][row + l] = kid->directmats[0][k][l];
                    }
                }
                /* Get the off-diagonals of P^{-1}. */
                for (col = 0, i = 0; i < nc->numkids; i++)
                {
                    kidnbr = nc->kids[i];
                    if (kidnbr != NULL)
                    {
                        if (kidnbr != kid)
                        {
                            /* Chase thru list of nbrs to get matrix associated with
                               this kidnbr.  Note, this is because the kid list and nbr
                               matrix list are in different orders, could be fixed. */
                            for (j = kid->numnbrs - 1; j >= 0; j--)
                            {
                                if (kidnbr == kid->nbrs[j])
                                {
                                    nbrmat = kid->directmats[j + 1];
                                    nbrsize = kidnbr->directnumeles[0];
                                    for (k = kidsize - 1; k >= 0; k--)
                                    {
                                        for (l = nbrsize - 1; l >= 0; l--)
                                        {
                                            mat[row + k][col + l] = nbrmat[k][l];
                                        }
                                    }
                                    break;
                                }
                            }
                        }
                        col += kidnbr->directnumeles[0];
                    }
                }
                ASSERT(col == size);
                row += kidsize;
            }
        }
        ASSERT(row == size);
        nc->precond = ludecomp(mat, size, FALSE);
        nc->presize = size;
    }
}

/* This near picks up only the hamming distance one cubes. */
#define HNEAR(nbr, nj, nk, nl) \
((ABS((nbr)->j - (nj)) + ABS((nbr)->k - (nk)) + ABS((nbr)->l - (nl))) <= 1)

/* This near picks up all 27 neighboring cubes. */
#define NEAR(nbr, nj, nk, nl) \
((ABS((nbr)->j - (nj)) <= 1) && \
 (ABS((nbr)->k - (nk)) <= 1) && \
 (ABS((nbr)->l - (nl)) <= 1))

/* This near picks only the diagonal, for testing. */
#define DNEAR(nbr, nj, nk, nl) \
(((nbr)->j == (nj)) && \
 ((nbr)->k == (nk)) && \
 ((nbr)->l == (nl)) )

/*
 * olmulMatPrecond: overlapping (neighbor-cube) preconditioner builder.
 * NOTE: disabled in FastHenry -- the printf + exit(1) below make the rest of
 * this function dead code.
 */
void olmulMatPrecond(ssystem *sys)
{
    cube *nc, *nnbr, *nnnbr;
    double **mat, **nmat;
    int i, j, k, l, m;
    int maxsize, nsize, nnsize, nnnsize, *reorder;
    int nj, nk, nl, offset, noffset;
    int dindex, *nc_dummy, *nnbr_dummy, *nnnbr_dummy;
    static int *is_dummy; /* local dummy flag vector, stays around */
    static int big_mat_size = 0; /* size of previous mat */
    charge **nnnbr_pc, **nnbr_pc, **nc_pc, **mpc, *dp;
    surface *surf;
    double factor;
    printf("This Preconditioner is not used in FastHenry\n");
    exit(1);
    /* Figure out the max number of elements in any set of near cubes. */
    for (maxsize = 0, nc = sys->directlist; nc != NULL; nc = nc->dnext)
    {
        nsize = nc->directnumeles[0];
        nj = nc->j;
        nk = nc->k;
        nl = nc->l;
        for (i = 0; i < nc->numnbrs; i++)
        {
            nnbr = nc->nbrs[i];
            if (NEAR(nnbr, nj, nk, nl)) nsize += nnbr->directnumeles[0];
        }
        maxsize = MAX(nsize, maxsize);
    }
    /* Allocate a matrix big enough for any set of 7. */
#if JACDBG == ON
    printf("max direct size =%d\n", maxsize);
#endif
    MALLOC(reorder, maxsize, int, ON, AMSC);
    MALLOC(mat, maxsize, double*, ON, AMSC);
    for (i = 0; i < maxsize; i++)
    {
        MALLOC(mat[i], maxsize, double, ON, AMSC);
    }
    /* Now go fill-in a matrix. */
    for (maxsize = 0, nc = sys->directlist; nc != NULL; nc = nc->dnext)
    {
        nsize = nc->directnumeles[0];
        nc_dummy = nc->nbr_is_dummy[0];
        nc_pc = nc->chgs;
#if CHKDUM == ON
        chkDummyList(nc_pc, nc_dummy, nsize);
#endif
        nj = nc->j;
        nk = nc->k;
        nl = nc->l;
        for (i = nsize - 1; i >= 0; i--)
        {
            if (nc_dummy[i]) continue; /* dummy rows copied only in divided diff */
            if (nc_pc[i]->surf->type != DIELEC)
            {
                for (j = nsize - 1; j >= 0; j--)
                {
                    mat[i][j] = nc->directmats[0][i][j];
                }
            }
            else
            {
#if DPCOMP == ON
                fprintf(stdout, "Source mat, nc to nc\n");
                dumpMat(nc->directmats[0], nsize, nsize);
#endif
                find_flux_density_row(mat, nc->directmats[0], i, nsize, nsize, 0, 0, nc_pc, nc_pc, nc_dummy, nc_dummy);
            }
        }
        offset = nsize;
        for (k = 0; k < nc->numnbrs; k++)
        { /* loop on neighbors of nc */
            nnbr = nc->nbrs[k];
            if (NEAR(nnbr, nj, nk, nl))
            {
                nnsize = nc->directnumeles[k + 1];
                nmat = nc->directmats[k + 1];
                ASSERT(nc->directnumeles[k + 1] == nnbr->directnumeles[0]);
                nnbr_dummy = nnbr->nbr_is_dummy[0];
                nnbr_pc = nnbr->chgs;
#if CHKDUM == ON
                chkDummyList(nnbr_pc, nnbr_dummy, nnsize);
#endif
                for (i = nsize - 1; i >= 0; i--)
                {
                    if (nc_dummy[i]) continue;
                    if (nc_pc[i]->surf->type != DIELEC)
                    {
                        for (j = nnsize - 1; j >= 0; j--)
                        {
                            mat[i][offset + j] = nmat[i][j];
                        }
                    }
                    else
                    {
#if DPCOMP == ON
                        fprintf(stdout, "Source mat, nnbr to nc\n");
                        dumpMat(nmat, nsize, nnsize);
#endif
                        find_flux_density_row(mat, nmat, i, nnsize, nsize, 0, offset, nc_pc, nnbr_pc, nc_dummy, nnbr_dummy);
                    }
                }
                /* Get the row of the big matrix associated with this nnbr. */
                for (noffset = 0, l = -1; l < nc->numnbrs; l++)
                { /* lp on nc's nbrs */
                    if (l < 0) nnnbr = nc;
                    else nnnbr = nc->nbrs[l];
                    if (NEAR(nnnbr, nj, nk, nl))
                    { /* Note, near to nc!! */
                        if (nnbr == nnnbr) m = -1;
                        else
                        { /* Find this nnnbr's position in nnbr's list */
                            for (m = 0; m < nnbr->numnbrs; m++)
                            {
                                if (nnbr->nbrs[m] == nnnbr) break;
                            }
                            ASSERT(m < nnbr->numnbrs);
                        }
                        nnnsize = nnbr->directnumeles[m + 1];
                        nmat = nnbr->directmats[m + 1];
                        ASSERT( nnbr->directnumeles[m + 1] == nnnbr->directnumeles[0]);
                        nnnbr_pc = nnnbr->chgs; /* panels in nnnbr */
                        nnnbr_dummy = nnnbr->nbr_is_dummy[0];
#if CHKDUM == ON
                        chkDummyList(nnnbr_pc, nnnbr_dummy, nnnsize);
#endif
                        for (i = nnsize - 1; i >= 0; i--)
                        { /* loop on panels in nnbr */
                            if (nnbr_dummy[i]) continue;
                            if (nnbr_pc[i]->surf->type != DIELEC)
                            {
                                for (j = nnnsize - 1; j >= 0; j--)
                                {
                                    mat[offset + i][noffset + j] = nmat[i][j];
                                }
                            }
                            else
                            {
#if DPCOMP == ON
                                fprintf(stdout, "Source mat, nnnbr to nnbr\n");
                                dumpMat(nmat, nnsize, nnnsize);
#endif
                                find_flux_density_row(mat, nmat, i, nnnsize, nnsize, offset, noffset, nnbr_pc, nnnbr_pc, nnbr_dummy, nnnbr_dummy);
                            }
                        }
                        noffset += nnnsize;
                    }
                }
                offset += nnsize;
            }
        }
        /* set up the local is_dummy vector for the rows/cols of mat */
        /* THIS COULD BE AVOIDED BY USING CUBE is_dummy's INSIDE invert() */
        if (big_mat_size < offset)
        { /* allocate only if larger array needed */
            CALLOC(is_dummy, offset, int, ON, AMSC);
        }
        /* dump sections of the dummy vector in order cubes appear in nbr lst */
        /* (use fragment of Jacob's loop above) */
        nnnsize = noffset = nc->directnumeles[0];
        nc_dummy = nc->nbr_is_dummy[0];
        for (i = nnnsize - 1; i >= 0; i--)
        {
            is_dummy[i] = nc_dummy[i];
        }
        for (l = 0; l < nc->numnbrs; l++)
        {
            nnnbr = nc->nbrs[l];
            if (NEAR(nnnbr, nj, nk, nl))
            {
                nnnsize = nnnbr->directnumeles[0];
                nc_dummy = nnnbr->nbr_is_dummy[0];
                for (i = nnnsize - 1; i >= 0; i--)
                {
                    is_dummy[i + noffset] = nc_dummy[i];
                }
                noffset += nnnsize;
            }
        }
        /* The big Matrix is filled in, invert it and get the preconditioner. */
#if DPCOMP == ON
        fprintf(stdout, "Before compression\n");
        dumpMat(mat, offset, offset);
#endif
        nnnsize = compressMat(mat, offset, is_dummy, BOTH);
#if DPCOMP == ON
        fprintf(stdout, "After compression\n");
        dumpMat(mat, nnnsize, nnnsize);
#endif
        invert(mat, nnnsize, NULL);
        expandMat(mat, offset, nnnsize, is_dummy, BOTH);
#if DPCOMP == ON
        fprintf(stdout, "After expansion\n");
        dumpMat(mat, offset, offset);
#endif
        /* Copy out the preconditioner to the saved matrices. */
        for (i = nsize - 1; i >= 0; i--)
        {
            for (j = nsize - 1; j >= 0; j--)
            {
                nc->precondmats[0][i][j] = mat[i][j];
            }
        }
        offset = nsize;
        for (k = 0; k < nc->numnbrs; k++)
        {
            nnbr = nc->nbrs[k];
            if (NEAR(nnbr, nj, nk, nl))
            {
                nnsize = nc->directnumeles[k + 1];
                nmat = nc->precondmats[k + 1];
                for (i = nsize - 1; i >= 0; i--)
                {
                    for (j = nnsize - 1; j >= 0; j--)
                    {
                        nmat[i][j] = mat[i][offset + j];
                    }
                }
                offset += nnsize;
            }
            else nc->precondmats[k + 1] = NULL;
        }
    }
}

/*
  finds a row of flux density coeffs from three potential coeff rows
  - to_mat[eval_row][] is the destination row; from_mat[eval_row][] initially
    contains the potential coefficients for evals at the center of
    eval_panels[eval_row] (unless NUMDPT == 2, is garbage then)
  - the eval panels are scaned until eval_panels[eval_row]'s dummies are found
    and the corresponding two rows are identified
  - the divided differences built with entries in the same columns in these
    three rows replace the to_mat[eval_row][] entries:
    to_mat[eval_row][j] = a1*from_mat[eval_row][j] + a2*from_mat[pos_dum_row][j]
       + a3*from_mat[neg_dum_row][j]
  - if a dummy panel is not found in the panel list, its row is generated
    using explicit calcp() calls (shouldn't happen much)
  - global flags used here
    NUMDPT = number of divided diff points, 2 or 3
    SKIPDQ = ON=>don't do cancellation-prone add-subtract of identical
    influence of DIELEC/BOTH panels' charges on dummy panel pot.
evals */ void find_flux_density_row(double **to_mat, double **from_mat, int eval_row, int n_chg, int n_eval, int row_offset, int col_offset, charge **eval_panels, charge **chg_panels, int *eval_is_dummy, int *chg_is_dummy) { int dindex, j; double factor; charge *dp; surface *surf = eval_panels[eval_row]->surf; /* do divided difference w/ three rows to get dielectric row */ #if NUMDPT == 3 /* - dielectric panel row first */ factor = -(surf->outer_perm + surf->inner_perm)/ (eval_panels[eval_row]->pos_dummy->area); #if DPDDIF == ON fprintf(stdout, "Center row, factor = %g\n", factor); #endif for(j = n_chg - 1; j >= 0; j--) { /* loop on columns */ if(!chg_is_dummy[j]) to_mat[row_offset + eval_row][col_offset + j] = from_mat[eval_row][j]*factor; #if DPDDIF == ON fprintf(stdout, " %.16e", from_mat[eval_row][j]); #endif /* #if DPDDIF == ON */ } #endif /* #if NUMDPT == 3 */ /* - do positive dummy row */ /* first find the dummy row */ dindex = -1; dp = eval_panels[eval_row]->pos_dummy; /* get dummy panel from eval panel */ for (j = n_eval - 1; j >= 0; j--) { if (!eval_is_dummy[j]) continue; if (dp == eval_panels[j]) { dindex = j; break; } } if (dindex != -1) { /* dummy row found */ #if NUMDPT == 3 factor = surf->outer_perm/eval_panels[dindex]->area; #else /* this is the only factor required for two dummy rows in two point case */ factor = (surf->inner_perm - surf->outer_perm) / (eval_panels[eval_row]->neg_dummy->area + eval_panels[eval_row]->pos_dummy->area); #endif #if DPDDIF == ON fprintf(stdout, "\nPos dummy row, factor = %g\n", factor); #endif for (j = n_chg - 1; j >= 0; j--) { #if SKIPDQ == ON if(chg_panels[j]->index == eval_panels[eval_row]->index) { to_mat[row_offset + eval_row][col_offset + j] = 0.0; continue; } #endif if (!chg_is_dummy[j]) #if NUMDPT == 3 to_mat[row_offset + eval_row][col_offset + j] += from_mat[dindex][j]*factor; #else /* make sure to overwrite possible garbage */ to_mat[row_offset + eval_row][col_offset + j] = -from_mat[dindex][j] * factor; 
#endif #if DPDDIF == ON fprintf(stdout, " %.16e (%d)", from_mat[dindex][j],chg_panels[j]->index); #endif } } else { /* dummy row out of cube => build it w/calcp */ #if NUMDPT == 3 factor = surf->outer_perm/dp->area; #else /* this is the only factor required for two dummy rows in two point case */ factor = (surf->inner_perm - surf->outer_perm) / (eval_panels[eval_row]->neg_dummy->area + eval_panels[eval_row]->pos_dummy->area); #endif #if DPDDIF == ON fprintf(stdout, "\nPos dummy calcp row, factor = %g\n", factor); #else fprintf(stderr, "\nolmulMatPrecond: building pos. dummy row\n"); #endif for (j = n_chg - 1; j >= 0; j--) { #if SKIPQD == ON if(chg_panels[j]->index == eval_panels[eval_row]->index) { to_mat[row_offset + eval_row][col_offset + j] = 0.0; continue; } #endif if (!chg_is_dummy[j]) { #if NUMDPT == 3 to_mat[row_offset + eval_row][col_offset + j] += calcp(chg_panels[j], dp, NULL)*factor; #else to_mat[row_offset + eval_row][col_offset + j] = -calcp( chg_panels[j], dp, NULL) * factor; #endif #if DPDDIF == ON fprintf(stdout, " %.16e (%d)", calcp(chg_panels[j], dp, NULL), chg_panels[j]->index); } else { fprintf(stdout, " dummy"); #endif } } } /* - do negative dummy row */ /* first find the dummy row */ dindex = -1; dp = eval_panels[eval_row]->neg_dummy; /* get dummy panel from eval panel */ for (j = n_eval - 1; j >= 0; j--) { if (!eval_is_dummy[j]) continue; if (dp == eval_panels[j]) { dindex = j; break; } } if (dindex != -1) { /* dummy row found */ #if NUMDPT == 3 factor = surf->inner_perm/eval_panels[dindex]->area; #endif #if DPDDIF == ON fprintf(stdout, "\nNeg dummy row, factor = %g\n", factor); #endif for (j = n_chg - 1; j >= 0; j--) { #if SKIPQD == ON if(chg_panels[j]->index == eval_panels[eval_row]->index) continue; #endif if (!chg_is_dummy[j]) to_mat[row_offset + eval_row][col_offset + j] += from_mat[dindex][j] * factor; #if DPDDIF == ON fprintf(stdout, " %.16e (%d)", from_mat[dindex][j],chg_panels[j]->index); #endif } } else { /* dummy row out of cube => 
build it w/calcp */ factor = surf->inner_perm / dp->area; #if DPDDIF == ON fprintf(stdout, "\nNeg dummy calcp row, factor = %g\n", factor); #else fprintf(stderr, "olmulMatPrecond: building neg. dummy row\n"); #endif for (j = n_chg - 1; j >= 0; j--) { #if SKIPQD == ON if(chg_panels[j]->index == eval_panels[eval_row]->index) continue; #endif if (!chg_is_dummy[j]) { to_mat[row_offset + eval_row][col_offset + j] += calcp( chg_panels[j], dp, NULL) * factor; #if DPDDIF == ON fprintf(stdout, " %.16e (%d)", calcp(chg_panels[j], dp, NULL), chg_panels[j]->index); } else { fprintf(stdout, " dummy"); #endif } } } #if NUMDPT == 2 /* - do row entry due to panel contribution - entry only necessary if eval panel is in chg panel list */ /* search for the eval panel in the charge panel list */ dp = NULL; for (j = n_chg - 1; j >= 0; j--) { if (!chg_is_dummy[j]) { if (eval_panels[eval_row] == chg_panels[j]) { dp = eval_panels[eval_row]; break; } } } /* set entry if eval panel found in chg panel list - this is an overwrite; contributions of other rows should cancel */ if (dp != NULL) { to_mat[row_offset + eval_row][col_offset + j] = -(2 * M_PI * (surf->inner_perm + surf->outer_perm) / eval_panels[eval_row]->area); } #endif #if DPDDIF == ON fprintf(stdout, "\nDivided difference row (%d)\n", eval_panels[eval_row]->index); for(j = n_chg - 1; j >= 0; j--) { fprintf(stdout, " %.16e (%d)", to_mat[row_offset + eval_row][col_offset + j], chg_panels[j]->index); } fprintf(stdout, "\n\n"); #endif } /* MulMatUp computes the multipole to multipole or charge to multipole matrices that map to a parent's multipole coeffs from its children's multipoles or charges. Note that only one set of multipole to multipole matrices is computed per level by exploiting the uniform break-up of three-space (ie many shifts have similar geometries). 
*/ void mulMatUp(ssystem *sys) { cube *nextc, *kid; int i, j, numterms, depth, order = sys->order; double **multimats[8]; #if OFF == ON /* OFF == OFF produces the M2M error!! */ for(i=0; i < 8; i++) multimats[i] = NULL; #endif numterms = multerms(order); if (sys->depth < 2) { /* fprintf(stdout, "\nWarning: no multipole acceleration\n");*/ return; /* return if upward pass not possible */ } /* Handle the lowest level cubes first (set up Q2M's). */ for (nextc = sys->multilist[sys->depth]; nextc != NULL; nextc = nextc->mnext) { nextc->multisize = numterms; CALLOC(nextc->multi, numterms, double, ON, AMSC); CALLOC(nextc->upmats, 1, double**, ON, AMSC); nextc->upmats[0] = mulQ2Multi(nextc->chgs, nextc->nbr_is_dummy[0], nextc->upnumeles[0], nextc->x, nextc->y, nextc->z, order); #if DISSYN == ON multicnt[nextc->level]++; #endif #if DMTCNT == ON Q2Mcnt[nextc->level][nextc->level]++; #endif } if (sys->locallist[sys->depth] == NULL && sys->multilist[sys->depth] == NULL) { fprintf(stdout, "No expansions at level %d (lowest)\n", sys->depth); /*if(sys->depth < 3) fprintf(stdout, " (Warning: no multipole acceleration)\n");*/ } else if (sys->locallist[sys->depth] == NULL) { fprintf(stdout, "No local expansions at level %d (lowest)\n", sys->depth); } else if (sys->multilist[sys->depth] == NULL) { fprintf(stdout, "No multipole expansions at level %d (lowest)\n", sys->depth); /*if(sys->depth < 3) fprintf(stdout, " (Warning: no multipole acceleration)\n");*/ } /* Allocate the vectors and matrices for the cubes. 
*/ /* no multipoles over root cube or its kids (would not be used if made) */ for (depth = (sys->depth - 1); depth > 1; depth--) { /* set up M2M's and Q2M's to compute the multipoles needed for this level */ if (sys->locallist[depth] == NULL && sys->multilist[depth] == NULL) { fprintf(stdout, "No expansions at level %d\n", depth); /*if(depth < 3) fprintf(stdout, " (Warning: no multipole acceleration)\n"); else fprintf(stdout, "\n");*/ } else if (sys->locallist[depth] == NULL) { fprintf(stdout, "No local expansions at level %d\n", depth); } else if (sys->multilist[depth] == NULL) { fprintf(stdout, "No multipole expansions at level %d\n", depth); /*if(depth < 3) fprintf(stdout, " (Warning: no multipole acceleration)\n"); else fprintf(stdout, "\n");*/ } #if ON == ON /* ON == OFF produces the M2M error!! */ /* NULL out pointers to same-geometry M2M mats for this level */ for (i = 0; i < 8; i++) multimats[i] = NULL; #endif /* Hit nonempty cubes at this level assigning ptrs to precomputed */ /* M2M mats (for this lev), or if a kid is exact, computing Q2M matrices. */ for (nextc = sys->multilist[depth]; nextc != NULL; nextc = nextc->mnext) { #if DISSYN == ON multicnt[nextc->level]++; #endif /* Save space for upvector sizes, upvect ptrs, and upmats. */ nextc->multisize = numterms; if (numterms > 0) { CALLOC(nextc->multi, numterms, double, ON, AMSC); } if (nextc->upnumvects) { CALLOC(nextc->upnumeles, nextc->upnumvects, int, ON, AMSC); CALLOC(nextc->upvects, nextc->upnumvects, double*, ON, AMSC); CALLOC(nextc->upmats, nextc->upnumvects, double**, ON, AMSC); } /* Go through nonempty kids and fill in upvectors and upmats. */ for (i = 0, j = 0; j < nextc->numkids; j++) { if ((kid = nextc->kids[j]) != NULL) { /* NULL => empty kid cube */ if (kid->mul_exact == FALSE) { /* if kid has a multi */ nextc->upvects[i] = kid->multi; nextc->upnumeles[i] = kid->multisize; if (multimats[j] == NULL) { /* Build the needed matrix only once. 
*/ multimats[j] = mulMulti2Multi(kid->x, kid->y, kid->z, nextc->x, nextc->y, nextc->z, order); } nextc->upmats[i] = multimats[j]; #if DMTCNT == ON M2Mcnt[kid->level][nextc->level]++; /* cnts usage, ~computation */ #endif /* only at most 8 mats really built/level */ } else { /* if kid is exact, has no multi */ nextc->upvects[i] = kid->upvects[0]; nextc->upnumeles[i] = kid->upnumeles[0]; nextc->upmats[i] = mulQ2Multi(kid->chgs, kid->nbr_is_dummy[0], kid->upnumeles[0], nextc->x, nextc->y, nextc->z, order); #if DMTCNT == ON Q2Mcnt[kid->level][nextc->level]++; #endif } i++; /* only increments if kid is not empty */ } } } } } /* builds the transformation matrices for the final evaluation pass (M2P, L2P) for all the direct list (generated by linkcubes(), non-empty lowest level cubes) cubes: for each cube A in the direct list: 1) if the cube is not exact (always the case if ADAPT = OFF) a) and if DNTYPE = GRENGD build an L2P matrix from A to A b) and if DNTYPE = NOSHFT build an L2P matrix from each of A's ancestors with level > 1 (including A) to A c) and if DNTYPE = NOLOCL build an M2P matrix from each of A's fake ilist entries to A (same action as 2b) 2) if the cube is exact, find the 1st ancestor of A, cube B, which either is not exact and is at level 2,3,4... or is at level 1 a) if B is at level 2,3,4... 
i) if DNTYPE = GRENGD, construct an L2P from B to A and M2P's from the cubes in the true interaction lists of A and all its ancestors up to and including B (a partial fake interaction list) j) if DNTYPE = NOSHFT, find cube C, the ancestor of B at level 1; construct L2P's from the ancestors of B (including B but not C) to A and Q- or M2P's from the cubes in the true interaction lists of A and all its ancestors up to and including B (a partial fake interaction list) k) if DNTYPE = NOLOCL, do 2b b) if B is at level 1 construct M2P's for all the cubes in A's fake interaction list true interaction list - RADINTER = OFF, those sibling (same level) cubes of a given cube who are children of the neighbors of the given cube's parent and are not neighbors of the given cube - ie those cubes required to cover charges well separated from the given cube but not accounted for in the parent's local expansion - the flag NNBRS is the number of sibling cube "shells" taken as neighbors fake interaction list - RADINTER = OFF, the combined true interaction lists of a given cube and all its ancestors at levels 2,3,4... if RADINTER = ON, any 8 siblings of the given cube which form a well separated cube one level up are included in the lists as a single higher level cube if ADAPT = OFF, no cube is exact so step 2 is never done this routine is used alone if compiled with DNTYPE = NOLOCL or after mulMatDown, which produces M2L and L2L matrices (DNTYPE = GRENGD) or just M2L matrices (DNTYPE = NOSHFT) -- DNTYPE = GRENGD does the full Greengard hiearchical downward pass */ void mulMatEval(ssystem *sys) { int i, j, k, ttlvects, vects; cube *na, *nc, *nexti; if (sys->depth < 2) return; /* ret if upward pass not possible/worth it */ for (nc = sys->directlist; nc != NULL; nc = nc->dnext) { ASSERT(nc->level == sys->depth); ASSERT(nc->upnumvects > 0); /* allocate space for evaluation pass vectors; check nc's ancestors */ /* First count the number of transformations to do. 
*/ for (na = nc, ttlvects = 0; na->level > 1; na = na->parent) { if (na->loc_exact == FALSE && DNTYPE != NOLOCL) { ttlvects++; /* allow for na to na local expansion (L2P) */ if (DNTYPE == GRENGD) break; /* Only one local expansion if shifting. */ } else { ttlvects += na->interSize; /* room for Q2P and M2P xformations */ } } nc->evalnumvects = ttlvects; /* save ttl # of transformations to do */ if (ttlvects > 0) { CALLOC(nc->evalvects, ttlvects, double*, ON, AMSC); CALLOC(nc->evalnumeles, ttlvects, int, ON, AMSC); CALLOC(nc->evalmats, ttlvects, double**, ON, AMSC); CALLOC(nc->eval_isQ2P, ttlvects, int, ON, AMSC); } #if DILIST == ON fprintf(stdout, "\nInteraction list (%d entries) for ", ttlvects); disExParsimpcube(nc); #endif /* set up exp/charge vectors and L2P, Q2P and/or M2P matrices as req'd */ for (j = 0, na = nc, ttlvects = 0; na->level > 1; na = na->parent) { if (na->loc_exact == FALSE && DNTYPE != NOLOCL) { /* build matrices for local expansion evaluation */ nc->evalmats[j] = mulLocal2P(na->x, na->y, na->z, nc->chgs, nc->upnumeles[0], sys->order); nc->evalnumeles[j] = na->localsize; nc->evalvects[j] = na->local; /* add_to_counts(nc, na->localsize, sys->evalL2Ps, sys->cntL2Ps); */ nc->eval_isQ2P[j] = FALSE; j++; #if DMTCNT == ON L2Pcnt[na->level][nc->level]++; #endif #if DILIST == ON fprintf(stdout, "L2P: "); disExtrasimpcube(na); #endif if (DNTYPE == GRENGD) break; /* Only one local expansion if shifting. 
*/ } else { /* build matrices for ancestor's (or cube's if 1st time) ilist */ for (i = 0; i < na->interSize; i++) { nexti = na->interList[i]; if (nexti->mul_exact == TRUE) { nc->evalvects[j] = nexti->upvects[0]; nc->evalmats[j] = Q2P(nexti->chgs, nexti->upnumeles[0], nexti->nbr_is_dummy[0], nc->chgs, nc->upnumeles[0], TRUE); /* this is a hack to fix the fact that direct stuff is */ /* don't done componentwise */ /* obsolete as of 12/92 due to eval_isQ2P stuff fixEvalDirect(nexti->chgs, nexti->upnumeles[0], nexti->nbr_is_dummy[0], nc->chgs, nc->upnumeles[0], nc->evalmats[j]); */ nc->evalnumeles[j] = nexti->upnumeles[0]; nc->eval_isQ2P[j] = TRUE; /* add_to_counts(nc, nexti->upnumeles[0], sys->evalQ2Ps, sys->cntQ2Ps); */ j++; #if DMTCNT == ON Q2Pcnt[nexti->level][nc->level]++; #endif #if DILIST == ON fprintf(stdout, "Q2P: "); disExtrasimpcube(nexti); #endif } else { nc->evalvects[j] = nexti->multi; nc->evalmats[j] = mulMulti2P(nexti->x, nexti->y, nexti->z, nc->chgs, nc->upnumeles[0], sys->order); nc->evalnumeles[j] = nexti->multisize; nc->eval_isQ2P[j] = FALSE; /* add_to_counts(nc, nexti->multisize, sys->evalM2Ps, sys->cntM2Ps);*/ j++; #if DMTCNT == ON M2Pcnt[nexti->level][nc->level]++; #endif #if DILIST == ON fprintf(stdout, "M2P: "); disExtrasimpcube(nexti); #endif } } } } } } /* sets up matrices for the downward pass For each cube in local list (parents always in list before kids): 1) parent's local to child's local unless DNTYPE = NOSHFT or no parent local 2) multipoles for (Parent+parent's nbrs - child nbrs) to child's local -eval is sum of ancestral local evals for each lowest lev cube if NOSHFT otherwise only lowest level local is evaluated (see mulMatEval) -with ADAPT = OFF no cube is exact so local list is all non-empty cube lev>1 -mats that give potentials (M2P, L2P, Q2P) are calculated in mulMatEval() -this routine makes only L2L, M2L and Q2L matrices */ void mulMatDown(ssystem *sys) { int i, j, vects; cube *nc, *parent, *ni; int depth; ASSERT(DNTYPE != 
NOLOCL); /* use mulMatEval() alone if NOLOCL */ for (depth = 2; depth <= sys->depth; depth++) { /* no locals before level 2 */ for (nc = sys->locallist[depth]; nc != NULL; nc = nc->lnext) { /* Allocate for interaction list, include one for parent if needed */ if ((depth <= 2) || (DNTYPE == NOSHFT)) vects = nc->interSize; else vects = nc->interSize + 1; nc->downnumvects = vects; if (vects > 0) { CALLOC(nc->downvects, vects, double*, ON, AMSC); CALLOC(nc->downnumeles, vects, int, ON, AMSC); CALLOC(nc->downmats, vects, double**, ON, AMSC); } parent = nc->parent; ASSERT(parent->loc_exact == FALSE); /* has >= #evals of any of its kids*/ #if DISSYN == ON localcnt[nc->level]++; #endif if ((depth <= 2) || (DNTYPE == NOSHFT)) i = 0; /* No parent local. */ else { /* Create the mapping matrix for the parent to kid. */ i = 1; nc->downmats[0] = mulLocal2Local(parent->x, parent->y, parent->z, nc->x, nc->y, nc->z, sys->order); nc->downnumeles[0] = parent->localsize; nc->downvects[0] = parent->local; #if DMTCNT == ON L2Lcnt[parent->level][nc->level]++; #endif } /* Go through the interaction list and create mapping matrices. */ for (j = 0; j < nc->interSize; j++, i++) { ni = nc->interList[j]; if (ni->mul_exact == TRUE) { /* ex->ex (Q2P) xforms in mulMatEval */ nc->downvects[i] = ni->upvects[0]; nc->downmats[i] = mulQ2Local(ni->chgs, ni->upnumeles[0], ni->nbr_is_dummy[0], nc->x, nc->y, nc->z, sys->order); nc->downnumeles[i] = ni->upnumeles[0]; #if DMTCNT == ON Q2Lcnt[ni->level][nc->level]++; #endif } else { nc->downvects[i] = ni->multi; nc->downmats[i] = mulMulti2Local(ni->x, ni->y, ni->z, nc->x, nc->y, nc->z, sys->order); nc->downnumeles[i] = ni->multisize; #if DMTCNT == ON M2Lcnt[ni->level][nc->level]++; #endif } } } } }
32.938527
279
0.609021
[ "geometry", "vector", "transform" ]
ecda92f3cdb42729fbafd5eaba2a7ad39c69c8ab
4,391
h
C
SqrMelon/cgmath/cgmath.h
tdhooper/sqrmelon
873ffeec45009ee990517bbece49882bed6a56af
[ "MIT" ]
93
2018-04-14T17:29:40.000Z
2021-05-09T23:04:51.000Z
SqrMelon/cgmath/cgmath.h
tdhooper/sqrmelon
873ffeec45009ee990517bbece49882bed6a56af
[ "MIT" ]
24
2020-04-17T18:51:02.000Z
2021-06-06T15:39:26.000Z
SqrMelon/cgmath/cgmath.h
tdhooper/sqrmelon
873ffeec45009ee990517bbece49882bed6a56af
[ "MIT" ]
8
2018-04-21T12:56:32.000Z
2020-02-05T12:01:20.000Z
/* CGMath by Trevor van Hoof (C) 2017 C include Use this header to get flat functions with C-linkage support from the DLL for maximum compatibility & use from e.g. Python. All class members are converted in <ClassName>_<FunctionName> format including constructors. Destructors are renamed to <ClassName>_Delete. Methods accept a pointer to an instance of the class as first argument. There is no failsafe when an invalid void* is passed in. Memory is owned by the DLL, for garbage collected language use something like __del__ in Python or a finalizer in C# to call the appropriate delete function. The simplest wrapper can be: class Mat44(object): def __init__(self): self.raw = cgmath.Mat44_Mat44() def __del__(self): cgmath.Mat44_Delete(self.raw) With the possible addition of (for automatic method searching): def __getattr__(self, name): try: return getattr(cgmath, '{}_{}'.format(self.__class__.__name__, name)) except: return super(Mat44, self).__getattr__(name) A more integrated looking set of wrappers using the C interface of CGMath is provided (with operator overloads & pythonic naming) in the cgmath python package. 
*/ #pragma once #ifdef EXPORT #define DLL __declspec(dllexport) #else #define DLL __declspec(dllimport) #endif extern "C" { DLL void* Mat44_Mat44(); DLL void Mat44_Delete(void* mat44); DLL void* Mat44_FromFloat16(float* data); DLL void* Mat44_Copy(void* other); DLL void Mat44_Data(void* mat44, float* target); DLL void* Mat44_RotateX(float radians); DLL void* Mat44_RotateY(float radians); DLL void* Mat44_RotateZ(float radians); DLL void* Mat44_Translate(float x, float y, float z); DLL void* Mat44_Scale(float x, float y, float z); DLL void* Mat44_Frustum(float left, float right, float top, float bottom, float near, float far); DLL void* Mat44_Perspective(float fovRadians, float aspect, float near, float far); DLL void* Mat44_TRS(float x, float y, float z, float rx, float ry, float rz, float sx, float sy, float sz); DLL void* Mat44_AxisAngle(void* axis, float angle); DLL void* Mat44_AlignVectors(void* source, void* target); DLL void* Mat44_LookAt(void* position, void* target, void* upDirection, int primaryAxis, int secondaryAxis); DLL void Mat44_Transpose(void* mat44); DLL void Mat44_Transpose33(void* mat44); // notice this isn't faster than Tranpose()! 
It just allows you to inverse an orthonormal transformation matrix quite cheaply through mat.Tranpose33(); m[3] = -m.data[3]; DLL void Mat44_Inverse(void* mat44); DLL void* Mat44_Multiply(void* mat44, void* other); DLL void Mat44_IMultiply(void* mat44, void* other); DLL void* Mat44_Add(void* mat44, void* other); DLL void Mat44_IAdd(void* mat44, void* other); DLL void* Mat44_AddFloat(void* mat44, float value); DLL void Mat44_IAddFloat(void* mat44, float value); DLL void* Mat44_Sub(void* mat44, void* other); DLL void Mat44_ISub(void* mat44, void* other); DLL void* Mat44_SubFloat(void* mat44, float value); DLL void Mat44_ISubFloat(void* mat44, float value); DLL void* Mat44_MulFloat(void* mat44, float value); DLL void Mat44_IMulFloat(void* mat44, float value); DLL void* Mat44_DivFloat(void* mat44, float value); DLL void Mat44_IDivFloat(void* mat44, float value); DLL void* Vector_Vector(); DLL void Vector_Delete(void* vector); DLL void* Vector_FromFloat4(float* data); DLL void* Vector_Copy(void* other); DLL void Vector_Data(void* vector, float* target); DLL void* Vector_Neg(void* a); DLL void* Vector_Sub(void* a, void* b); DLL void Vector_ISub(void* a, void* b); DLL void* Vector_Add(void* a, void* b); DLL void Vector_IAdd(void* a, void* b); DLL void* Vector_Mul(void* a, void* b); DLL void Vector_IMul(void* a, void* b); DLL void* Vector_Div(void* a, void* b); DLL void Vector_IDiv(void* a, void* b); DLL void* Vector_SubFloat(void* a, float b); DLL void Vector_ISubFloat(void* a, float b); DLL void* Vector_AddFloat(void* a, float b); DLL void Vector_IAddFloat(void* a, float b); DLL void* Vector_MulFloat(void* a, float b); DLL void Vector_IMulFloat(void* a, float b); DLL void* Vector_DivFloat(void* a, float b); DLL void Vector_IDivFloat(void* a, float b); DLL float Vector_Dot(void* a, void* b); DLL void* Vector_Cross(void* a, void* b); DLL void* Vector_Normalized(void* a); DLL void* Mat44_Row(void* mat44, int index); DLL void* Mat44_MultiplyVector(void* mat44, void* vector); 
}
41.819048
212
0.746527
[ "object", "vector" ]
ece1ab6409aa70fba8e9425e8b9c4fb2073d790d
2,176
h
C
src/resource/cache/pmeshcache_private.h
lihw/paper3d
a06d056b2ad894a8065b7a996eb7f6ceefec1511
[ "MIT" ]
34
2015-01-29T12:27:25.000Z
2022-03-09T08:07:11.000Z
src/resource/cache/pmeshcache_private.h
lihw/paper3d
a06d056b2ad894a8065b7a996eb7f6ceefec1511
[ "MIT" ]
1
2015-02-04T07:26:50.000Z
2015-02-04T07:36:55.000Z
src/resource/cache/pmeshcache_private.h
lihw/paper3d
a06d056b2ad894a8065b7a996eb7f6ceefec1511
[ "MIT" ]
12
2015-03-24T22:16:53.000Z
2018-07-22T02:09:49.000Z
// pmeshcache_private.h // Mesh data // // Copyright 2012 - 2014 Future Interface. // This software is licensed under the terms of the MIT license. // // Hongwei Li lihw81@gmail.com // #ifndef PMESHCACHE_PRIVATE_H #define PMESHCACHE_PRIVATE_H #include "pcacheobject_private.h" #include <PFoundation/pbox.h> #include <PFoundation/pvector3.h> #include <PFoundation/pglvertexbuffer.h> class P_DLLEXPORT PMeshCache : public PCacheObject { friend class PResourceCache; friend class PMeshLoader; PMeshCache(const PMeshCache &other) : PCacheObject(P_NULL, P_NULL, true) {} void operator=(const PMeshCache &other) {} public: P_INLINE const PBox &boundingBox() const { return m_boundingBox; } P_INLINE pfloat32 *vertices() const { return m_vertices; }; P_INLINE puint16 *indices() const { return m_indices; }; P_INLINE puint32 numberOfIndices() const { return m_numIndices; } P_INLINE puint32 numberOfVertices() const { return m_numVertices; } P_INLINE PGlVertexFormat *vertexFormat() const { return m_vertexFormat; } void update(pfloat32 *vertices, puint32 numVertices, puint16 *indices, puint32 numIndices, const PBox &box, PGlVertexFormat *vertexFormat); protected: virtual void swapIn(); virtual void swapOut(); private: PMeshCache(PResourceCache *parent, const pchar *id); PMeshCache(PResourceCache *parent, const pchar *id, pfloat32 *vertices, puint32 numVertices, puint16 *indices, puint32 numIndices, const PBox &box, PGlVertexFormat *vertexFormat, pbool autoRelease = true); virtual ~PMeshCache(); pbool createMesh(const pchar *path); private: // FIXME: default SRT are exported from FBXNode. In fact, mesh doesn't acquire // a local transformat. It should be properties of drawable which the counterpart // of node in FBX scene. PBox m_boundingBox; pfloat32 *m_vertices; puint32 m_numVertices; puint16 *m_indices; puint32 m_numIndices; PGlVertexFormat *m_vertexFormat; }; #endif // !PMESHCACHE_PRIVATE_H
29.405405
97
0.690257
[ "mesh" ]
ece4f0c29855a6f8299aab280436667bb14b76ac
4,605
c
C
ext/src/Type/Custom.c
dave-d-code/cassandra-php-driver
29e32d501b144b1071896c2228c32f1af6e1efcd
[ "Apache-2.0" ]
null
null
null
ext/src/Type/Custom.c
dave-d-code/cassandra-php-driver
29e32d501b144b1071896c2228c32f1af6e1efcd
[ "Apache-2.0" ]
null
null
null
ext/src/Type/Custom.c
dave-d-code/cassandra-php-driver
29e32d501b144b1071896c2228c32f1af6e1efcd
[ "Apache-2.0" ]
null
null
null
/** * Copyright 2015-2016 DataStax, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "php_driver.h" #include "php_driver_types.h" #include "util/types.h" zend_class_entry *php_driver_type_custom_ce = NULL; PHP_METHOD(TypeCustom, __construct) { zend_throw_exception_ex(php_driver_logic_exception_ce, 0 TSRMLS_CC, "Instantiation of a " PHP_DRIVER_NAMESPACE "\\Type\\Custom type is not supported." ); return; } PHP_METHOD(TypeCustom, name) { php_driver_type *custom; if (zend_parse_parameters_none() == FAILURE) { return; } custom = PHP_DRIVER_GET_TYPE(getThis()); PHP5TO7_RETVAL_STRING(custom->name); } PHP_METHOD(TypeCustom, __toString) { php_driver_type *custom; if (zend_parse_parameters_none() == FAILURE) { return; } custom = PHP_DRIVER_GET_TYPE(getThis()); PHP5TO7_RETVAL_STRING(custom->name); } PHP_METHOD(TypeCustom, create) { zend_throw_exception_ex(php_driver_logic_exception_ce, 0 TSRMLS_CC, "Instantiation of a " PHP_DRIVER_NAMESPACE "\\Type\\Custom instance is not supported." 
); return; } ZEND_BEGIN_ARG_INFO_EX(arginfo_none, 0, ZEND_RETURN_VALUE, 0) ZEND_END_ARG_INFO() ZEND_BEGIN_ARG_INFO_EX(arginfo_value, 0, ZEND_RETURN_VALUE, 0) ZEND_ARG_INFO(0, value) ZEND_END_ARG_INFO() static zend_function_entry php_driver_type_custom_methods[] = { PHP_ME(TypeCustom, __construct, arginfo_none, ZEND_ACC_PUBLIC) PHP_ME(TypeCustom, name, arginfo_none, ZEND_ACC_PUBLIC) PHP_ME(TypeCustom, __toString, arginfo_none, ZEND_ACC_PUBLIC) PHP_ME(TypeCustom, create, arginfo_value, ZEND_ACC_PUBLIC) PHP_FE_END }; static zend_object_handlers php_driver_type_custom_handlers; static HashTable * php_driver_type_custom_gc(zval *object, php5to7_zval_gc table, int *n TSRMLS_DC) { *table = NULL; *n = 0; return zend_std_get_properties(object TSRMLS_CC); } static HashTable * php_driver_type_custom_properties(zval *object TSRMLS_DC) { php5to7_zval name; php_driver_type *self = PHP_DRIVER_GET_TYPE(object); HashTable *props = zend_std_get_properties(object TSRMLS_CC); PHP5TO7_ZVAL_MAYBE_MAKE(name); PHP5TO7_ZVAL_STRING(PHP5TO7_ZVAL_MAYBE_P(name), self->name); PHP5TO7_ZEND_HASH_UPDATE(props, "name", sizeof("name"), PHP5TO7_ZVAL_MAYBE_P(name), sizeof(zval)); return props; } static int php_driver_type_custom_compare(zval *obj1, zval *obj2 TSRMLS_DC) { php_driver_type* type1 = PHP_DRIVER_GET_TYPE(obj1); php_driver_type* type2 = PHP_DRIVER_GET_TYPE(obj2); return php_driver_type_compare(type1, type2 TSRMLS_CC); } static void php_driver_type_custom_free(php5to7_zend_object_free *object TSRMLS_DC) { php_driver_type *self = PHP5TO7_ZEND_OBJECT_GET(type, object); if (self->data_type) cass_data_type_free(self->data_type); if (self->name) { efree(self->name); self->name = NULL; } zend_object_std_dtor(&self->zval TSRMLS_CC); PHP5TO7_MAYBE_EFREE(self); } static php5to7_zend_object php_driver_type_custom_new(zend_class_entry *ce TSRMLS_DC) { php_driver_type *self = PHP5TO7_ZEND_OBJECT_ECALLOC(type, ce); self->type = CASS_VALUE_TYPE_CUSTOM; self->data_type = cass_data_type_new(self->type); 
self->name = NULL; PHP5TO7_ZEND_OBJECT_INIT_EX(type, type_custom, self, ce); } void php_driver_define_TypeCustom(TSRMLS_D) { zend_class_entry ce; INIT_CLASS_ENTRY(ce, PHP_DRIVER_NAMESPACE "\\Type\\Custom", php_driver_type_custom_methods); php_driver_type_custom_ce = php5to7_zend_register_internal_class_ex(&ce, php_driver_type_ce); memcpy(&php_driver_type_custom_handlers, zend_get_std_object_handlers(), sizeof(zend_object_handlers)); php_driver_type_custom_handlers.get_properties = php_driver_type_custom_properties; #if PHP_VERSION_ID >= 50400 php_driver_type_custom_handlers.get_gc = php_driver_type_custom_gc; #endif php_driver_type_custom_handlers.compare_objects = php_driver_type_custom_compare; php_driver_type_custom_ce->ce_flags |= PHP5TO7_ZEND_ACC_FINAL; php_driver_type_custom_ce->create_object = php_driver_type_custom_new; }
29.14557
105
0.769815
[ "object" ]
ece620c0767797298f53af0bded5af30ea04291a
205,098
c
C
imd_param.c
fmqeisfeld/IMD
38c053355a1fa43168d3c785d8b55d789b07f222
[ "MIT" ]
2
2021-05-30T08:23:34.000Z
2021-07-08T07:49:51.000Z
imd_param.c
fmqeisfeld/IMD
38c053355a1fa43168d3c785d8b55d789b07f222
[ "MIT" ]
null
null
null
imd_param.c
fmqeisfeld/IMD
38c053355a1fa43168d3c785d8b55d789b07f222
[ "MIT" ]
null
null
null
/****************************************************************************** * * IMD -- The ITAP Molecular Dynamics Program * * Copyright 1996-2012 Institute for Theoretical and Applied Physics, * University of Stuttgart, D-70550 Stuttgart * ******************************************************************************/ /***************************************************************************** * * read in parameter files (tag based) MH 260298 * * $Revision$ * $Date$ * ******************************************************************************/ /* the following is needed for gettimeofday */ #include <sys/time.h> #include "imd.h" #if defined(CBE) #include "imd_cbe.h" #endif typedef enum ParamType { PARAM_STR, PARAM_STRPTR, PARAM_CHARPTR, PARAM_INT, PARAM_INT_COPY, PARAM_INTEGER, PARAM_INTEGER_COPY, PARAM_REAL, PARAM_REAL_COPY } PARAMTYPE; int curline; /* number of current line */ /***************************************************************************** * * Parameter aus Zeile auslesen / get parameter from line * *****************************************************************************/ /* Parameter: param_name ... Parametername (fuer Fehlermeldungen) param ........ Adresse der Variable fuer den Parameter ptype ........ Parametertyp folgende Werte sind zulaessig: PARAM_STR : String, deklariert als char[] PARAM_STRPTR : String, deklariert als Zeiger auf char* PARAM_CHARPTR : String, deklariert als char** PARAM_INT : Integer-Wert(e) PARAM_INT_COPY : Integer-Wert(e), kopierend PARAM_REAL : Real-Wert(e) PARAM_REAL_COPY : Real-Wert(e), kopierend pnum_min ..... Minimale Anzahl der einzulesenden Werte (Falls weniger Werte gelesen werden koennen als verlangt, wird ein Fehler gemeldet). pnum_max ..... Maximale Anzahl der einzulesenden Werte (Die nicht kopierenden Routinen lesen hoechstens pnum_max Werte aus der uebergebenen Zeile ein, weitere Werte werden ignoriert. 
Falls weniger als pnum_max Werte vorhanden sind, wird das Lesen abgebrochen, es wird kein Fehler gemeldet, wenn mindestens pnum_min Werte abgesaettigt wurden. Die kopierenden Routinen melden ebenfalls keinen Fehler, wenn mindestens pnum_min Werte abgesaettigt wurden. Falls weniger als pnum_max Werte vorhanden sind, werden die restlichen Werte mit Kopien des zuletzt gelesenen Werts aufgefuellt. Resultat: nichtkopierende Routinen: Die Anzahl der gelesenen Werte wird zurueckgegeben. kopierende Routinen: Die Anzahl der tatsaechlich gelesenen Werte wird zurueckgegeben. Resultat = pnum_max - Anzahl der Kopien */ static int getparam(char *param_name, void *param, PARAMTYPE ptype, int pnum_min, int pnum_max) { static char errmsg[256]; char *str; int i; int numread; numread = 0; if (ptype == PARAM_STR) { str = strtok(NULL," =\t\r\n"); if (str == NULL) { sprintf(errmsg,"parameter for %s missing in line %u\nstring expected", param_name,curline); error(errmsg); } else strncpy((char *)param,str,pnum_max); numread++; } else if (ptype == PARAM_STRPTR) { str = strtok(NULL," =\t\r\n"); if (str == NULL) { sprintf(errmsg,"parameter for %s missing in line %u\nstring expected", param_name,curline); error(errmsg); } else *((char**)param) = strdup(str); numread++; } else if (ptype == PARAM_CHARPTR) { i = 0; str = strtok(NULL," =\t\r\n"); while (str != NULL && i < ntypes) { strncpy(((char **)param)[i],str,strlen(str)); str = strtok(NULL," =\t\r\n"); i++; }; if (i < ntypes) { sprintf(errmsg, "parameter for %s missing in line %d\nlist of %d strings expected", param_name, curline, ntypes); error(errmsg); } } else if (ptype == PARAM_INT) { for (i=0; i<pnum_min; i++) { str = strtok(NULL," =\t\r\n"); if (str == NULL) { sprintf(errmsg,"parameter for %s missing in line %u\n", param_name,curline); sprintf(errmsg+strlen(errmsg),"integer vector of dim %u expected", (unsigned)pnum_min); error(errmsg); } else ((int*)param)[i] = atoi(str); numread++; } for (i=pnum_min; i<pnum_max; i++) { if ((str = 
strtok(NULL," =\t\r\n")) != NULL) { ((int*)param)[i] = atoi(str); numread++; } else break; } } else if (ptype == PARAM_INT_COPY) { int ival = 0; for (i=0; i<pnum_max; i++) { str = strtok(NULL," =\t\r\n"); if (str != NULL) { ival = atoi(str); numread++; /* return number of parameters actually read */ } else if (i<pnum_min) { sprintf(errmsg,"parameter for %s missing in line %u\n", param_name,curline); sprintf(errmsg+strlen(errmsg),"integer vector of dim %u expected", (unsigned)pnum_min); error(errmsg); } ((int*)param)[i] = ival; } } else if (ptype == PARAM_INTEGER) { for (i=0; i<pnum_min; i++) { str = strtok(NULL," =\t\r\n"); if (str == NULL) { sprintf(errmsg,"parameter for %s missing in line %u\n", param_name,curline); sprintf(errmsg+strlen(errmsg),"integer vector of dim %u expected", (unsigned)pnum_min); error(errmsg); } else ((integer*)param)[i] = atoi(str); numread++; } for (i=pnum_min; i<pnum_max; i++) { if ((str = strtok(NULL," =\t\r\n")) != NULL) { ((integer*)param)[i] = atoi(str); numread++; } else break; } } else if (ptype == PARAM_INTEGER_COPY) { int ival = 0; for (i=0; i<pnum_max; i++) { str = strtok(NULL," =\t\r\n"); if (str != NULL) { ival = atoi(str); numread++; /* return number of parameters actually read */ } else if (i<pnum_min) { sprintf(errmsg,"parameter for %s missing in line %u\n", param_name,curline); sprintf(errmsg+strlen(errmsg),"integer vector of dim %u expected", (unsigned)pnum_min); error(errmsg); } ((integer*)param)[i] = (integer)ival; } } else if (ptype == PARAM_REAL) { for (i=0; i<pnum_min; i++) { str = strtok(NULL," =\t\r\n"); if (str == NULL) { sprintf(errmsg,"parameter for %s missing in line %u\n", param_name,curline); sprintf(errmsg+strlen(errmsg),"real vector of dim %u expected", (unsigned)pnum_min); error(errmsg); } else ((real*)param)[i] = atof(str); numread++; } for (i=pnum_min; i<pnum_max; i++) { if ((str = strtok(NULL," =\t\r\n")) != NULL) { ((real*)param)[i] = atof(str); numread++; } else break; } } else if (ptype == 
PARAM_REAL_COPY) { real rval = 0; for (i=0; i<pnum_max; i++) { str = strtok(NULL," =\t\r\n"); if (str != NULL) { rval = atof(str); numread++; /* return number of parameters actually read */ } else if (i<pnum_min) { sprintf(errmsg,"parameter for %s missing in line %u\n", param_name,curline); sprintf(errmsg+strlen(errmsg),"real vector of dim %u expected", (unsigned)pnum_min); error(errmsg); } ((real*)param)[i] = rval; } } return numread; } /* getparam */ /***************************************************************************** * * read in parameter file in new format (tag based) with name <paramfname> * * lines beginning with comment characters '#' or blank lines are skipped * *****************************************************************************/ int getparamfile(char *paramfname, int phase) { FILE *pf; char buffer[1024]; char *token; char *res; str255 tmpstr; int tmp, finished = 0; real rtmp; #ifdef EXTPOT real rtmp4[4]; #endif #ifdef TWOD vektor3d tempforce; vektor nullv={0.0,0.0}; vektor3d tempvek; vektor einsv={1.0,1.0}; vektor3d tempshift; #else vektor4d tempforce; vektor nullv={0.0,0.0,0.0}; vektor4d tempvek; vektor einsv={1.0,1.0,1.0}; vektor4d tempshift; #endif vektor force; vektor vek; vektor shift; vektor shear, base; int k; int i; vektor3d tempv3d; real norm_bend_axis; ivektor2d tmp_ivec2d INIT(nullvektor2d); curline = 0; pf = fopen(paramfname,"r"); if (NULL == pf) { error_str("Could not open parameter file %s", paramfname); } /* set the random number generator seed to the */ /* negative of the current time in seconds */ /* this will be superseded by a fixed value from the parameter file */ { struct timeval tv; gettimeofday(&tv,NULL); seed = (long) -tv.tv_sec; } do { res=fgets(buffer,1024,pf); if (NULL == res) { finished=1; break; }; /* probably EOF reached */ curline++; /* delete comments */ res = strchr(buffer, '#'); if (res) *res = '\0'; token = strtok(buffer," =\t\r\n"); if (NULL == token) continue; /* skip blank lines */ if 
(strcasecmp(token,"simulation")==0) { /* get number of the simulation phase */ getparam(token,&tmp,PARAM_INT,1,1); if (phase < tmp) break; } #ifdef DEBUG else if (strcasecmp(token,"force_celldim_divisor")==0) { getparam(token,&force_celldim_divisor,PARAM_INT,3,3); } #endif else if (strcasecmp(token,"maxwalltime")==0) { /* maximal walltime limit */ getparam(token,&maxwalltime,PARAM_REAL,1,1); } else if (strcasecmp(token,"watch_int")==0) { /* interval for checking write file */ getparam(token,&watch_int,PARAM_INT,1,1); stop_int = watch_int; } else if (strcasecmp(token,"stop_int")==0) { /* interval for checking stop file */ getparam(token,&stop_int,PARAM_INT,1,1); } else if (strcasecmp(token,"hyper_threads")==0) { /* number of hyperthreads per CPU */ getparam(token,&hyper_threads,PARAM_INT,1,1); } else if (strcasecmp(token,"loop")==0) { /* looping for online visualisation */ getparam(token,&loop,PARAM_INT,1,1); } else if (strcasecmp(token,"seed")==0) { /* seed for random number generator in maxwell */ int tmp; getparam("seed",&tmp,PARAM_INT,1,1); seed = (long) tmp; if (seed > 0) seed = -seed; } else if (strcasecmp(token,"do_maxwell")==0) { /* force temperature initialization */ getparam(token,&do_maxwell,PARAM_INT,1,1); } else if (strcasecmp(token,"box_from_header")==0) { /* read box from config file */ getparam(token,&box_from_header,PARAM_INT,1,1); } else if (strcasecmp(token,"coordname")==0) { /* file name for atom coordinate input data */ getparam("coordname",infilename,PARAM_STR,1,255); } else if (strcasecmp(token,"itrname")==0) { /* file name for initial itr-file */ getparam(token,itrfilename,PARAM_STR,1,255); } else if (strcasecmp(token,"outfiles")==0) { /* output file basename */ getparam("outfiles",outfilename,PARAM_STR,1,255); } else if (strcasecmp(token,"potfile")==0) { /* filename for potential data */ getparam("potfile",potfilename,PARAM_STR,1,255); have_potfile = 1; } else if (strcasecmp(token,"ensemble")==0) { /* ensemble */ 
getparam(token,tmpstr,PARAM_STR,1,255); if (strcasecmp(tmpstr,"nve")==0) { ensemble = ENS_NVE; move_atoms = move_atoms_nve; } else if (strcasecmp(tmpstr,"mik")==0) { ensemble = ENS_MIK; move_atoms = move_atoms_mik; } else if (strcasecmp(tmpstr,"nvt")==0) { ensemble = ENS_NVT; move_atoms = move_atoms_nvt; } else if (strcasecmp(tmpstr,"nvx")==0) { ensemble = ENS_NVX; move_atoms = move_atoms_nvx; } else if (strcasecmp(tmpstr,"npt_iso")==0) { ensemble = ENS_NPT_ISO; move_atoms = move_atoms_npt_iso; } else if (strcasecmp(tmpstr,"npt_axial")==0) { ensemble = ENS_NPT_AXIAL; move_atoms = move_atoms_npt_axial; } else if (strcasecmp(tmpstr,"glok")==0) { ensemble = ENS_GLOK; move_atoms = move_atoms_nve; } else if (strcasecmp(tmpstr,"and")==0) { error("please use nve ensemble with option and"); } else if (strcasecmp(tmpstr,"frac")==0) { ensemble = ENS_FRAC; move_atoms = move_atoms_frac; } else if (strcasecmp(tmpstr,"ftg")==0) { ensemble = ENS_FTG; move_atoms = move_atoms_ftg; } else if (strcasecmp(tmpstr,"finnis")==0) { ensemble = ENS_FINNIS; move_atoms = move_atoms_finnis; } else if (strcasecmp(tmpstr,"sllod")==0) { ensemble = ENS_SLLOD; move_atoms = move_atoms_sllod; } else if (strcasecmp(tmpstr,"stm")==0) { ensemble = ENS_STM; move_atoms = move_atoms_stm; } #ifdef CG else if (strcasecmp(tmpstr,"cg")==0) { ensemble = ENS_CG; move_atoms = move_atoms_cg; } #endif else if (strcasecmp(tmpstr,"ttm")==0) { ensemble = ENS_TTM; move_atoms = move_atoms_ttm; } else { error("unknown ensemble"); } } else if (strcasecmp(token,"maxsteps")==0) { /* number of steps for total simulation */ getparam("maxsteps",&steps_max,PARAM_INT,1,1); } else if (strcasecmp(token,"startstep")==0) { /* (re)starting step for the simulation */ getparam("startstep",&steps_min,PARAM_INT,1,1); } else if (strcasecmp(token,"checkpt_int")==0) { /* number of steps between checkpoints / period for checkpoints */ getparam("checkpt_int",&checkpt_int,PARAM_INT,1,1); } else if (strcasecmp(token,"eng_int")==0) { /* energy 
data output interval */ getparam("eng_int",&eng_int,PARAM_INT,1,1); } else if (strcasecmp(token,"flush_int")==0) { /* interval for flushing .eng file */ getparam(token,&flush_int,PARAM_INT,1,1); } else if (strcasecmp(token,"dist_int")==0) { /* number of steps between energy dist. writes */ getparam(token,&dist_int,PARAM_INT,1,1); } //MYMOD else if (strcasecmp(token,"dist_mdtemp_flag")==0) { /* write average sample velocity? */ getparam(token,&dist_mdtemp_flag,PARAM_INT,1,1); } //ENDOF MYMOD else if (strcasecmp(token,"dist_dim")==0) { /* dimension of distributions */ getparam(token,&dist_dim,PARAM_INT,DIM,DIM); } else if (strcasecmp(token,"dist_ll")==0) { /* lower left corner of distributions */ getparam(token,&dist_ll,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"dist_ur")==0) { /* upper right corner of distribution */ getparam(token,&dist_ur,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"dist_Ekin_flag")==0) { /* write Ekin dist? */ getparam(token,&dist_Ekin_flag,PARAM_INT,1,1); } else if (strcasecmp(token,"dist_Epot_flag")==0) { /* write Epot dist? */ getparam(token,&dist_Epot_flag,PARAM_INT,1,1); } else if (strcasecmp(token,"dist_press_flag")==0) { /* write press dist? */ getparam(token,&dist_press_flag,PARAM_INT,1,1); } else if (strcasecmp(token,"dist_pressoff_flag")==0) { /* write press dist? */ getparam(token,&dist_pressoff_flag,PARAM_INT,1,1); } else if (strcasecmp(token,"dist_presstens_flag")==0) { /* write pressoff dist? */ getparam(token,&dist_presstens_flag,PARAM_INT,1,1); } else if (strcasecmp(token,"dist_Ekin_long_flag")==0) { /* write longitudinal Ekin dist? */ getparam(token,&dist_Ekin_long_flag,PARAM_INT,1,1); } else if (strcasecmp(token,"dist_Ekin_trans_flag")==0) { /* write transversal Ekin dist? */ getparam(token,&dist_Ekin_trans_flag,PARAM_INT,1,1); } else if (strcasecmp(token,"dist_Ekin_comp_flag")==0) { /* write difference Ekin dist? 
*/ getparam(token,&dist_Ekin_comp_flag,PARAM_INT,1,1); } else if (strcasecmp(token,"dist_shock_shear_flag")==0) { /* write shock shear dist? */ getparam(token,&dist_shock_shear_flag,PARAM_INT,1,1); } else if (strcasecmp(token,"dist_shear_aniso_flag")==0) { /* write shear aniso dist? */ getparam(token,&dist_shear_aniso_flag,PARAM_INT,1,1); } else if (strcasecmp(token,"dist_dens_flag")==0) { /* write density dist? */ getparam(token,&dist_dens_flag,PARAM_INT,1,1); } else if (strcasecmp(token,"dist_vxavg_flag")==0) { /* write average sample velocity? */ getparam(token,&dist_vxavg_flag,PARAM_INT,1,1); } else if (strcasecmp(token,"pic_int")==0) { /* number of steps between picture writes */ getparam("pic_int",&pic_int,PARAM_INT,1,1); } else if (strcasecmp(token,"pbc_dirs")==0) { /* directions with periodic boundary conditions */ getparam("pbc_dirs",&pbc_dirs,PARAM_INT,DIM,DIM); } #ifdef NBLIST else if (strcasecmp(token,"nbl_margin")==0) { /* margin of neighbor list */ getparam(token,&nbl_margin,PARAM_REAL,1,1); } else if (strcasecmp(token,"nbl_size")==0) { /* size of neighbor list */ getparam(token,&nbl_size,PARAM_REAL,1,1); } #endif #ifdef NEB else if (strcasecmp(token,"neb_nrep")==0) { /* number of NEB replicas */ getparam(token,&neb_nrep,PARAM_INT,1,1); if (0==myrank) { if (num_cpus != neb_nrep) error("We need exactly neb_nrep MPI processes"); if (neb_nrep>NEB_MAXNREP) error("Too many images for NEB"); } } else if (strcasecmp(token,"neb_eng_int")==0) { /* interval of NEB energy writes */ getparam(token,&neb_eng_int,PARAM_INT,1,1); } else if (strcasecmp(token,"neb_cineb_start")==0) { /* when to change to CINEB */ getparam(token,&neb_cineb_start,PARAM_INT,1,1); } else if (strcasecmp(token,"neb_climbing_image")==0) { /* which image should be climbing */ getparam(token,&neb_climbing_image,PARAM_INT,1,1); } else if (strcasecmp(token,"neb_vark_start")==0) { /* when to change to variable ks */ getparam(token,&neb_vark_start,PARAM_INT,1,1); } else if 
(strcasecmp(token,"neb_k")==0) { /* spring constant of NEB */ getparam(token,&neb_k,PARAM_REAL,1,1); } else if (strcasecmp(token,"neb_maxmove")==0) { /* constrain relaxation steps */ getparam(token,&neb_maxmove,PARAM_REAL,1,1); } else if (strcasecmp(token,"neb_kmax")==0) { /* if >0 variable springs are used with max. spring constant */ getparam(token,&neb_kmax,PARAM_REAL,1,1); } else if (strcasecmp(token,"neb_kmin")==0) { /* if >0 variable springs are used with max. spring constant */ getparam(token,&neb_kmin,PARAM_REAL,1,1); } #endif #ifdef BBOOST else if (strcasecmp(token,"bb_tot_bV")==0) { /* magnitude of boost potential, unit according to potential */ getparam(token,&bb_tot_bV,PARAM_REAL,1,1); } else if (strcasecmp(token,"bb_p1_2")==0) { /* curvature controller of the boost potential */ getparam(token,&p1_2,PARAM_REAL,1,1); } else if (strcasecmp(token,"bb_relaxsteps_max")==0) { /* max number of steps for the relaxation part of the bond boos method */ getparam(token,&bb_relaxsteps_max,PARAM_INT,1,1); } else if (strcasecmp(token,"bb_shdn_max")==0) { /* max number of steps for the shutdown part of the bond boos method */ getparam(token,&bb_shdn_max,PARAM_INT,1,1); } else if (strcasecmp(token,"bb_under_max")==0) { /* max number of steps for the under boosting part of the bond boos method */ getparam(token,&bb_under_max,PARAM_INT,1,1); } else if (strcasecmp(token,"bb_epscrit")==0) { if (ntypes==0) error("specify parameter ntypes before bb_epscrit"); /* critical fraction of bond length to consider a bond broken */ /* format: type1 type2 epscrit */ getparam(token,&tempv3d,PARAM_REAL,3,3); if (tempv3d.x>ntypes-1 || tempv3d.y>ntypes-1 ) error("bb_epscrit defined for non existing type of bond\n"); bb_epscrit[(int)(tempv3d.x)][(int)(tempv3d.y)] = tempv3d.z; } else if (strcasecmp(token,"bb_rcut")==0) { /* the cut off for the range of the bond boos method */ getparam(token,&bb_rcut,PARAM_REAL,1,1); } #endif #ifdef VEC else if (strcasecmp(token,"atoms_per_cpu")==0) { /* 
maximal number of atoms per CPU */ getparam(token,&atoms_per_cpu,PARAM_INT,1,1); } #endif #ifdef EFILTER else if (strcasecmp(token,"ef_checkpt_int")==0) { /* number of steps between energy filtered checkpoints */ getparam("ef_checkpt_int",&ef_checkpt_int,PARAM_INT,1,1); } else if (strcasecmp(token,"e_pot_lower")==0) { if (ntypes==0) error("specify parameter ntypes before e_pot_lower"); getparam("e_pot_lower",lower_e_pot,PARAM_REAL,ntypes,ntypes); } else if (strcasecmp(token,"e_pot_upper")==0) { if (ntypes==0) error("specify parameter ntypes before e_pot_upper"); getparam("e_pot_upper",upper_e_pot,PARAM_REAL,ntypes,ntypes); } #endif #ifdef CLONE else if (strcasecmp(token,"nclones")==0) { /* number of clones to deal with*/ getparam(token,&nclones,PARAM_INT,1,1); } #endif #ifdef NNBR else if (strcasecmp(token,"nb_rcut")==0) { /* cutoff radius for coordination number */ if (ntypes==0) error("specify parameter ntypes before nb_rcut"); getparam(token,nb_r2_cut,PARAM_REAL,ntypes*ntypes,ntypes*ntypes); for (k=0; k<ntypes*ntypes; k++) nb_r2_cut[k] = SQR(nb_r2_cut[k]); } else if (strcasecmp(token,"nb_checkpt_int")==0) { getparam("nb_checkpt_int",&nb_checkpt_int,PARAM_INT,1,1); } else if (strcasecmp(token,"nb_cut_lower")==0) { if (ntypes==0) error("specify parameter ntypes before nb_cut_lower"); getparam("nb_cut_lower",lower_nb_cut,PARAM_INT,ntypes,ntypes); } else if (strcasecmp(token,"nb_cut_upper")==0) { if (ntypes==0) error("specify parameter ntypes before nb_cut_upper"); getparam("nb_cut_upper",upper_nb_cut,PARAM_INT,ntypes,ntypes); } #endif else if (strcasecmp(token,"total_types")==0) { /* TOTAL nuber of atom types: ntypes + virtual types */ int vt, init = (vtypes==0); getparam(token,&vt,PARAM_INT,1,1); if (init) { vtypes = vt; } else if (vt != vtypes) { error("total_types must be constant during a simulation"); } /* do some allocations and initialisations */ if (init) { restrictions = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==restrictions) error("Cannot 
allocate memory for restriction vectors\n"); for (k=0; k<vtypes; k++) restrictions[k] = einsv; #ifdef FBC /* Allocation & Initialisation of fbc_forces */ fbc_forces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_forces) error("Cannot allocate memory for fbc_forces\n"); for (k=0; k<vtypes; k++) fbc_forces[k] = nullv; fbc_beginforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_beginforces) error("Cannot allocate memory for fbc_beginforces\n"); for (k=0; k<vtypes; k++) fbc_beginforces[k] = nullv; #ifdef RELAX fbc_dforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_dforces) error("Cannot allocate memory for fbc_dforces\n"); for (k=0; k<vtypes; k++) fbc_dforces[k] = nullv; #else fbc_endforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_endforces) error("Cannot allocate memory for fbc_endforces\n"); for (k=0; k<vtypes; k++) fbc_endforces[k] = nullv; #endif fbc_df = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_df) error("Cannot allocate memory for fbc_df\n"); for (k=0; k<vtypes; k++) fbc_df[k] = nullv; #endif /*FBC*/ #ifdef BEND /* Allocation & Initialisation of fbc_bforces */ fbc_bforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_bforces) error("Cannot allocate memory for fbc_bforces\n"); for (k=0; k<vtypes; k++) fbc_bforces[k] = nullv; /* Allocation & Initialisation of bend_forces */ bend_forces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==bend_forces) error("Cannot allocate memory for bend_forces\n"); for (k=0; k<vtypes; k++) bend_forces[k] = nullv; fbc_beginbforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_beginbforces) error("Cannot allocate memory for fbc_beginbforces\n"); for (k=0; k<vtypes; k++) fbc_beginbforces[k] = nullv; #ifdef RELAX fbc_bdforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_bdforces) error("Cannot allocate memory for fbc_bdforces\n"); for (k=0; k<vtypes; k++) fbc_bdforces[k] = nullv; #else 
fbc_endbforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_endbforces) error("Cannot allocate memory for fbc_endbforces\n"); for (k=0; k<vtypes; k++) fbc_endbforces[k] = nullv; #endif fbc_bdf = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_bdf) error("Cannot allocate memory for fbc_bdf\n"); for (k=0; k<vtypes; k++) fbc_bdf[k] = nullv; #endif /* BEND */ #ifdef RIGID /* Allocation & Initialization of superatom */ superatom = (int *) malloc( vtypes * sizeof(int) ); if (NULL==superatom) error("Cannot allocate memory for superatom vector\n"); for (k=0; k<vtypes; k++) superatom[k] = -1; /* Allocation of superforce */ superforce = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==superforce) error("Cannot allocate memory for superforce vector\n"); /* Allocation & Initialization of superrestrictions */ superrestrictions = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==superrestrictions) error("Cannot allocate memory for superrestriction vectors\n"); for (k=0; k<vtypes; k++) superrestrictions[k] = nullv; #endif #ifdef DEFORM /* Allocation & Initialisation of deform_shift */ deform_shift = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==deform_shift) error("Cannot allocate memory for deform_shift\n"); for (k=0; k<vtypes; k++) deform_shift[k] = nullv; /* Allocation & Initialisation of shear_def */ shear_def = (int *) malloc( vtypes * sizeof(int) ); if (NULL==shear_def) error("Cannot allocate memory for shear_def\n"); for (k=0; k<vtypes; k++) shear_def[k] = 0; /* Allocation & Initialisation of deform_shear */ deform_shear = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==deform_shear) error("Cannot allocate memory for deform_shear\n"); for (k=0; k<vtypes; k++) deform_shear[k] = nullv; /* Allocation & Initialisation of deform_base */ deform_base = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==deform_base) error("Cannot allocate memory for deform_base\n"); for (k=0; k<vtypes; k++) deform_base[k] = nullv; 
#endif } } #ifdef RIGID else if (strcasecmp(token,"rigid")==0) { int count, tmp, rigidv[15]; if (vtypes==0) error("specify parameter total_types before rigid"); /* virtual types forming superparticle */ i = getparam(token,rigidv,PARAM_INT,1+DIM,vtypes+DIM); /* determine number of types in superparticle */ count = i - DIM; /* construct superatom vector */ tmp = superatom[rigidv[0]]; for (i=0; i<count; i++) { if ( rigidv[i] > vtypes - 1 ) error("Atom type in superparticle does not exist\n"); if ( superatom[rigidv[i]] != tmp ) error("Intersecting superparticles\n"); if (tmp < 0) superatom[rigidv[i]] = nsuperatoms; } if (tmp < 0) { superrestrictions[nsuperatoms].x = rigidv[count ]; superrestrictions[nsuperatoms].y = rigidv[count+1]; #ifndef TWOD superrestrictions[nsuperatoms].z = rigidv[count+2]; #endif nsuperatoms++; } } #endif #ifdef RELAX else if (strcasecmp(token,"ekin_threshold")==0) { /* threshold for sufficient relaxation */ getparam(token,&ekin_threshold,PARAM_REAL,1,1); } else if (strcasecmp(token,"fnorm_threshold")==0) { /* threshold for sufficient relaxation */ getparam(token,&fnorm_threshold,PARAM_REAL,1,1); } else if (strcasecmp(token,"f_max_threshold")==0) { /* threshold for sufficient relaxation */ getparam(token,&f_max_threshold,PARAM_REAL,1,1); } else if (strcasecmp(token,"delta_epot_threshold")==0) { /* threshold for sufficient relaxation */ getparam(token,&delta_epot_threshold,PARAM_REAL,1,1); } else if (strcasecmp(token,"sscount")==0) { /* snapshot counter, for restarting */ getparam(token,&sscount,PARAM_INT,1,1); } else if (strcasecmp(token,"nfc")==0) { /* nfc counter, for restart */ getparam(token,&nfc,PARAM_INT,1,1); } #endif #ifdef FBC else if (strcasecmp(token,"extra_startforce")==0) { if (vtypes==0) error("specify parameter total_types before extra_startforce"); /* extra force for virtual types */ /* format: type force.x force.y (force.z) */ getparam(token,&tempforce,PARAM_REAL,DIM+1,DIM+1); if (tempforce.x>vtypes-1) error("Force defined for 
non existing virtual atom type\n"); force.x = tempforce.y; force.y = tempforce.z; #ifndef TWOD force.z = tempforce.z2; #endif fbc_beginforces[(int)(tempforce.x)] = force; fbc_forces [(int)(tempforce.x)] = force; } #ifdef RELAX else if (strcasecmp(token,"fbc_ekin_threshold")==0) { /* epsilon criterium to increment extra force*/ getparam(token,&ekin_threshold,PARAM_REAL,1,1); warning("Parameter fbc_ekin_threshold replaced by ekin_threshold"); } else if (strcasecmp(token,"max_fbc_int")==0) { /* max nr of steps between fbc increments */ getparam(token,&max_fbc_int,PARAM_INT,1,1); } else if (strcasecmp(token,"fbc_waitsteps")==0) { /* max nr of steps between fbc increments */ getparam(token,&max_fbc_int,PARAM_INT,1,1); warning("Parameter fbc_waitsteps replaced by max_fbc_int"); } else if (strcasecmp(token,"extra_dforce")==0) { if (vtypes==0) error("specify parameter total_types before extra_dforce"); /* extra force increment for virtual types */ /* format: type force.x force.y (force.z) */ getparam(token,&tempforce,PARAM_REAL,DIM+1,DIM+1); if (tempforce.x>vtypes-1) error("Force increment defined for non existing virtual atom type\n"); force.x = tempforce.y; force.y = tempforce.z; #ifndef TWOD force.z = tempforce.z2; #endif fbc_dforces[(int)(tempforce.x)] = force; } #else else if (strcasecmp(token,"extra_endforce")==0) { if (vtypes==0) error("specify parameter total_types before extra_endforce"); /* extra force for virtual types */ /* format: type force.x force.y (force.z) */ getparam(token,&tempforce,PARAM_REAL,DIM+1,DIM+1); if (tempforce.x>vtypes-1) error("Force defined for non existing virtual atom type\n"); force.x = tempforce.y; force.y = tempforce.z; #ifndef TWOD force.z = tempforce.z2; #endif fbc_endforces[(int)(tempforce.x)] = force; } #endif #endif /* FBC */ #ifdef BEND else if (strcasecmp(token,"extra_startbforce")==0) { if (vtypes==0) error("specify parameter total_types before extra_bstartforce"); /* extra force for virtual types */ /* format: type force.x 
force.y (force.z) */ getparam(token,&tempforce,PARAM_REAL,DIM+1,DIM+1); if (tempforce.x>vtypes-1) error("Force defined for non existing virtual atom type\n"); force.x = tempforce.y; force.y = tempforce.z; #ifndef TWOD force.z = tempforce.z2; #endif fbc_beginbforces[(int)(tempforce.x)] = force; fbc_bforces [(int)(tempforce.x)] = force; } #ifdef RELAX else if (strcasecmp(token,"max_bfbc_int")==0) { /* max nr of steps between fbc increments */ getparam(token,&max_bfbc_int,PARAM_INT,1,1); } else if (strcasecmp(token,"bfbc_waitsteps")==0) { /* max nr of steps between fbc increments */ getparam(token,&max_bfbc_int,PARAM_INT,1,1); warning("Parameter bfbc_waitsteps replaced by max_fbc_int"); } else if (strcasecmp(token,"extra_bdforce")==0) { if (vtypes==0) error("specify parameter total_types before extra_bdforce"); /* extra force increment for virtual types */ /* format: type force.x force.y (force.z) */ getparam(token,&tempforce,PARAM_REAL,DIM+1,DIM+1); if (tempforce.x>vtypes-1) error("Force increment defined for non existing virtual atom type\n"); force.x = tempforce.y; force.y = tempforce.z; #ifndef TWOD force.z = tempforce.z2; #endif fbc_bdforces[(int)(tempforce.x)] = force; } #else else if (strcasecmp(token,"extra_endbforce")==0) { if (vtypes==0) error("specify parameter total_types before extra_endforce"); /* extra force for virtual types */ /* format: type force.x force.y (force.z) */ getparam(token,&tempforce,PARAM_REAL,DIM+1,DIM+1); if (tempforce.x>vtypes-1) error("Force defined for non existing virtual atom type\n"); force.x = tempforce.y; force.y = tempforce.z; #ifndef TWOD force.z = tempforce.z2; #endif fbc_endbforces[(int)(tempforce.x)] = force; } #endif #endif /* BEND */ #ifdef FLAGEDATOMS else if (strcasecmp(token,"flagedatomstype")==0) { getparam(token,&flagedatomstype,PARAM_INT,1,1); } #endif #ifdef ZAPP else if (strcasecmp(token,"zapp_threshold")==0) { getparam(token,&zapp_threshold,PARAM_REAL,1,1); } #endif #ifdef BEND else if 
(strcasecmp(token,"bend_nmoments")==0) { /* nr of bending moments */ getparam(token,&bend_nmoments,PARAM_INT,1,1); if (vtypes<bend_nmoments*2) error("need vtypes>=2*bend_nmoments") ; /* now do some allocations and initialisations */ bend_axis = (vektor *) malloc( bend_nmoments * sizeof(vektor) ); if (NULL==bend_axis) error("Cannot allocate memory for bend_axis vectors\n"); for (k=0; k<bend_nmoments; k++) {bend_axis[k].x=0.0;bend_axis[k].y=0.0;bend_axis[k].z=0.0;} bend_origin = (vektor *) malloc( bend_nmoments * sizeof(vektor) ); if (NULL==bend_origin) error("Cannot allocate memory for bend_origin vectors\n"); for (k=0; k<bend_nmoments; k++) {bend_origin[k].x=0.0;bend_origin[k].y=0.0;bend_origin[k].z=0.0;} bend_cog = (vektor *) malloc( bend_nmoments * sizeof(vektor) ); if (NULL==bend_cog) error("Cannot allocate memory for bend_cog vectors\n"); for (k=0; k<bend_nmoments; k++) {bend_cog[k].x=0.0;bend_cog[k].y=0.0;bend_cog[k].z=0.0;} bend_vec = (vektor *) malloc( bend_nmoments * sizeof(vektor) ); if (NULL==bend_vec) error("Cannot allocate memory for bend_vec vectors\n"); for (k=0; k<bend_nmoments; k++) {bend_vec[k].x=0.0;bend_vec[k].y=0.0;bend_vec[k].z=0.0;} } else if (strcasecmp(token,"bend_axis")==0) { /* axis of all bending moments */ getparam("bend_axis",&tempforce,PARAM_REAL,DIM+1,DIM+1); if (tempforce.x>bend_nmoments) error("Bend axis defined for non existing bending moment\n"); force.x = tempforce.y; force.y = tempforce.z; #ifndef TWOD force.z = tempforce.z2; #endif if(SPROD(force,force)!=1) { warning("axis of bending moment not of unit length"); norm_bend_axis = sqrt(SPROD(force,force)); force.x /= norm_bend_axis; force.y /= norm_bend_axis; force.z /= norm_bend_axis; } bend_axis[(int)(tempforce.x)] = force; } else if (strcasecmp(token,"bend_vtype_of_origin")==0) { /* vtype of origin of bendingmoment i */ /* format: bend_vtype_of_origin bendingmoment vtype */ getparam("bend_vtype_of_origin",&tmp_ivec2d,PARAM_INT,1,2); 
bend_vtype_of_origin[tmp_ivec2d.x]=tmp_ivec2d.y; } else if (strcasecmp(token,"bend_vtype_of_force")==0) { /* vtype of atoms to apply FBC of bendingmoment i */ /* format: bend_vtype_of_force bendingmoment vtype */ getparam("bend_vtype_of_force",&tmp_ivec2d,PARAM_INT,1,2); bend_vtype_of_force[tmp_ivec2d.x]=tmp_ivec2d.y; } #endif /* BEND */ else if (strcasecmp(token,"restrictionvector")==0) { if (vtypes==0) error("specify parameter total_types before restrictionvector"); /* restrictions for virtual types */ /* format: type 1 1 (1) (=all directions ok) */ getparam(token,&tempvek,PARAM_REAL,DIM+1,DIM+1); if (tempvek.x>vtypes-1) error("Restriction defined for non existing virtual atom type\n"); vek.x = tempvek.y; vek.y = tempvek.z; #ifndef TWOD vek.z = tempvek.z2; #endif restrictions[(int)(tempvek.x)] = vek; } else if (strcasecmp(token,"box_x")==0) { /* 'x' or first vector for box */ getparam("box_x",&box_x,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"box_y")==0) { /* 'y' or second vector for box */ getparam("box_y",&box_y,PARAM_REAL,DIM,DIM); } #ifndef TWOD else if (strcasecmp(token,"box_z")==0) { /* 'z' or third vector for box */ getparam("box_z",&box_z,PARAM_REAL,DIM,DIM); } #endif else if (strcasecmp(token,"box_param")==0) { /* box parameters for generated structures */ getparam(token,&box_param,PARAM_INT,DIM,DIM); } else if (strcasecmp(token,"size_per_cpu")==0) { /* box parameters are given per CPU */ getparam(token,&size_per_cpu,PARAM_INT,1,1); } else if (strcasecmp(token,"box_unit")==0) { /* lattice parameter for generated structures */ getparam(token,&box_unit,PARAM_REAL,1,1); } else if (strcasecmp(token,"masses")==0) { /* masses for generated structures */ if (ntypes==0) error("specify parameter ntypes before parameter masses"); getparam(token,masses,PARAM_REAL,ntypes,ntypes); } else if (strcasecmp(token,"types")==0) { /* types for generated structures */ if (ntypes==0) error("specify parameter ntypes before parameter types"); 
getparam(token,gtypes,PARAM_INT,1,ntypes); } else if (strcasecmp(token,"timestep")==0) { /* size of timestep (in MD units) */ getparam(token,&timestep,PARAM_REAL,1,1); #ifdef ADAPTGLOK starttimestep = timestep; #endif } else if (strcasecmp(token,"ntypes")==0) { /* number of atom types */ int nt, init = (ntypes==0); getparam(token,&nt,PARAM_INT,1,1); if (init) { ntypes = nt; } else if (nt != ntypes) { error("ntypes must be constant during a simulation"); } if (init) { #ifdef MONO if (ntypes!=1) error("this executable is for monoatomic systems only!"); #endif ntypepairs = ((ntypes+1)*ntypes)/2; ntypetriples = ntypes * ntypepairs; #if defined(TERSOFF2) || defined(TERSOFFMOD2) || defined(BRENNER) nvalues = ntypepairs; #elif defined(TERSOFF) || defined(TERSOFFMOD) || defined(BRENNER) nvalues = ntypes; #endif /* array of masses for generated structures */ masses = (real *) malloc( ntypes * sizeof(real) ); if (NULL==masses) error("Cannot allocate memory for masses array\n"); for (k=0; k<ntypes; k++) masses[k] = 1.0; /* array of types for generated structures */ gtypes = (int *) malloc( ntypes * sizeof(int) ); if (NULL==gtypes) error("Cannot allocate memory for types array\n"); for (k=0; k<ntypes; k++) gtypes[k] = k; #ifdef EFILTER lower_e_pot = (real *) calloc(ntypes, sizeof(real)); if (NULL==lower_e_pot) error("Cannot allocate memory for lower_e_pot\n"); upper_e_pot = (real *) calloc(ntypes, sizeof(real)); if (NULL==upper_e_pot) error("Cannot allocate memory for upper_e_pot\n"); #endif #ifdef NNBR lower_nb_cut = (int *) calloc(ntypes, sizeof(int)); if (NULL==lower_nb_cut) error("Cannot allocate memory for lower_nb_cut\n"); upper_nb_cut = (int *) calloc(ntypes, sizeof(int)); if (NULL==upper_nb_cut) error("Cannot allocate memory for upper_nb_cut\n"); nb_r2_cut = (real *) calloc(ntypes*ntypes, sizeof(real)); if (NULL==nb_r2_cut) error("Cannot allocate memory for nb_r2_cut"); #endif #ifdef ORDPAR op_r2_cut = (real *) calloc(ntypes*ntypes, sizeof(real)); if (NULL==op_r2_cut) 
error("Cannot allocate memory for op_r2_cut"); op_weight = (real *) calloc(ntypes*ntypes, sizeof(real)); if (NULL==op_weight) error("Cannot allocate memory for op_weight"); #endif #ifdef KIM if (NULL == kim_el_names) kim_el_names = (char **)calloc(ntypes, sizeof(char *)); if (NULL == kim_el_names) error("Cannot allocate memory for kim_el_names\n"); for (i = 0;i<ntypes;i++) { /* allocate 12 characters for each species - should be more than enough */ kim_el_names[i] = (char *)calloc(12, sizeof(char)); if (NULL == kim_el_names[i]) error("Cannot allocate memory for kim_el_names\n"); sprintf(kim_el_names[i],'\0'); } #endif } } else if (strcasecmp(token,"starttemp")==0) { /* temperature at start of sim. */ getparam("starttemp",&temperature,PARAM_REAL,1,1); } else if (strcasecmp(token,"use_current_temp")==0) { /* set imposed temperature to current system temperature */ use_curr_temp = 1; } #ifdef TEMPCONTROL else if (strcasecmp(token,"endtemp")==0) { /* temperature at end of sim. */ getparam(token,&end_temp,PARAM_REAL,1,1); } #endif #if defined(STM) || defined(FRAC) || defined(FTG) else if (strcasecmp(token,"stadium")==0) { getparam("stadium",&stadium,PARAM_REAL,2,2); } else if (strcasecmp(token,"stadium2")==0) { getparam("stadium2",&stadium2,PARAM_REAL,2,2); } else if (strcasecmp(token,"center")==0) { getparam("center",&center,PARAM_REAL,2,2); } #endif #ifdef DAMP /* keep to old stadium convention */ else if (strcasecmp(token,"stadium")==0) { getparam("stadium",&stadium,PARAM_REAL,3,3); } else if (strcasecmp(token,"stadium2")==0) { getparam("stadium2",&stadium2,PARAM_REAL,3,3); } else if (strcasecmp(token,"center")==0) { getparam("center",&center,PARAM_REAL,3,3); } else if (strcasecmp(token,"damptemp")==0) { /* actual Damping factor */ getparam("damptemp",&damptemp,PARAM_REAL,1,1); } /* Damping prefactor */ else if (strcasecmp(token,"zeta_0")==0) { getparam("zeta_0",&zeta_0,PARAM_REAL,1,1); } else if (strcasecmp(token,"delta_finnis")==0) { /* actual Damping factor */ 
getparam("delta_finnis",&delta_finnis,PARAM_REAL,1,1); } #endif else if (strcasecmp(token,"cellsize")==0) { /* minimal cell diameter */ getparam("cellsize",&rtmp,PARAM_REAL,1,1); cellsz = MAX(cellsz,SQR(rtmp)); } else if (strcasecmp(token,"initsize")==0) { /* initial cell size */ getparam("initsize",&initsz,PARAM_INT,1,1); } else if (strcasecmp(token,"incrsize")==0) { /* initial cell size */ getparam("incrsize",&incrsz,PARAM_INT,1,1); } else if (strcasecmp(token,"outbuf_size")==0) { /* output buffer size in MB */ getparam(token,&outbuf_size,PARAM_INT,1,1); outbuf_size *= 1048576; } else if (strcasecmp(token,"inbuf_size")==0) { /* total input buffer size in MB */ getparam(token,&inbuf_size,PARAM_INT,1,1); inbuf_size *= 1048576; } else if (strcasecmp(token,"dist_chunk_size")==0) { /* size of MPI reduction in mega-floats */ getparam(token,&dist_chunk_size,PARAM_INT,1,1); dist_chunk_size *= 1048576; } #ifdef AND else if (strcasecmp(token,"tempintv")==0) { /* temperature interval */ getparam("tempintv",&tempintv,PARAM_INT,1,1); } #endif #ifdef BER else if (strcasecmp(token,"tau_berendsen")==0) { /* temperature interval */ getparam("tau_berendsen",&tauber,PARAM_REAL,1,1); } #endif #if defined(NVT) || defined(NPT) || defined(STM) else if (strcasecmp(token,"eta")==0) { /* eta variable for NVT or NPT thermostat */ getparam("eta",&eta,PARAM_REAL,1,1); } else if (strcasecmp(token,"tau_eta")==0) { /* time constant tau_eta for thermostat */ getparam("tau_eta",&tau_eta,PARAM_REAL,1,1); if (tau_eta == (real)0) { error("tau_eta is zero.\n"); } isq_tau_eta = 1.0 / SQR(tau_eta); } else if (strcasecmp(token,"isq_tau_eta")==0) { /* inverse of square of time constant tau_eta for thermostat */ getparam("isq_tau_eta",&isq_tau_eta,PARAM_REAL,1,1); if (isq_tau_eta == (real)0) tau_eta = 0.0; else tau_eta = 1.0 / sqrt(isq_tau_eta); } else if (strcasecmp(token,"inv_tau_eta")==0) { /* inverse of time constant tau_eta for thermostat */ getparam("inv_tau_eta",&isq_tau_eta,PARAM_REAL,1,1); if 
(isq_tau_eta == (real)0) tau_eta = 0.0; else tau_eta = 1.0 / isq_tau_eta; isq_tau_eta = SQR(isq_tau_eta); } #ifdef UNIAX else if (strcasecmp(token,"uniax_inert")==0) { /* moment of inertia */ getparam(token,&uniax_inert,PARAM_REAL,1,1); } else if (strcasecmp(token,"uniax_sig")==0) { /* nearest neighbor distances of potential in the three directions */ getparam(token,&uniax_sig,PARAM_REAL,3,3); if (uniax_sig.x != uniax_sig.y) error("UNIAX molecules must be uniaxial!"); } else if (strcasecmp(token,"uniax_eps")==0) { /* depth of potential in the three directions */ getparam(token,&uniax_eps,PARAM_REAL,3,3); if (uniax_eps.x != uniax_eps.y) error("UNIAX molecules must be uniaxial!"); } else if (strcasecmp(token,"eta_rot")==0) { /* eta variable of rotational motion for NVT or NPT thermostat */ getparam("eta_rot",&eta_rot,PARAM_REAL,1,1); } else if (strcasecmp(token,"tau_eta_rot")==0) { /* time constant tau_eta for thermostat of rotational motion */ getparam("tau_eta_rot",&isq_tau_eta_rot,PARAM_REAL,1,1); if (isq_tau_eta_rot == (real)0) { error("tau_eta_rot is zero.\n"); } isq_tau_eta_rot = 1.0 / SQR(isq_tau_eta_rot); } else if (strcasecmp(token,"isq_tau_eta_rot")==0) { /* squared inverse of time constant for thermostat of rot. 
motion */ getparam("isq_tau_eta_rot",&isq_tau_eta_rot,PARAM_REAL,1,1); } else if (strcasecmp(token,"inv_tau_eta_rot")==0) { /* inverse of time constant for thermostat of rotational motion */ getparam("inv_tau_eta_rot",&isq_tau_eta_rot,PARAM_REAL,1,1); isq_tau_eta_rot = SQR(isq_tau_eta_rot); } #endif #endif #if defined(FRAC) || defined(FTG) else if (strcasecmp(token,"strainrate")==0) { /* strain rate for crack loading */ getparam("strainrate",&dotepsilon0,PARAM_REAL,1,1); dotepsilon = dotepsilon0; } else if (strcasecmp(token,"expansionmode")==0) { /* strain mode for crack loading */ getparam("expansionmode",&expansionmode,PARAM_INT,1,1); } else if (strcasecmp(token,"gamma_bar")==0) { /* Damping prefactor gamma_bar */ getparam("gamma_bar",&gamma_bar,PARAM_REAL,1,1); } else if (strcasecmp(token,"gamma_damp")==0) { /* actual Damping factor */ getparam("gamma_damp",&gamma_damp,PARAM_REAL,1,1); } else if (strcasecmp(token,"dampingmode")==0) { /* damping mode for stadium geometry */ getparam("dampingmode",&dampingmode,PARAM_INT,1,1); } #endif #ifdef FTG else if (strcasecmp(token,"delta_ftg")==0) { /* time constant delta for local temperature control */ getparam("delta_ftg",&delta_ftg,PARAM_REAL,1,1); } else if (strcasecmp(token,"gamma_min")==0) { /* minimal damping prefactor gamma_bar */ getparam("gamma_min",&gamma_min,PARAM_REAL,1,1); } else if (strcasecmp(token,"Tleft")==0) { /* damping mode for stadium geometry */ getparam("Tleft",&Tleft,PARAM_REAL,1,1); } else if (strcasecmp(token,"Tright")==0) { /* damping mode for stadium geometry */ getparam("Tright",&Tright,PARAM_REAL,1,1); } else if (strcasecmp(token,"nslices")==0) { /* nuber of slices*/ int ns, init = (nslices==0); getparam(token,&ns,PARAM_INT,1,1); if (init) { nslices = ns; } else if ( ns != nslices) { error("nslices must be constant during a simulation"); } if (init) { ninslice = (int *) malloc(nslices*sizeof(int)); if (NULL==ninslice) error("Cannot allocate memory for ninslice vector\n"); for (k=0; k<nslices; 
k++) ninslice[k] = 0; gamma_ftg = (real *) malloc(nslices*sizeof(real)); if (NULL==gamma_ftg) error("Cannot allocate memory for gamma_ftg vector\n"); for (k=0; k<nslices; k++) gamma_ftg[k] = 0.0; E_kin_ftg = (real*) malloc(nslices*sizeof(real)); if (NULL==E_kin_ftg) error("Cannot allocate memory for E_kin_ftg vector\n"); for (k=0; k<nslices; k++) E_kin_ftg[k] = 0.0; } } else if (strcasecmp(token,"gamma_ftg")==0) { /* actual Damping factor for each slice */ if (nslices==0) error("specify parameter nslices before gamma_ftg"); /* format: slice gamma_ftg */ getparam(token,&tempvek,PARAM_REAL,2,2); if (tempvek.x>nslices-1) error("actual Damping factorfor non existing slice\n"); gamma_ftg[(int)(tempvek.x)] = tempvek.y; } else if (strcasecmp(token,"nslices_Left")==0) { /* nuber of slices with Tleft */ getparam(token,&nslices_Left,PARAM_INT,1,1); } else if (strcasecmp(token,"nslices_Right")==0) { /* nuber of slices with Right */ getparam(token,&nslices_Right,PARAM_INT,1,1); } #endif #ifdef FINNIS else if (strcasecmp(token,"delta_finnis")==0) { /* time constant delta for local temperature control */ getparam("delta_finnis",&delta_finnis,PARAM_REAL,1,1); } else if (strcasecmp(token,"zeta_0")==0) { /* time constant delta for local temperature control */ getparam("zeta_0",&zeta_0,PARAM_REAL,1,1); } #endif #ifndef TWOD else if (strcasecmp(token,"view_pos")==0) { /* view position */ getparam("view_pos",&view_pos,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"view_dir")==0) { /* view direction */ getparam("view_dir",&view_dir,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"projection")==0) { /* projection (0=orthogonal, 1=perspective) */ getparam("projection",&projection,PARAM_INT,1,1); } #endif else if (strcasecmp(token,"ecut_kin")==0) { /* kinetic energy interval for pictures (min/max) */ getparam("ecut_kin",&ecut_kin,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"ecut_pot")==0) { /* potential energy interval for pictures (min/max) */ 
getparam("ecut_pot",&ecut_pot,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"pic_ll")==0) { /* lower left corner of picture */ getparam("pic_ll", &pic_ll,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"pic_ur")==0) { /* upper right corner of picture */ getparam("pic_ur", &pic_ur,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"pic_res")==0) { /* number of pixels in x/y direction */ getparam("pic_res", &pic_res,PARAM_INT,1,2); } else if (strcasecmp(token,"nsmear")==0) { /* smearing radius in pixels */ getparam("nsmear", &nsmear,PARAM_INT,1,1); } else if (strcasecmp(token,"pic_type")==0) { /* number of pixels in x/y direction */ getparam("pic_type", &pic_type,PARAM_INT,1,1); } #ifdef SLLOD else if (strcasecmp(token,"shear_rate")==0) { /* shear strength, corresponds to xy-like entries in strain tensor */ getparam("shear_rate",&shear_rate,PARAM_REAL,DIM,DIM); } #ifndef TWOD else if (strcasecmp(token,"shear_rate2")==0) { /* shear strength, corresponds to yx-entry in strain tensor */ getparam("shear_rate2",&shear_rate2,PARAM_REAL,DIM,DIM); } #endif #endif #ifdef CYCLE else if (strcasecmp(token,"lindef_freq")==0) { /* frequency for deformation */ getparam(token,&lindef_freq,PARAM_REAL,1,1); } #endif #ifdef HOMDEF else if (strcasecmp(token,"lindef_interval")==0) { /* period of linear deformation intervals */ getparam(token,&lindef_int,PARAM_INT,1,1); } else if (strcasecmp(token,"lindef_size")==0) { /* scale factor for deformation */ /* in case of CYCLE this is the strain amplitude */ getparam(token,&lindef_size,PARAM_REAL,1,1); } else if (strcasecmp(token,"lindef_x")==0) { /* first row of deformation matrix */ getparam(token,&lindef_x,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"lindef_y")==0) { /* second row of deformation matrix */ getparam(token,&lindef_y,PARAM_REAL,DIM,DIM); } #ifndef TWOD else if (strcasecmp(token,"lindef_z")==0) { /* third row of deformation matrix */ getparam(token,&lindef_z,PARAM_REAL,DIM,DIM); } #endif else if 
(strcasecmp(token,"shear_module")==0) { /* estimate of shear module */ getparam(token,&shear_module,PARAM_REAL,1,1); } else if (strcasecmp(token,"bulk_module")==0) { /* estimate of bulk module */ getparam(token,&bulk_module,PARAM_REAL,1,1); } else if (strcasecmp(token,"relax_rate")==0) { /* pressure relaxation rate */ getparam(token,&relax_rate,PARAM_REAL,1,1); } else if (strcasecmp(token,"relax_mode")==0) { /* pressure relaxation mode */ getparam(token,tmpstr,PARAM_STR,1,255); if (strcasecmp(tmpstr,"full" )==0) relax_mode = RELAX_FULL; else if (strcasecmp(tmpstr,"axial")==0) relax_mode = RELAX_AXIAL; else if (strcasecmp(tmpstr,"iso" )==0) relax_mode = RELAX_ISO; else error_str("Unknown relax_mode %s", tmpstr); } #endif #if defined(HOMDEF) || defined(NPT_axial) else if (strcasecmp(token,"relax_dirs")==0) { /* box lengths which should be relaxed */ getparam("relax_dirs",&relax_dirs,PARAM_INT,DIM,DIM); } #endif #ifdef GLOK else if (strcasecmp(token,"glok_ekin_threshold")==0) { /* threshold for ekin */ getparam(token,&glok_ekin_threshold,PARAM_REAL,1,1); } else if (strcasecmp(token,"fire_ekin_threshold")==0) { /* threshold for ekin */ getparam(token,&glok_ekin_threshold,PARAM_REAL,1,1); } #endif #ifdef MIX else if (strcasecmp(token,"glok_mix")==0) { /* factor to turn velocities more parallel to forces */ getparam(token,&glok_mix,PARAM_REAL,1,1); } else if (strcasecmp(token,"fire_mix")==0) { /* factor to turn velocities more parallel to forces */ getparam(token,&glok_mix,PARAM_REAL,1,1); } else if (strcasecmp(token,"glok_mixdec")==0) { /*decrease factor to turn velocities more parallel to forces */ getparam(token,&glok_mixdec,PARAM_REAL,1,1); } else if (strcasecmp(token,"fire_mixdec")==0) { /*decrease factor to turn velocities more parallel to forces */ getparam(token,&glok_mixdec,PARAM_REAL,1,1); } #endif #ifdef ADAPTGLOK /* FIRE = mixedadaptglok, for cosmetic reasons & backward compatibility now all parameters can either be called glok_something or fire_something */ 
else if (strcasecmp(token,"glok_minsteps")==0) { /* minimum of steps before increasing the timestep */ getparam(token,&glok_minsteps,PARAM_INT,1,1); } else if (strcasecmp(token,"fire_minsteps")==0) { /* minimum of steps before increasing the timestep */ getparam(token,&glok_minsteps,PARAM_INT,1,1); } else if (strcasecmp(token,"min_nPxF")==0) { /* minimum gloks before increasing the timestep */ getparam(token,&min_nPxF,PARAM_INT,1,1); } else if (strcasecmp(token,"glok_fmaxcrit")==0) { /* critical max. force component */ getparam(token,&glok_fmaxcrit,PARAM_REAL,1,1); } else if (strcasecmp(token,"fire_fmaxcrit")==0) { /* critical max. force component */ getparam(token,&glok_fmaxcrit,PARAM_REAL,1,1); } else if (strcasecmp(token,"glok_incfac")==0) { /* factor to increase the timestep */ getparam(token,&glok_incfac,PARAM_REAL,1,1); } else if (strcasecmp(token,"fire_incfac")==0) { /* factor to increase the timestep */ getparam(token,&glok_incfac,PARAM_REAL,1,1); } else if (strcasecmp(token,"glok_decfac")==0) { /* factor to decrease the timestep */ getparam(token,&glok_decfac,PARAM_REAL,1,1); } else if (strcasecmp(token,"fire_decfac")==0) { /* factor to decrease the timestep */ getparam(token,&glok_decfac,PARAM_REAL,1,1); } else if (strcasecmp(token,"glok_maxtimestep")==0) { /* max timestep */ getparam(token,&glok_maxtimestep,PARAM_REAL,1,1); } else if (strcasecmp(token,"fire_maxtimestep")==0) { /* max timestep */ getparam(token,&glok_maxtimestep,PARAM_REAL,1,1); } else if (strcasecmp(token,"glok_int")==0) { /* only needed for restarting */ getparam(token,&glok_int,PARAM_INT,1,1); } else if (strcasecmp(token,"fire_int")==0) { /* only needed for restarting */ getparam(token,&glok_int,PARAM_INT,1,1); } #endif #ifdef DEFORM else if (strcasecmp(token,"max_deform_int")==0) { /* max nr of steps between shears */ getparam("max_deform_int",&max_deform_int,PARAM_INT,1,1); } else if (strcasecmp(token,"deform_size")==0) { /* scale factor for deformation */ 
getparam("deform_size",&deform_size,PARAM_REAL,1,1); } else if (strcasecmp(token,"deform_shift")==0) { /* deform shift for virtual types */ /* format: type shift.x shift.y (shift.z) */ if (vtypes==0) error("specify parameter total_types before deform_shift"); getparam(token,&tempshift,PARAM_REAL,DIM+1,DIM+1); if (tempshift.x>vtypes-1) error("Shift defined for non existing virtual atom type\n"); shift.x = tempshift.y; shift.y = tempshift.z; #ifndef TWOD shift.z = tempshift.z2; #endif deform_shift[(int)(tempshift.x)] = shift; } else if (strcasecmp(token,"deform_shear")==0) { /* deform shear for virtual types */ /* format: type shear.x shear.y (shear.z) */ if (vtypes==0) error("specify parameter total_types before deform_shear"); getparam(token,&tempshift,PARAM_REAL,DIM+1,DIM+1); if (tempshift.x>vtypes-1) error("Shear defined for non existing virtual atom type\n"); shear.x = tempshift.y; shear.y = tempshift.z; #ifndef TWOD shear.z = tempshift.z2; #endif deform_shear[(int)(tempshift.x)] = shear; shear_def [(int)(tempshift.x)] = 1; } else if (strcasecmp(token,"deform_base")==0) { /* deform base for virtual types */ /* format: type shear.x shear.y (shear.z) */ if (vtypes==0) error("specify parameter total_types before deform_base"); getparam(token,&tempshift,PARAM_REAL,DIM+1,DIM+1); if (tempshift.x>vtypes-1) error("Shear base defined for non existing virtual atom type\n"); base.x = tempshift.y; base.y = tempshift.z; #ifndef TWOD base.z = tempshift.z2; #endif deform_base[(int)(tempshift.x)] = base; } #endif /* DEFORM */ #ifdef CG else if (strcasecmp(token,"linmin_maxsteps")==0) { /* max steps to find min in one direction */ getparam("linmin_maxsteps",&linmin_maxsteps,PARAM_INT,1,1); } else if (strcasecmp(token,"linmin_tol")==0) { /* tolerance to stop min search in one direction */ getparam("linmin_tol",&linmin_tol,PARAM_REAL,1,1); } else if (strcasecmp(token,"linmin_dmax")==0) { /* max. 
length of trial step in 1d minimum search */ getparam("linmin_dmax",&linmin_dmax,PARAM_REAL,1,1); } else if (strcasecmp(token,"linmin_dmin")==0) { /* max. length of trial step in 1d minimum search */ getparam("linmin_dmin",&linmin_dmin,PARAM_REAL,1,1); } else if (strcasecmp(token,"cg_glimit")==0) { /* limit in mnbrak */ getparam("cg_glimit",&cg_glimit,PARAM_REAL,1,1); } else if (strcasecmp(token,"cg_zeps")==0) { /* in brent */ getparam("cg_zeps",&cg_zeps,PARAM_REAL,1,1); } else if (strcasecmp(token,"cg_fr")==0) { /* Fletcher-Reeves mode or not*/ getparam(token,&cg_fr,PARAM_INT,1,1); } else if (strcasecmp(token,"cg_reset_int")==0) { /* interval for resetting cg */ getparam(token,&cg_reset_int,PARAM_INT,1,1); } else if (strcasecmp(token,"cg_infolevel")==0) { /* cg_infolevel controls verbosity */ getparam(token,&cg_infolevel,PARAM_INT,1,1); } else if (strcasecmp(token,"cg_mode")==0) { /* conjugate gradient mode - at present just the default one */ getparam(token,tmpstr,PARAM_STR,1,255); if (strcasecmp(tmpstr,"cge")==0) { cg_mode = CGE; } /* not implemented yet else if (strcasecmp(tmpstr,"cgef")==0) { cg_mode = CGEF; } */ else error_str("unknown CG mode %s",tmpstr); } #endif /* CG */ #ifdef ACG else if (strcasecmp(token,"acg_alpha")==0) { /* starting alpha */ getparam(token,&acg_init_alpha,PARAM_REAL,1,1); } else if (strcasecmp(token,"acg_incfac")==0) { /* increase alpha */ getparam(token,&acg_incfac,PARAM_REAL,1,1); } else if (strcasecmp(token,"acg_decfac")==0) { /* decrease alpha */ getparam(token,&acg_decfac,PARAM_REAL,1,1); } #endif #ifdef SHOCK else if (strcasecmp(token,"shock_strip")==0) { /* shock strip width (in x dir.) */ getparam("shock_strip",&shock_strip,PARAM_REAL,1,1); } else if (strcasecmp(token,"shock_speed")==0) { /* shock speed (in x dir.) */ getparam("shock_speed",&shock_speed,PARAM_REAL,1,1); } else if (strcasecmp(token,"shock_speed_left")==0) { /* shock speed (in x dir.) 
*/ getparam("shock_speed_l",&shock_speed_l,PARAM_REAL,1,1); } else if (strcasecmp(token,"shock_speed_right")==0) { /* shock speed (in x dir.) */ getparam("shock_speed_r",&shock_speed_r,PARAM_REAL,1,1); } else if (strcasecmp(token,"shock_incr")==0) { /* steps to achieve full velocity */ getparam("shock_incr",&shock_incr,PARAM_INT,1,1); } else if (strcasecmp(token,"shock_mode")==0) { /* shock type: plate or half */ getparam("shock_mode",&shock_mode,PARAM_INT,1,1); if (shock_mode > 1) shock_strip = 0; /* compatibility with old input files */ if (shock_mode < 2 && shock_mode > 4) shock_mode = 1; /* */ if (shock_mode == 4 && shock_speed_l ==0) shock_speed_l = shock_speed; if (shock_mode == 4 && shock_speed_r ==0) shock_speed_r = shock_speed; } #endif #ifdef MPI else if (strcasecmp(token,"cpu_dim")==0) { /* CPU array dimension */ getparam(token,&cpu_dim,PARAM_INT,DIM,DIM); } else if (strcasecmp(token,"parallel_output")==0) { /* parallel output flag */ getparam(token,&parallel_output,PARAM_INT,1,1); } else if (strcasecmp(token,"outputgrpsize")==0) { /* parallel output flag */ getparam(token,&outputgrpsize,PARAM_INT,1,1); } else if (strcasecmp(token,"parallel_input")==0) { /* parallel input flag */ getparam(token,&parallel_input,PARAM_INT,1,1); } else if (strcasecmp(token,"msgbuf_size")==0) { /* security factor of message buffer size */ getparam(token,&msgbuf_size,PARAM_REAL,1,1); } #endif else if (strcasecmp(token,"binary_output")==0) { /* binary output flag */ getparam(token,&binary_output,PARAM_INT,1,1); } #ifdef CORRELATE else if (strcasecmp(token,"correl_rmax")==0) { /* dimension of histogram in r domain */ getparam("correl_rmax",&ncorr_rmax,PARAM_INT,1,1); } else if (strcasecmp(token,"correl_tmax")==0) { /* dimension of histogram in t domain */ getparam("correl_tmax",&ncorr_tmax,PARAM_INT,1,1); } else if (strcasecmp(token,"correl_omode")==0) { /* repeat interval for correlation */ getparam("correl_omode",&correl_omode,PARAM_INT,1,1); } else if 
(strcasecmp(token,"GS_rcut")==0) { /* cutoff radius for correlation data writes */ getparam("GS_rcut",&GS_rcut,PARAM_REAL,1,1); } #endif #if defined(CORRELATE) || defined(MSQD) else if (strcasecmp(token,"correl_start")==0) { /* start time for correlation */ getparam("correl_start",&correl_start,PARAM_INT,1,1); } else if (strcasecmp(token,"correl_end")==0) { /* end time for correlation */ getparam("correl_end",&correl_end,PARAM_INT,1,1); } else if (strcasecmp(token,"correl_ts")==0) { /* sampling time interval for correlation */ getparam("correl_ts",&correl_ts,PARAM_INT,1,1); } else if (strcasecmp(token,"correl_int")==0) { /* repeat interval for correlation */ getparam("correl_int",&correl_int,PARAM_INT,1,1); } else if (strcasecmp(token,"msqd_ntypes")==0) { /* write msqd for real types */ getparam("msqd_ntypes",&msqd_ntypes,PARAM_INT,1,1); } else if (strcasecmp(token,"msqd_vtypes")==0) { /* write msqd for virtual types */ getparam("msqd_vtypes",&msqd_vtypes,PARAM_INT,1,1); } #endif #ifdef NMOLDYN else if (strcasecmp(token,"nmoldyn_int")==0) { /* interval for nmoldyn trajectory writes */ getparam(token,&nmoldyn_int,PARAM_INT,1,1); } else if (strcasecmp(token,"nmoldyn_veloc")==0) { /* include velocities in nmoldyn trajectory? 
*/ getparam(token,&nmoldyn_veloc,PARAM_INT,1,1); } #endif #ifdef DSF else if (strcasecmp(token,"dsf_int")==0) { /* interval for dsf updates */ getparam(token,&dsf_int,PARAM_INT,1,1); } else if (strcasecmp(token,"dsf_weight")==0) { /* weights for dsf (usually coherent scattering length) */ if (0==ntypes) error("specify parameter ntypes before dsf_weight"); dsf_weight = (real *) malloc( ntypes * sizeof(real) ); if (NULL==dsf_weight) error("cannot allocate dsf_weight"); getparam(token,dsf_weight,PARAM_REAL,ntypes,ntypes); } else if (strcasecmp(token,"dsf_nk")==0) { /* number of k-point series */ getparam(token,&dsf_nkmax,PARAM_INT,1,1); dsf_k0 = (int *) malloc( dsf_nkmax * DIM * sizeof(int) ); dsf_kdir = (int *) malloc( dsf_nkmax * DIM * sizeof(int) ); dsf_kmax = (int *) malloc( dsf_nkmax * sizeof(int) ); if ((NULL==dsf_k0) || (NULL==dsf_kdir) || (NULL==dsf_kmax)) error("cannot allocate dsf arrays"); } else if (strcasecmp(token,"dsf_k")==0) { /* k-point series */ int i=0, tmp[2*DIM+1]; if (dsf_nk>=dsf_nkmax) error("number of k-point series exceeds dsf_nkmax"); getparam(token,tmp,PARAM_INT,2*DIM+1,2*DIM+1); dsf_k0 [DIM*dsf_nk ] = tmp[i++]; dsf_k0 [DIM*dsf_nk+1] = tmp[i++]; #ifndef TWOD dsf_k0 [DIM*dsf_nk+2] = tmp[i++]; #endif dsf_kdir[DIM*dsf_nk ] = tmp[i++]; dsf_kdir[DIM*dsf_nk+1] = tmp[i++]; #ifndef TWOD dsf_kdir[DIM*dsf_nk+2] = tmp[i++]; #endif dsf_kmax[ dsf_nk ] = tmp[i++]; dsf_nk++; } #endif #if defined(HC) || defined(NVX) else if (strcasecmp(token, "hc_int")==0){ /* number of steps between heat current or profile writes */ getparam(token, &hc_int, PARAM_INT, 1,1); } else if (strcasecmp(token, "hc_start")==0){ /* start step for heat current or profile measurement */ getparam(token, &hc_start, PARAM_INT, 1,1); } #endif #ifdef HC else if (strcasecmp(token, "hc_av_start")==0){ /* start step for energy averaging */ getparam(token, &hc_av_start, PARAM_INT, 1,1); } #endif #ifdef NVX else if (strcasecmp(token, "hc_nlayers")==0){ /* number of layers */ getparam(token, 
&hc_nlayers, PARAM_INT, 1,1); } else if (strcasecmp(token, "hc_count")==0){ /* running index of temperature profile */ getparam(token, &hc_count, PARAM_INT, 1,1); } else if (strcasecmp(token, "hc_heatcurr")==0){ /* induced heat current density */ getparam(token, &hc_heatcurr, PARAM_REAL, 1,1); } #endif // ******************************************** // * PARAMS FOR TTM AND FDTD // ******************************************** #ifdef TTM //MY MOD: fd_g wird dyn. berechnet //else if (strcasecmp(token, "fd_g")==0){ // /* electron phonon coupling constant */ // getparam("fd_g", &fd_g, PARAM_REAL, 1, 1); //} else if (strcasecmp(token, "fd_update_steps")==0){ /* how many steps before averaging over atoms to update FD cells */ getparam("fd_update_steps", &fd_update_steps, PARAM_INT, 1, 1); } else if (strcasecmp(token, "fd_ext")==0){ /* how many MD cells in x,y,z-direction to one FD cell */ getparam("fd_ext", &fd_ext, PARAM_INT, DIM, DIM); } else if (strcasecmp(token, "fd_one_d")==0){ /* FD lattice one dimensional in x or y or z if this is given */ getparam("fd_one_d", &fd_one_d_str, PARAM_STR, 1, 255); } // MY MOD: fd_k wird dynamisch berechnet // else if (strcasecmp(token, "fd_k")==0){ // /* FD electronic heat conductivity */ // getparam("fd_k", &fd_k, PARAM_REAL, 1, 1); // } // MY MOD: fd_c wird dynamisch berechnet // else if (strcasecmp(token, "fd_c")==0){ // /* FD electronic heat capacity */ // getparam("fd_c", &fd_c, PARAM_REAL, 1, 1); //} // fd_gamma wird nicht mehr benötigt // else if (strcasecmp(token, "fd_gamma")==0) // { // // FD electronic heat capacity / T_e (proport. const.) // getparam("fd_gamma", &fd_gamma, PARAM_REAL, 1, 1); // } else if (strcasecmp(token, "fd_n_timesteps")==0){ // How many FD time steps to one MD time step? getparam("fd_n_timesteps", &fd_n_timesteps, PARAM_INT, 1, 1); } else if (strcasecmp(token, "ttm_int")==0){ /* How many time steps between ttm writeouts? 
*/ getparam("ttm_int", &ttm_int, PARAM_INT, 1, 1); } else if (strcasecmp(token, "init_t_el")==0){ /* Initialize T_el to what temperature? */ getparam("init_t_el", &init_t_el, PARAM_REAL, 1, 1); } else if (strcasecmp(token, "fix_t_el")==0){ /* fix T_el to init_t_el? */ getparam("fix_t_el", &fix_t_el, PARAM_INT, 1, 1); } else if(strcasecmp(token,"fd_min_atoms")==0){ getparam("fd_min_atoms",&fd_min_atoms,PARAM_INT,1,1); } //MYMOD FÜR loadbalance mit ttm #ifdef TTM1D else if(strcasecmp(token,"vlatdim")==0){ getparam("vlatdim",&vlatdim,PARAM_INT,1,1); } else if(strcasecmp(token,"vlatbuffer")==0){ getparam("vlatbuffer",&vlatbuffer,PARAM_INT,1,1); } else if(strcasecmp(token,"ttmdimx")==0){ getparam("ttmdimx",&ttmdimx,PARAM_INT,1,1); } #endif //ENDOF MYMOD //MY MOD: FDTD (ACHTUNG: alles in SI-einheiten!) #if defined(FDTD) || defined(LASER) || defined(TMM) #ifdef TMM else if(strcasecmp(token,"tmm_threshold")==0){ getparam("tmm_threshold",&tmm_absorption_threshold,PARAM_REAL,1,1); } #endif else if(strcasecmp(token,"I0")==0){ getparam("I0",&I0,PARAM_REAL,1,1); } else if (strcasecmp(token, "laser_sigma_t")==0){ getparam("laser_sigma_t", &laser_sigma_t, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_t_0")==0){ getparam("laser_t_0", &laser_t_0, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_sigma_t1")==0){ getparam("laser_sigma_t1", &laser_sigma_t1, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_t_1")==0){ getparam("laser_t_1", &laser_t_1, PARAM_REAL, 1,1); } #ifdef FDTD else if(strcasecmp(token,"Sc")==0){ getparam("Sc",&Sc,PARAM_REAL,1,1); } else if (strcasecmp(token, "srcw")==0){ getparam("srcw", &srcw, PARAM_REAL, 1,1); } else if (strcasecmp(token, "srcx")==0){ getparam("srcx", &srcx, PARAM_REAL, 1,1); } else if (strcasecmp(token, "pml")==0){ getparam("bw", &bw, PARAM_INT, 1,1); } #endif else if (strcasecmp(token, "lambda")==0){ getparam("lambda", &lambda, PARAM_REAL, 1,1); } #endif //FDTD LASER und TMM //MY MOD: WIDERANGE TTM else 
if(strcasecmp(token,"atomic_weight")==0){ getparam("atomic_weight",&atomic_weight,PARAM_REAL,1,1); } else if(strcasecmp(token,"atomic_charge")==0){ getparam("atomic_charge",&atomic_charge,PARAM_REAL,1,1); } #ifdef DIRICHLET else if(strcasecmp(token,"dirichlet_surfx")==0){ getparam("dirichlet_surfx",&dirichlet_surfx,PARAM_REAL,1,1); } #endif //DIRICHLET #endif //TTM //MY MOD: Non reflecting boundary #ifdef NRB else if(strcasecmp(token,"nrb_alat")==0){ getparam("nrb_alat",&nrb_alat,PARAM_REAL,1,1); } else if(strcasecmp(token,"nrb_eps")==0){ getparam("nrb_eps",&nrb_eps,PARAM_REAL,1,1); } else if(strcasecmp(token,"nrb_k")==0){ getparam("nrb_k",&nrbk,PARAM_REAL,1,1); } else if(strcasecmp(token,"nrb_overwrite")==0){ getparam("nrb_overwrite",&nrb_overwrite,PARAM_INT,1,1); } else if(strcasecmp(token,"nrb_infile")==0){ getparam("nrb_infile",nrb_input_file,PARAM_STR,1,255); nrb_readfile=1; } #endif //MYMOD: LOCAL ORDER PARAMETER #ifdef LOD //lattice spacing else if(strcasecmp(token,"alat")==0){ getparam("alat",&alat,PARAM_REAL,1,1); } else if(strcasecmp(token,"lodnorm")==0){ getparam("lodnorm",&lodnorm,PARAM_REAL,1,1); } #endif // *********************************** // * MY MOD: shiftx_front,shiftx_rear // ************************************ else if (strcasecmp(token, "shiftx_front")==0){ getparam("shiftx_front", &shiftx_front, PARAM_REAL, 1,1); } else if (strcasecmp(token, "shiftx_rear")==0){ getparam("shiftx_rear", &shiftx_rear, PARAM_REAL, 1,1); } else if (strcasecmp(token, "shifty_front")==0){ getparam("shifty_front", &shifty_front, PARAM_REAL, 1,1); } else if (strcasecmp(token, "shifty_rear")==0){ getparam("shifty_rear", &shifty_rear, PARAM_REAL, 1,1); } else if (strcasecmp(token, "shiftz_front")==0){ getparam("shiftz_front", &shiftz_front, PARAM_REAL, 1,1); } else if (strcasecmp(token, "shiftz_rear")==0){ getparam("shiftz_rear", &shiftz_rear, PARAM_REAL, 1,1); } #ifdef PDECAY else if (strcasecmp(token, "xipdecay")==0){ /* value for the damping parameter added to the 
EQ's of motion */ getparam("xipdecay", &xipdecay, PARAM_REAL, 1,1); } else if (strcasecmp(token, "ramp_fraction")==0){ /* fraction of the sample on which the damping ramp acts */ getparam("ramp_fraction", &ramp_fraction, PARAM_REAL, 1,1); if(ramp_fraction > 0.9 || ramp_fraction < 0.0) error("Nonsense value for ramp_fraction detected, please check prameter file!"); } /******************************************************************************************** * MY MOD: When restarting TTM-Simulation, user has to provide ramp_start and ramp_end * * Otherwise the accelaration of the atoms in laser_rescale_ttm might be * * very different because the sample dimension changes * *CAUTION: No error-handling managed yet * *********************************************************************************************/ else if(strcasecmp(token,"ramp_start")==0){ getparam("ramp_start",&ramp_start,PARAM_REAL,1,1); } else if(strcasecmp(token,"ramp_end")==0){ getparam("ramp_end",&ramp_end,PARAM_REAL,1,1); } else if (strcasecmp(token, "pdecay_mode")==0){ /* mode for the damping function */ getparam("pdecay_mode", &pdecay_mode, PARAM_INT, 1,1); } // *********************************************** //MY MOD: Hot fix fuer pdecay fuer +y und -y bnd else if(strcasecmp(token,"ramp_y0max")==0){ getparam("ramp_y0max",&ramp_y0max,PARAM_REAL,1,1); } else if(strcasecmp(token,"ramp_y0min")==0){ getparam("ramp_y0min",&ramp_y0min,PARAM_REAL,1,1); } else if(strcasecmp(token,"ramp_y1max")==0){ getparam("ramp_y1max",&ramp_y1max,PARAM_REAL,1,1); } else if(strcasecmp(token,"ramp_y1min")==0){ getparam("ramp_y1min",&ramp_y1min,PARAM_REAL,1,1); } else if(strcasecmp(token,"pdecay_surfx")==0){ getparam("pdecay_surfx",&pdecay_surfx,PARAM_REAL,1,1); } #endif /*********************** * MY MOD: FILTER MIN X * ************************/ #ifdef FILTER else if (strcasecmp(token, "filter_int")==0){ getparam("filter_int", &filter_int, PARAM_INT, 1, 1); } else if (strcasecmp(token, "filter_min_x")==0){ /* atom 
deletion filter left cutoff value */ getparam("filter_min_x", &filter_min_x, PARAM_REAL, 1, 1); } else if (strcasecmp(token, "filter_max_x")==0){ /* atom deletion filter left cutoff value */ getparam("filter_max_x", &filter_max_x, PARAM_REAL, 1, 1); } //MYMOD:Brauche ich bei FDTD! else if (strcasecmp(token, "filter_min_y")==0){ /* atom deletion filter left cutoff value */ getparam("filter_min_y", &filter_min_y, PARAM_REAL, 1, 1); } else if (strcasecmp(token, "filter_max_y")==0){ /* atom deletion filter left cutoff value */ getparam("filter_max_y", &filter_max_y, PARAM_REAL, 1, 1); } else if (strcasecmp(token, "filter_min_z")==0){ /* atom deletion filter left cutoff value */ getparam("filter_min_z", &filter_min_z, PARAM_REAL, 1, 1); } else if (strcasecmp(token, "filter_max_z")==0){ /* atom deletion filter left cutoff value */ getparam("filter_max_z", &filter_max_z, PARAM_REAL, 1, 1); } #endif #ifdef LASER else if (strcasecmp(token, "laser_delta_temp")==0){ /* maximum heat added by laser (at the surface) (in maxwell routine) */ getparam("laser_delta_temp", &laser_delta_temp, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_mu")==0){ /* absorption coefficient (always needed)*/ getparam("laser_mu", &laser_mu, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_offset")==0){ /* offset of sample from origin */ getparam("laser_offset", &laser_offset, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_dir")==0){ /* direction of incidence of laser ( for now only along coordinate axes ) (always needed)*/ getparam("laser_dir", &laser_dir, PARAM_INT, DIM, DIM); } #ifdef LASERYZ else if (strcasecmp(token, "laser_sigma_w_y")==0){ /* y-center of gaussian laser-pulse */ getparam("laser_sigma_w_y", &laser_sigma_w_y, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_sigma_w_z")==0){ /* z-center of gaussian laser-pulse */ getparam("laser_sigma_w_z", &laser_sigma_w_z, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_sigma_w0")==0){ /* sigma_0 of spacial laser 
fluence */ getparam("laser_sigma_w0", &laser_sigma_w0, PARAM_REAL, 1,1); // MY MOD: in imd_laser.c wird laser_sigma_w0=1/(laser_sigma_w0^2), // warum auch immer...Deswegen speicher ich den beamradius // zusätzlich in einer eigenen Variablen, da ich nachher // zur Berechnung der Laserintensität die effektive, // bestrahle flaeche brauche //beam_radius=laser_sigma_w0; } else if (strcasecmp(token,"laser_tem_mode")==0) { /* TEM_xy laser mode */ getparam(token,&laser_tem_mode,PARAM_INT,3,3); switch ( laser_tem_mode.x ) /* Gauss Laguerre = 0, Gauss Hermite = 1 */ { case 0: { switch ( laser_tem_mode.y ) { case 0: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_laguerre_00; break; case 1: laser_intensity_profile = laser_intensity_profile_laguerre_01; break; case 2: laser_intensity_profile = laser_intensity_profile_laguerre_02; break; case 3: laser_intensity_profile = laser_intensity_profile_laguerre_03; break; } } break; case 1: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_laguerre_10; break; case 1: laser_intensity_profile = laser_intensity_profile_laguerre_11; break; case 2: laser_intensity_profile = laser_intensity_profile_laguerre_12; break; case 3: laser_intensity_profile = laser_intensity_profile_laguerre_13; break; } } break; case 2: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_laguerre_20; break; case 1: laser_intensity_profile = laser_intensity_profile_laguerre_21; break; case 2: laser_intensity_profile = laser_intensity_profile_laguerre_22; break; case 3: laser_intensity_profile = laser_intensity_profile_laguerre_23; break; } } break; case 3: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_laguerre_30; break; case 1: laser_intensity_profile = laser_intensity_profile_laguerre_31; break; case 2: laser_intensity_profile = laser_intensity_profile_laguerre_32; break; case 3: 
laser_intensity_profile = laser_intensity_profile_laguerre_33; break; } } break; } } break; case 1: { switch ( laser_tem_mode.y ) { case 0: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_hermite_00; break; case 1: laser_intensity_profile = laser_intensity_profile_hermite_01; break; case 2: laser_intensity_profile = laser_intensity_profile_hermite_02; break; case 3: laser_intensity_profile = laser_intensity_profile_hermite_03; break; } } break; case 1: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_hermite_10; break; case 1: laser_intensity_profile = laser_intensity_profile_hermite_11; break; case 2: laser_intensity_profile = laser_intensity_profile_hermite_12; break; case 3: laser_intensity_profile = laser_intensity_profile_hermite_13; break; } } break; case 2: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_hermite_20; break; case 1: laser_intensity_profile = laser_intensity_profile_hermite_21; break; case 2: laser_intensity_profile = laser_intensity_profile_hermite_22; break; case 3: laser_intensity_profile = laser_intensity_profile_hermite_23; break; } } break; case 3: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_hermite_30; break; case 1: laser_intensity_profile = laser_intensity_profile_hermite_31; break; case 2: laser_intensity_profile = laser_intensity_profile_hermite_32; break; case 3: laser_intensity_profile = laser_intensity_profile_hermite_33; break; } } break; } } break; } } #endif else if (strcasecmp(token, "laser_sigma_e")==0){ /* area density of pulse energy (for rescaling method) */ getparam("laser_sigma_e", &laser_sigma_e, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_sigma_t")==0){ /* Pulse duration ( power is 1/e*P_max at t=t_0 +/- sigma_t ) */ getparam("laser_sigma_t", &laser_sigma_t, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_t_0")==0){ /* time of maximum 
pulse intensity */ getparam("laser_t_0", &laser_t_0, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_sigma_e1")==0){ /* area density of pulse energy (for rescaling method) */ getparam("laser_sigma_e1", &laser_sigma_e1, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_sigma_t1")==0){ /* Pulse duration ( power is 1/e*P_max at t=t_0 +/- sigma_t ) */ getparam("laser_sigma_t1", &laser_sigma_t1, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_t_1")==0){ /* time of maximum of second pulse intensity */ getparam("laser_t_1", &laser_t_1, PARAM_REAL, 1,1); } else if (strcasecmp(token, "laser_atom_vol")==0){ /* Volume per particle (inverse density) */ getparam("laser_atom_vol", &laser_atom_vol, PARAM_REAL, 1,1); } else if (strcasecmp(token,"laser_rescale_mode")==0) { /* What rescale mode? */ getparam("laser_rescale_mode",&laser_rescale_mode,PARAM_INT,1,1); switch ( laser_rescale_mode ) { case 0 : do_laser_rescale = laser_rescale_dummy; break; case 1 : do_laser_rescale = laser_rescale_1; break; case 2 : do_laser_rescale = laser_rescale_2; break; case 3 : do_laser_rescale = laser_rescale_3; break; case 4 : #ifdef TTM do_laser_rescale = laser_rescale_ttm; /* change electron temperature source terms, not atom velocities */ #else error("Please compile with TTM if you want to use this laser rescale mode.\n"); #endif break; default : error("Illegal value for parameter laser_rescale_mode.\n"); break; } /* switch */ } #endif #ifdef STRESS_TENS else if (strcasecmp(token, "press_int")==0){ /* number of steps between pressure writes */ getparam(token, &press_int, PARAM_INT, 1,1); } else if (strcasecmp(token, "presstens_ext")==0){ /* external pressure tensor for relaxation */ getparam(token, &presstens_ext, PARAM_REAL, DIM*(DIM+1)/2,DIM*(DIM+1)/2); } #endif #ifdef CNA else if (strcasecmp(token,"cna_start")==0) { /* step at which CNA begins */ getparam("cna_start",&cna_start,PARAM_INT,1,1); } else if (strcasecmp(token,"cna_end")==0) { /* step at which CNA ends */ 
getparam("cna_end",&cna_end,PARAM_INT,1,1); } else if (strcasecmp(token,"cna_int")==0) { /* number of steps between CNA */ getparam("cna_int",&cna_int,PARAM_INT,1,1); } else if (strcasecmp(token, "cna_rcut")==0){ /* cutoff */ getparam("cna_rcut", &cna_rcut, PARAM_REAL, 1,1); } else if (strcasecmp(token,"cna_write")==0) { /* pair type to be written out */ cna_write_n = getparam("cna_write",cna_writev,PARAM_INT,1,8); } else if (strcasecmp(token,"cna_crist")==0) { /* determine crystallinity of atoms */ cna_crist_n = getparam("cna_crist",cna_cristv,PARAM_INT,1,4); } else if (strcasecmp(token,"cna_ll")==0) { /* lower left corner of partial box */ getparam("cna_ll", &cna_ll,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"cna_ur")==0) { /* upper right corner of partial box */ getparam("cna_ur", &cna_ur,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"cna_stat")==0) { /* write statistics */ cna_write_statistics = 1; } #endif #ifdef ADA else if (strcasecmp(token, "ada_nbr_rcut") == 0) { /* cutoff radius for angle distribution analysis neighbor,*/ /* squared value is needed*/ getparam("ada_nbr_rcut", &ada_nbr_r2cut, PARAM_REAL, 1, 1); ada_nbr_r2cut = ada_nbr_r2cut * ada_nbr_r2cut; } else if (strcasecmp(token, "ada_write_int") == 0) { /* write statistics interval*/ getparam("ada_write_int",&ada_write_int,PARAM_INT,1,1); } else if (strcasecmp(token, "ada_crystal_structure") == 0) { getparam(token,tmpstr,PARAM_STR,1,255); if (strcasecmp(tmpstr,"fcc") == 0) { ada_crystal_structure = ADA_FCC_CONFIG; } else if (strcasecmp(tmpstr,"bcc") == 0) { ada_crystal_structure = ADA_BCC_CONFIG; } else if (strcasecmp(tmpstr,"ackland") == 0) { ada_crystal_structure = ADA_ACKLAND_CONFIG; } } else if (strcasecmp(token, "ada_latticeConst") == 0) { /* lattice constant */ getparam("ada_latticeConst",&ada_latticeConst,PARAM_REAL,1,1); } #endif #ifdef NYETENSOR else if (strcasecmp(token,"nye_rotationAxis_x")==0) { /* lattice orientation in x direction */ 
getparam("nye_rotationAxis_x",&nye_rotationAxis_x,PARAM_REAL,3,3); } else if (strcasecmp(token,"nye_rotationAxis_y")==0) { /* lattice orientation in y direction */ getparam("nye_rotationAxis_y",&nye_rotationAxis_y,PARAM_REAL,3,3); } else if (strcasecmp(token,"nye_rotationAxis_z")==0) { /* lattice orientation in z direction */ getparam("nye_rotationAxis_z",&nye_rotationAxis_z,PARAM_REAL,3,3); } #endif #ifdef VISCOUS else if (strcasecmp(token,"viscous_friction")==0) { getparam("viscous_friction",&viscous_friction,PARAM_REAL,1,1); } #endif #ifdef LOADBALANCE else if (strcasecmp(token, "lb_contractionRate") == 0) { /* load balance contraction rate */ getparam("lb_contractionRate",&lb_contractionRate,PARAM_REAL,1,1); } else if (strcasecmp(token, "lb_frequency") == 0) { /* load balance contraction rate */ getparam("lb_frequency",&lb_frequency,PARAM_INT,1,1); } else if (strcasecmp(token, "lb_writeStatus") == 0) { /* load balance contraction rate */ getparam("lb_writeStatus",&lb_writeStatus,PARAM_INT,1,1); } else if (strcasecmp(token, "lb_maxLoadTolerance") == 0) { /* tolerated load imbalance between cpus */ getparam("lb_maxLoadTolerance",&lb_maxLoadTolerance,PARAM_REAL,1,1); } else if (strcasecmp(token, "lb_maxLoadToleranceFactorForReset") == 0) { /* tolerated load imbalance between cpus for reset */ getparam("lb_maxLoadToleranceFactorForReset",&lb_maxLoadToleranceFactorForReset,PARAM_REAL,1,1); } else if (strcasecmp(token, "lb_preRuns") == 0) { /* load balance runs before simulation starts*/ getparam("lb_preRuns",&lb_preRuns,PARAM_INT,1,1); } else if (strcasecmp(token, "lb_balancingType") == 0) { /* Load balancing strategy*/ getparam("lb_balancingType",&lb_balancingType,PARAM_INT,1,1); } else if (strcasecmp(token, "lb_iterationsPerReset") == 0) { /* load balance minimum iteration between resets*/ getparam("lb_iterationsPerReset",&lb_iterationsPerReset,PARAM_INT,1,1); } else if (strcasecmp(token, "lb_minStepsBetweenReset") == 0) { /* load balance minimum lb steps between 
resets*/ getparam("lb_minStepsBetweenReset",&lb_minStepsBetweenReset,PARAM_INT,1,1); } #endif #ifdef DISLOC else if (strcasecmp(token,"reffile")==0) { /* filename for reference configuration */ error( "Parameter reffile no longer supported - consult DISLOC documentation"); } else if (strcasecmp(token,"dem_int")==0) { /* number of steps between picture writes */ getparam(token,&dem_int,PARAM_INT,1,1); } else if (strcasecmp(token,"dsp_int")==0) { /* number of steps between picture writes */ getparam(token,&dsp_int,PARAM_INT,1,1); } else if (strcasecmp(token,"update_ort_ref")==0) { /* step number to compute ort_ref */ getparam(token,&up_ort_ref,PARAM_INT,1,1); } else if (strcasecmp(token,"min_dpot")==0) { /* minimum Epot difference */ getparam(token,&min_dpot,PARAM_REAL,1,1); } else if (strcasecmp(token,"min_dsp2")==0) { /* minimum square displacement in .dsp files */ getparam(token,&min_dsp2,PARAM_REAL,1,1); } else if (strcasecmp(token,"reset_Epot_step")==0) { /* step at which to compute Epot_ref (if calc_Epot_ref==1) */ getparam(token,&reset_Epot_step,PARAM_INT,1,1); } else if (strcasecmp(token,"calc_Epot_ref")==0) { /* read (0) or compute (1) reference potential energy */ getparam(token,&calc_Epot_ref,PARAM_INT,1,1); } else if (strcasecmp(token,"Epot_diff")==0) { /* write Epot (0) or Epot_diff (1) */ getparam(token,&Epot_diff,PARAM_INT,1,1); } #endif #ifdef AVPOS else if (strcasecmp(token,"avpos_start")==0) { /* step at which coordinate addition begins */ getparam("avpos_start",&avpos_start,PARAM_INT,1,1); } else if (strcasecmp(token,"avpos_end")==0) { /* step at which coordinate addition ends */ getparam("avpos_end",&avpos_end,PARAM_INT,1,1); } else if (strcasecmp(token,"avpos_res")==0) { /* number of steps between coordinate addition */ getparam("avpos_res",&avpos_res,PARAM_INT,1,1); } else if (strcasecmp(token,"avpos_int")==0) { /* number of steps between average position writes */ getparam("avpos_int",&avpos_int,PARAM_INT,1,1); } else if 
(strcasecmp(token,"avpos_steps")==0) { /* number of steps to average over before position writes */ getparam("avpos_steps",&avpos_steps,PARAM_INT,1,1); } else if (strcasecmp(token,"avpos_nwrites")==0) { /* number of position writes, only for processing the itr file */ getparam("avpos_nwrites",&avpos_nwrites,PARAM_INT,1,1); printf("avpos_nwrites: %d\n",avpos_nwrites);fflush(stdout); } else if (strcasecmp(token,"avpos_npwrites")==0) { /* number of pressure writes, only for processing the itr file */ getparam("avpos_npwrites",&avpos_npwrites,PARAM_INT,1,1); printf("avpos_npwrites: %d\n",avpos_npwrites);fflush(stdout); } #endif #if defined(FORCE) || defined(WRITEF) else if (strcasecmp(token,"force_int")==0) { /* number of steps between force writes */ getparam(token,&force_int,PARAM_INT,1,1); } #endif #ifdef WRITEF else if (strcasecmp(token,"force_all")==0) { /* write all forces, or only those of atoms with virtual type */ getparam(token,&force_all,PARAM_INT,1,1); } #endif #ifdef ATDIST else if (strcasecmp(token,"atdist_dim")==0) { /* dimension of atoms distribution array */ getparam(token,&atdist_dim,PARAM_INT,DIM,DIM); } else if (strcasecmp(token,"atdist_int")==0) { /* interval between atoms distribution updates */ getparam(token,&atdist_int,PARAM_INT,1,1); } else if (strcasecmp(token,"atdist_pos_int")==0) { /* interval between atom position writes */ getparam(token,&atdist_pos_int,PARAM_INT,1,1); } else if (strcasecmp(token,"atdist_start")==0) { /* step when recording atoms distribution is started */ getparam(token,&atdist_start,PARAM_INT,1,1); } else if (strcasecmp(token,"atdist_end")==0) { /* step when recording atoms distribution is stopped */ getparam(token,&atdist_end,PARAM_INT,1,1); } else if (strcasecmp(token,"atdist_ll")==0) { /* lower left corner of atoms distribution */ getparam(token,&atdist_ll,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"atdist_ur")==0) { /* upper right corner of atoms distribution */ getparam(token,&atdist_ur,PARAM_REAL,DIM,DIM); } 
else if (strcasecmp(token,"atdist_per_ll")==0) { /* lower left of periodic extension */ getparam(token,&atdist_per_ll,PARAM_INT,DIM,DIM); } else if (strcasecmp(token,"atdist_per_ur")==0) { /* upper right of periodic extension */ getparam(token,&atdist_per_ur,PARAM_INT,DIM,DIM); } else if (strcasecmp(token,"atdist_phi")==0) { /* rotation angle around z-axis */ getparam(token,&atdist_phi,PARAM_REAL,1,1); atdist_phi *= 8 * atan(1.0); } #endif #ifdef DIFFPAT else if (strcasecmp(token,"diffpat_dim")==0) { /* dimension of atoms distribution array */ getparam(token,&diffpat_dim,PARAM_INT,DIM,DIM); } else if (strcasecmp(token,"diffpat_int")==0) { /* interval between diffraction pattern updates */ getparam(token,&diffpat_int,PARAM_INT,1,1); } else if (strcasecmp(token,"diffpat_start")==0) { /* step when diffraction pattern recording is started */ getparam(token,&diffpat_start,PARAM_INT,1,1); } else if (strcasecmp(token,"diffpat_end")==0) { /* step when diffraction pattern recording is stopped */ getparam(token,&diffpat_end,PARAM_INT,1,1); } else if (strcasecmp(token,"diffpat_ur")==0) { /* upper right corner of atoms distribution */ getparam(token,&diffpat_ur,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"diffpat_ll")==0) { /* lower left corner of atoms distribution */ getparam(token,&diffpat_ll,PARAM_REAL,DIM,DIM); } else if (strcasecmp(token,"diffpat_weight")==0) { /* scattering strength of different atom types */ real w[10]; if (ntypes==0) error("specify parameter ntypes before diffpat_weight"); getparam(token,w,PARAM_REAL,ntypes,ntypes); for (i=0; i<ntypes; i++) diffpat_weight[i] = (float) w[i]; } #endif #ifdef ORDPAR else if (strcasecmp(token,"op_rcut")==0) { /* cutoff radius for order parameter */ if (ntypes==0) error("specify parameter ntypes before op_rcut"); getparam(token,op_r2_cut,PARAM_REAL,ntypes*ntypes,ntypes*ntypes); for (k=0; k<ntypes*ntypes; k++) op_r2_cut[k] = SQR(op_r2_cut[k]); } else if (strcasecmp(token,"op_weight")==0) { /* weights for order 
parameter */ if (ntypes==0) error("specify parameter ntypes before op_weight"); getparam(token,op_weight,PARAM_REAL,ntypes*ntypes,ntypes*ntypes); } #endif #ifdef SOCKET_IO else if (strcasecmp(token,"socket_mode")==0) { /* socket mode: client or server */ getparam(token,tmpstr,PARAM_STR,1,255); if (strcasecmp(tmpstr,"client")==0) { server_socket = 0; } else if (strcasecmp(tmpstr,"server")==0) { server_socket = 1; } else { char msg[255]; sprintf(msg,"****** Unknown socket mode %s ignored ******",tmpstr); warning(msg); } } else if (strcasecmp(token,"socket_int")==0) { getparam("socket_int",&socket_int,PARAM_INT,1,1); } else if (strcasecmp(token,"display_host")==0) { getparam("display_host",display_host,PARAM_STR,1,255); } else if (strcasecmp(token,"server_port")==0) { int tmp; getparam(token,&tmp,PARAM_INT,1,1); server_port = tmp; /* conversion to unsigned short */ } else if (strcasecmp(token,"client_port")==0) { int tmp; getparam(token,&tmp,PARAM_INT,1,1); client_port = tmp; /* conversion to unsigned short */ } else if (strcasecmp(token,"use_socket_window")==0) { getparam("use_socket_window",&use_socket_window,PARAM_INT,1,1); } #endif #ifdef NPT else if (strcasecmp(token,"xi")==0) { /* xi variable for NPT thermostat */ getparam("xi",&xi,PARAM_REAL,1,DIM); } else if (strcasecmp(token,"pressure_start")==0) { /* external starting pressure or stress for NPT */ getparam("pressure_start",&pressure_ext,PARAM_REAL_COPY,1,DIM); } else if (strcasecmp(token,"use_current_pressure")==0) { /* set imposed pressure to current system pressure */ use_curr_pressure = 1; } else if (strcasecmp(token,"pressure_end")==0) { /* external end pressure or stress for NPT */ getparam("pressure_end",&pressure_end,PARAM_REAL_COPY,1,DIM); } else if (strcasecmp(token,"tau_xi")==0) { /* time constant tau_xi for NPT thermostat algorithm */ getparam("tau_xi",&isq_tau_xi,PARAM_REAL,1,1); if (isq_tau_xi == (real)0) { error("tau_xi is zero.\n"); } isq_tau_xi = 1.0 / SQR(isq_tau_xi); } else if 
(strcasecmp(token,"isq_tau_xi")==0) { /* inverse of square of time constant tau_xi for NPT thermostat */ getparam("isq_tau_xi",&isq_tau_xi,PARAM_REAL,1,1); } else if (strcasecmp(token,"inv_tau_xi")==0) { /* inverse of time constant tau_xi for NPT thermostat */ getparam("inv_tau_xi",&isq_tau_xi,PARAM_REAL,1,1); isq_tau_xi = SQR(isq_tau_xi); } else if (strcasecmp(token,"cell_size_tol")==0) { /* rel. tolerance for volume rescaling during NPT sim. */ getparam("cell_size_tol",&cell_size_tolerance,PARAM_REAL,1,1); } #endif #ifdef EAM2 else if (strcasecmp(token,"core_potential_file")==0) { /* EAM2:Filename for the tabulated core-core potential (r^2) */ getparam("core_potential_file",potfilename,PARAM_STR,1,255); have_potfile = 1; } else if (strcasecmp(token,"embedding_energy_file")==0) { /* EAM2:Filename for the tabulated embedding energy(rho_h) */ getparam("embedding_energy_file",eam2_emb_E_filename,PARAM_STR,1,255); } else if (strcasecmp(token,"atomic_e-density_file")==0) { /* EAM2:Filename for the tabulated atomic electron density(r_ij^2) */ getparam("atomic_e-density_file",eam2_at_rho_filename,PARAM_STR,1,255); } #ifdef EEAM else if (strcasecmp(token,"eeam_energy_file")==0) { /* EEAM:Filename for the tabulated energy modification term(p_h) */ getparam(token,eeam_mod_E_filename,PARAM_STR,1,255); } #endif #endif #ifdef ADP else if (strcasecmp(token,"adp_upotfile")==0) { /* ADP dipole distortion potential */ getparam(token,adp_upotfile,PARAM_STR,1,255); } else if (strcasecmp(token,"adp_wpotfile")==0) { /* ADP quadrupole distortion potential */ getparam(token,adp_wpotfile,PARAM_STR,1,255); } #endif #ifdef MEAM else if (strcasecmp(token,"core_potential_file")==0) { /* MEAM:Filename for the tabulated Core-Core Potential (r^2) */ getparam("core_potential_file",potfilename,PARAM_STR,1,255); have_potfile = 1; } else if (strcasecmp(token,"embedding_energy_file")==0) { /* MEAM:Filename for the tabulated Embedding Enery(rho_h) */ 
getparam("embedding_energy_file",meam_emb_E_filename,PARAM_STR,1,255); have_embed_potfile = 1; } else if (strcasecmp(token,"el_density_file")==0) { /* MEAM:Filename for the tabulated electron density */ getparam("el_density_file",meam_eldensity_filename,PARAM_STR,1,255); have_eldensity_file = 1; } else if (strcasecmp(token,"meam_t_average")==0) { getparam(token, &meam_t_average, PARAM_INT, 1, 1); } else if (strcasecmp(token,"meam_t1")==0) { if (ntypes==0) error("specify parameter ntypes before meam_t1"); getparam(token, meam_t1, PARAM_REAL, ntypes, ntypes); } else if (strcasecmp(token,"meam_t2")==0) { if (ntypes==0) error("specify parameter ntypes before meam_t2"); getparam(token, meam_t2, PARAM_REAL, ntypes, ntypes); } else if (strcasecmp(token,"meam_t3")==0) { if (ntypes==0) error("specify parameter ntypes before meam_t3"); getparam(token, meam_t3, PARAM_REAL, ntypes, ntypes); } else if (strcasecmp(token,"meam_f0")==0) { if (ntypes==0) error("specify parameter ntypes before meam_f0"); getparam(token, meam_f0, PARAM_REAL, ntypes, ntypes); } else if (strcasecmp(token,"meam_r0")==0) { if (ntypes==0) error("specify parameter ntypes before meam_r0"); getparam(token, meam_r0, PARAM_REAL, ntypes, ntypes); } else if (strcasecmp(token,"meam_beta0")==0) { if (ntypes==0) error("specify parameter ntypes before meam_beta0"); getparam(token, meam_beta0, PARAM_REAL, ntypes, ntypes); } else if (strcasecmp(token,"meam_beta1")==0) { if (ntypes==0) error("specify parameter ntypes before meam_beta1"); getparam(token, meam_beta1, PARAM_REAL, ntypes, ntypes); } else if (strcasecmp(token,"meam_beta2")==0) { if (ntypes==0) error("specify parameter ntypes before meam_beta2"); getparam(token, meam_beta2, PARAM_REAL, ntypes, ntypes); } else if (strcasecmp(token,"meam_beta3")==0) { if (ntypes==0) error("specify parameter ntypes before meam_beta3"); getparam(token, meam_beta3, PARAM_REAL, ntypes, ntypes); } else if (strcasecmp(token,"meam_rcut")==0) { if (ntypes==0) error("specify parameter 
ntypes before meam_rcut"); getparam(token, meam_rcut_lin, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"meam_deltar")==0) { if (ntypes==0) error("specify parameter ntypes before meam_deltar"); getparam(token, meam_deltar_lin, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"meam_cmin")==0) { if (ntypes==0) error("specify parameter ntypes before meam_cmin"); getparam(token, meam_cmin_lin, PARAM_REAL, 1, ntypetriples); } else if (strcasecmp(token,"meam_cmax")==0) { if (ntypes==0) error("specify parameter ntypes before meam_cmax"); getparam(token, meam_cmax_lin, PARAM_REAL, 1, ntypetriples); } else if (strcasecmp(token,"meam_a")==0) { if (ntypes==0) error("specify parameter ntypes before meam_a"); getparam(token, meam_a, PARAM_REAL, 1, ntypes); have_pre_embed_pot = 1; } else if (strcasecmp(token,"meam_e")==0) { if (ntypes==0) error("specify parameter ntypes before meam_e"); getparam(token, meam_e, PARAM_REAL, 1, ntypes); } else if (strcasecmp(token,"meam_rho0")==0) { if (ntypes==0) error("specify parameter ntypes before meam_rho0"); getparam(token, meam_rho0, PARAM_REAL, 1, ntypes); } #endif else if (strcasecmp(token,"debug_potential")==0) { /* write out interpolated potential */ getparam(token, &debug_potential, PARAM_INT, 1, 1); } else if (strcasecmp(token,"debug_pot_res")==0) { /* resolution of test interpolation */ getparam(token, &debug_pot_res, PARAM_INT, 1, 1); } #ifdef PAIR /* analytically defined potentials */ else if (strcasecmp(token,"r_cut")==0) { if (ntypes==0) error("specify parameter ntypes before r_cut"); getparam(token, r_cut_lin, PARAM_REAL, ntypepairs, ntypepairs); have_pre_pot = 1; } else if (strcasecmp(token,"r_begin")==0) { if (ntypes==0) error("specify parameter ntypes before r_begin"); getparam(token, r_begin, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"pot_res")==0) { if (ntypes==0) error("specify parameter ntypes before pot_res"); getparam(token, pot_res, PARAM_REAL, ntypepairs, 
ntypepairs); } /* Lennard-Jones */ else if (strcasecmp(token,"lj_epsilon")==0) { if (ntypes==0) error("specify parameter ntypes before lj_epsilon"); getparam(token ,lj_epsilon_lin, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"lj_sigma")==0) { if (ntypes==0) error("specify parameter ntypes before lj_sigma"); getparam(token, lj_sigma_lin, PARAM_REAL, ntypepairs, ntypepairs); } /* Gauss Part of Lennard-Jones-Gauss */ else if (strcasecmp(token,"ljg_eps")==0) { if (ntypes==0) error("specify parameter ntypes before lj_sigma"); getparam(token, ljg_eps_lin, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"ljg_r0")==0) { if (ntypes==0) error("specify parameter ntypes before lj_sigma"); getparam(token, ljg_r0_lin, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"ljg_sig")==0) { if (ntypes==0) error("specify parameter ntypes before lj_sigma"); getparam(token, ljg_sig_lin, PARAM_REAL, ntypepairs, ntypepairs); } /* Morse */ else if (strcasecmp(token,"morse_epsilon")==0) { if (ntypes==0) error("specify parameter ntypes before morse_epsilon"); getparam(token, morse_epsilon_lin, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"morse_sigma")==0) { if (ntypes==0) error("specify parameter ntypes before morse_sigma"); getparam(token, morse_sigma_lin, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"morse_alpha")==0) { if (ntypes==0) error("specify parameter ntypes before morse_alpha"); getparam(token, morse_alpha_lin, PARAM_REAL, ntypepairs, ntypepairs); } /* Buckingham */ else if (strcasecmp(token,"buck_a")==0) { if (ntypes==0) error("specify parameter ntypes before buck_a"); getparam(token, buck_a_lin, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"buck_c")==0) { if (ntypes==0) error("specify parameter ntypes before buck_c"); getparam(token, buck_c_lin, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"buck_sigma")==0) { if (ntypes==0) error("specify parameter 
ntypes before buck_sigma"); getparam(token, buck_sigma_lin, PARAM_REAL, ntypepairs, ntypepairs); } /* harmonic potential for shell model */ else if (strcasecmp(token,"spring_const")==0) { if (ntypes==0) error("specify parameter ntypes before spring_const"); getparam(token, spring_const, PARAM_REAL, ntypepairs-ntypes, ntypepairs-ntypes); } else if (strcasecmp(token,"fix_bks")==0) { /* fix bks potential */ getparam(token, &fix_bks, PARAM_INT, 1, 1); } #endif #ifdef SM /* no charge update after each step */ else if (strcasecmp(token,"charge_update_steps")==0) { getparam(token, &charge_update_steps, PARAM_INT, 1, 1); } /* keep charges fixed? */ else if (strcasecmp(token,"sm_fixed_charges")==0) { getparam(token, &sm_fixed_charges, PARAM_INT, 1, 1); } /* Initial value of the electronegativity */ else if (strcasecmp(token,"sm_chi_0")==0) { if (ntypes==0) error("specify parameter ntypes before sm_chi_0"); getparam(token, sm_chi_0, PARAM_REAL, ntypes, ntypes); } /* Initial value of the effecitve core charge */ else if (strcasecmp(token,"sm_Z")==0) { if (ntypes==0) error("specify parameter ntypes before sm_Z"); getparam(token, sm_Z, PARAM_REAL, ntypes, ntypes); } /* SM zeta */ else if (strcasecmp(token,"sm_zeta")==0) { if (ntypes==0) error("specify parameter ntypes before sm_zeta"); getparam(token, sm_zeta, PARAM_REAL, ntypes, ntypes); } /* atomic hardness or self-Coulomb repulsion */ else if (strcasecmp(token,"sm_J_0")==0) { if (ntypes==0) error("specify parameter ntypes before sm_J_0"); getparam(token, sm_J_0, PARAM_REAL, ntypes, ntypes); } /* nuclear attraction potential */ else if (strcasecmp(token,"na_pot_file")==0) { getparam(token, na_pot_filename,PARAM_STR,1,255); } /* coulomb repulsive potential */ else if (strcasecmp(token,"cr_pot_file")==0) { getparam(token, cr_pot_filename,PARAM_STR,1,255); } #ifndef NBLIST /* tabulated function erfc/r */ else if (strcasecmp(token,"erfc_file")==0) { getparam(token, erfc_filename,PARAM_STR,1,255); } #endif #endif /* SM */ #ifdef 
FEFL /*harmonic potential for Einstein crystal */ else if (strcasecmp(token,"spring_rate")==0) { if (ntypes==0) error("specify parameter ntypes before spring_rate"); getparam(token, spring_rate, PARAM_REAL, ntypes, ntypes); have_pre_pot = 1; } else if (strcasecmp(token,"lambda")==0) { /* parameter for potential switching */ getparam(token, &lambda, PARAM_REAL,1,1); } #endif #if defined(COVALENT) || defined(NNBR_TABLE) else if (strcasecmp(token,"neigh_len")==0) { /* number of neighbors */ getparam(token, &neigh_len, PARAM_INT, 1, 1); } #endif #ifdef TTBP else if (strcasecmp(token,"ttbp_constant")==0) { /* force constant (radians); type 0 */ if (ntypes==0) error("specify parameter ntypes before ttbp_constant"); getparam(token, ttbp_constant, PARAM_REAL, ntypes, ntypes); } else if (strcasecmp(token,"ttbp_constant2")==0) { /* force constant for vashishta potential */ getparam(token, ttbp_constant2, PARAM_REAL, 8, 8); ttbp_vas = 1; /* ntypes^3 values, vashishta-option only supported for ntypes = 2 */ } else if (strcasecmp(token,"ttbp_sp")==0) { /* hybridization of the element type */ if (ntypes==0) error("specify parameter ntypes before ttbp_sp"); getparam(token, ttbp_sp, PARAM_REAL, ntypes, ntypes); } else if (strcasecmp(token,"ttbp_cut")==0) { /* cutoff for smoothing part of vashishta potential */ getparam(token, ttbp_cut, PARAM_REAL, 1, 1); } else if (strcasecmp(token,"ttbp_potfile")==0) { /* filename for ttbp potential data */ getparam(token, ttbp_potfilename, PARAM_STR, 1, 255); } #endif #ifdef STIWEB else if (strcasecmp(token,"stiweb_a")==0) { if (ntypes==0) error("specify parameter ntypes before stiweb_a"); getparam(token, stiweb_a, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"stiweb_b")==0) { if (ntypes==0) error("specify parameter ntypes before stiweb_b"); getparam(token, stiweb_b, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"stiweb_p")==0) { if (ntypes==0) error("specify parameter ntypes before stiweb_p"); 
getparam(token, stiweb_p, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"stiweb_q")==0) { if (ntypes==0) error("specify parameter ntypes before stiweb_q"); getparam(token, stiweb_q, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"stiweb_a1")==0) { if (ntypes==0) error("specify parameter ntypes before stiweb_a1"); getparam(token, stiweb_a1, PARAM_REAL, ntypepairs, ntypepairs); have_pre_pot = 1; } else if (strcasecmp(token,"stiweb_de")==0) { if (ntypes==0) error("specify parameter ntypes before stiweb_de"); getparam(token, stiweb_de, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"stiweb_a2")==0) { if (ntypes==0) error("specify parameter ntypes before stiweb_a2"); getparam(token, stiweb_a2, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"stiweb_ga")==0) { if (ntypes==0) error("specify parameter ntypes before stiweb_ga"); getparam(token, stiweb_ga, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"stiweb_la")==0) { if (ntypes==0) error("specify parameter ntypes before stiweb_la"); getparam(token, stiweb_la, PARAM_REAL, ntypepairs, ntypepairs); } #endif #if defined(TERSOFF) || defined(TERSOFFMOD) || defined(BRENNER) /* Parameters for Tersoff potential */ else if (strcasecmp(token,"ters_r_cut")==0) { if (ntypes==0) error("specify parameter ntypes before ters_r_cut"); getparam(token, ters_r_cut, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"ters_r0")==0) { if (ntypes==0) error("specify parameter ntypes before ters_r0"); getparam(token, ters_r0, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"ters_a")==0) { if (ntypes==0) error("specify parameter ntypes before ters_a"); getparam(token, ters_a, PARAM_REAL, ntypepairs, ntypepairs); #if defined(TERSOFF) || defined(BRENNER) have_pre_pot = 1; #endif } else if (strcasecmp(token,"ters_b")==0) { if (ntypes==0) error("specify parameter ntypes before ters_b"); getparam(token, ters_b, PARAM_REAL, ntypepairs, 
ntypepairs); } else if (strcasecmp(token,"ters_la")==0) { if (ntypes==0) error("specify parameter ntypes before ters_la"); getparam(token, ters_la, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"ters_mu")==0) { if (ntypes==0) error("specify parameter ntypes before ters_mu"); getparam(token, ters_mu, PARAM_REAL, ntypepairs, ntypepairs); } #if defined(TERSOFF) || defined(BRENNER) else if (strcasecmp(token,"ters_chi")==0) { if (ntypes==0) error("specify parameter ntypes before ters_chi"); getparam(token, ters_chi, PARAM_REAL, ntypepairs-ntypes, ntypepairs-ntypes); } else if (strcasecmp(token,"ters_om")==0) { if (ntypes==0) error("specify parameter ntypes before ters_om"); getparam(token, ters_om, PARAM_REAL, ntypepairs-ntypes, ntypepairs-ntypes); } /* nvalues is ntypes for TERSOFF or TERSOFFMOD and ntypepairs for TERSOFF2 or TERSOFFMOD2 */ else if (strcasecmp(token,"ters_ga")==0) { if (ntypes==0) error("specify parameter ntypes before ters_ga"); getparam(token, ters_ga, PARAM_REAL, nvalues, nvalues); } else if (strcasecmp(token,"ters_n")==0) { if (ntypes==0) error("specify parameter ntypes before ters_n"); getparam(token, ters_n, PARAM_REAL, nvalues, nvalues); } else if (strcasecmp(token,"ters_c")==0) { if (ntypes==0) error("specify parameter ntypes before ters_c"); getparam(token, ters_c, PARAM_REAL, nvalues, nvalues); } else if (strcasecmp(token,"ters_d")==0) { if (ntypes==0) error("specify parameter ntypes before ters_d"); getparam(token, ters_d, PARAM_REAL, nvalues, nvalues); } #else /* TERSOFFMOD */ else if (strcasecmp(token,"ters_eta")==0) { if (ntypes==0) error("specify parameter ntypes before ters_eta"); getparam(token, ters_eta, PARAM_REAL, nvalues, nvalues); } else if (strcasecmp(token,"ters_delta")==0) { if (ntypes==0) error("specify parameter ntypes before ters_delta"); getparam(token, ters_delta, PARAM_REAL, nvalues, nvalues); } else if (strcasecmp(token,"ters_alpha")==0) { if (ntypes==0) error("specify parameter ntypes before 
ters_alpha"); getparam(token, ters_alpha, PARAM_REAL, nvalues, nvalues); } else if (strcasecmp(token,"ters_beta")==0) { if (ntypes==0) error("specify parameter ntypes before ters_beta"); getparam(token, ters_beta, PARAM_REAL, nvalues, nvalues); } else if (strcasecmp(token,"ters_c1")==0) { if (ntypes==0) error("specify parameter ntypes before ters_c1"); getparam(token, ters_c1, PARAM_REAL, nvalues, nvalues); } else if (strcasecmp(token,"ters_c2")==0) { if (ntypes==0) error("specify parameter ntypes before ters_c2"); getparam(token, ters_c2, PARAM_REAL, nvalues, nvalues); } else if (strcasecmp(token,"ters_c3")==0) { if (ntypes==0) error("specify parameter ntypes before ters_c3"); getparam(token, ters_c3, PARAM_REAL, nvalues, nvalues); } else if (strcasecmp(token,"ters_c4")==0) { if (ntypes==0) error("specify parameter ntypes before ters_c4"); getparam(token, ters_c4, PARAM_REAL, nvalues, nvalues); } else if (strcasecmp(token,"ters_c5")==0) { if (ntypes==0) error("specify parameter ntypes before ters_c5"); getparam(token, ters_c5, PARAM_REAL, nvalues, nvalues); } #endif else if (strcasecmp(token,"ters_h")==0) { if (ntypes==0) error("specify parameter ntypes before ters_h"); getparam(token, ters_h, PARAM_REAL, nvalues, nvalues); } #endif #ifdef BRENNER #endif #ifdef KEATING /* Parameters for Keating potential */ else if (strcasecmp(token,"keating_r_cut")==0) { if (ntypes==0) error("specify parameter ntypes before keating_r_cut"); getparam(token, keating_r_cut, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"keating_alpha")==0) { if (ntypes==0) error("specify parameter ntypes before keating_alpha"); getparam(token, keating_alpha, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"keating_d")==0) { if (ntypes==0) error("specify parameter ntypes before keating_d"); getparam(token, keating_d, PARAM_REAL, ntypepairs, ntypepairs); } else if (strcasecmp(token,"keating_beta")==0) { if (ntypes==0) error("specify parameter ntypes before 
keating_beta"); getparam(token, keating_beta, PARAM_REAL, ntypes*ntypepairs, ntypes*ntypepairs); } #endif #if defined(EWALD) || defined(COULOMB) || defined(USEFCS) /* charge */ else if (strcasecmp(token,"charge")==0) { if (ntypes==0) error("specify parameter ntypes before charge"); getparam(token,charge,PARAM_REAL,ntypes,ntypes); } /* coul_eng */ else if (strcasecmp(token,"coul_eng")==0) { getparam(token,&coul_eng,PARAM_REAL,1,1); } #endif #ifdef USEFCS else if (strcasecmp(token,"fcs_method")==0) { /* FCS method */ getparam(token,tmpstr,PARAM_STR,1,255); if (strcasecmp(tmpstr,"direct")==0) { fcs_method = FCS_METH_DIRECT; } else if (strcasecmp(tmpstr,"pepc")==0) { fcs_method = FCS_METH_PEPC; } else if (strcasecmp(tmpstr,"fmm")==0) { fcs_method = FCS_METH_FMM; } else if (strcasecmp(tmpstr,"p3m")==0) { fcs_method = FCS_METH_P3M; } else if (strcasecmp(tmpstr,"p2nfft")==0) { fcs_method = FCS_METH_P2NFFT; } else if (strcasecmp(tmpstr,"vmg")==0) { fcs_method = FCS_METH_VMG; } else if (strcasecmp(tmpstr,"pp3mg")==0) { fcs_method = FCS_METH_PP3MG; } } #ifdef PAIR /* delegate near-field to IMD? 
*/ else if (strcasecmp(token,"fcs_near_field_flag")==0) { getparam(token,&fcs_near_field_flag,PARAM_INT,1,1); } /* fcs_rcut for near-field delegation */ else if (strcasecmp(token,"fcs_rcut")==0) { getparam(token,&fcs_rcut,PARAM_REAL,1,1); if (fcs_rcut > 0) have_pre_pot = 1; } #endif /* fcs_tolerance */ else if (strcasecmp(token,"fcs_tolerance")==0) { getparam(token,&fcs_tolerance,PARAM_REAL,1,1); } /* fcs_grid_dim */ else if (strcasecmp(token,"fcs_grid_dim")==0) { getparam(token,&fcs_grid_dim,PARAM_INT,3,3); } /* fcs_max_iter */ else if (strcasecmp(token,"fcs_max_iter")==0) { getparam(token,&fcs_max_iter,PARAM_INT,1,1); } /* fcs_iter_tolerance */ else if (strcasecmp(token,"fcs_iter_tolerance")==0) { getparam(token,&fcs_iter_tolerance,PARAM_REAL,1,1); } /* fcs_pepc_eps */ else if (strcasecmp(token,"fcs_pepc_eps")==0) { getparam(token,&fcs_pepc_eps,PARAM_REAL,1,1); } /* fcs_pepc_theta */ else if (strcasecmp(token,"fcs_pepc_theta")==0) { getparam(token,&fcs_pepc_theta,PARAM_REAL,1,1); } /* fcs_pepc_nthreads */ else if (strcasecmp(token,"fcs_pepc_nthreads")==0) { getparam(token,&fcs_pepc_nthreads,PARAM_INT,1,1); } /* fcs_fmm_absrel */ else if (strcasecmp(token,"fcs_fmm_absrel")==0) { getparam(token,&fcs_fmm_absrel,PARAM_INT,1,1); } /* fcs_fmm_dcorr */ else if (strcasecmp(token,"fcs_fmm_dcorr")==0) { getparam(token,&fcs_fmm_dcorr,PARAM_INT,1,1); } /* fcs_fmm_do_tune */ else if (strcasecmp(token,"fcs_fmm_do_tune")==0) { getparam(token,&fcs_fmm_do_tune,PARAM_INT,1,1); } /* fcs_vmg_max_level */ else if (strcasecmp(token,"fcs_vmg_max_level")==0) { getparam(token,&fcs_vmg_max_level,PARAM_INT,1,1); } /* fcs_vmg_smooth_steps */ else if (strcasecmp(token,"fcs_vmg_smooth_steps")==0) { getparam(token,&fcs_vmg_smooth_steps,PARAM_INT,1,1); } /* fcs_vmg_gamma */ else if (strcasecmp(token,"fcs_vmg_gamma")==0) { getparam(token,&fcs_vmg_gamma,PARAM_INT,1,1); } /* fcs_vmg_near_field_cells */ else if (strcasecmp(token,"fcs_vmg_near_field_cells")==0) { 
getparam(token,&fcs_vmg_near_field_cells,PARAM_INT,1,1); } /* fcs_vmg_interpol_order */ else if (strcasecmp(token,"fcs_vmg_interpol_order")==0) { getparam(token,&fcs_vmg_interpol_order,PARAM_INT,1,1); } /* fcs_vmg_discr_order */ else if (strcasecmp(token,"fcs_vmg_discr_order")==0) { getparam(token,&fcs_vmg_discr_order,PARAM_INT,1,1); } /* fcs_pp3mg_ghosts */ else if (strcasecmp(token,"fcs_pp3mg_ghosts")==0) { getparam(token,&fcs_pp3mg_ghosts,PARAM_INT,1,1); } /* fcs_pp3mg_degree */ else if (strcasecmp(token,"fcs_pp3mg_degree")==0) { getparam(token,&fcs_pp3mg_degree,PARAM_INT,1,1); } /* fcs_pp3mg_max_part */ else if (strcasecmp(token,"fcs_pp3mg_max_part")==0) { getparam(token,&fcs_pp3mg_max_part,PARAM_INT,1,1); } /* fcs_p2nfft_intpol_order */ else if (strcasecmp(token,"fcs_p2nfft_intpol_order")==0) { getparam(token,&fcs_p2nfft_intpol_order,PARAM_INT,1,1); } /* fcs_p2nfft_epsI */ else if (strcasecmp(token,"fcs_p2nfft_epsI")==0) { getparam(token,&fcs_p2nfft_epsI,PARAM_REAL,1,1); } #endif /* USEFCS */ #if defined(EWALD) || defined(COULOMB) /* smoothing parameter */ else if (strcasecmp(token,"ew_kappa")==0) { getparam(token,&ew_kappa,PARAM_REAL,1,1); } /* k-space cutoff */ else if (strcasecmp(token,"ew_kcut")==0) { getparam(token,&ew_kcut,PARAM_REAL,1,1); } /* r-space cutoff */ else if (strcasecmp(token,"ew_rcut")==0) { getparam(token,&rtmp,PARAM_REAL,1,1); ew_r2_cut = SQR(rtmp); #ifdef KERMODE ke_rcut = rtmp; #endif #ifdef DIPOLE dp_self=2./(3.*rtmp*ew_r2_cut*sqrt(2.*M_PI)); #endif /* DIPOLE */ #ifndef VARCHG have_pre_pot = 1; #endif } /* number of image boxes */ else if (strcasecmp(token,"ew_nmax")==0) { getparam(token,&ew_nmax,PARAM_INT,1,1); } /* test flag */ else if (strcasecmp(token,"ew_test")==0) { getparam(token,&ew_test,PARAM_INT,1,1); } /* potential table resolution */ else if (strcasecmp(token,"coul_res")==0) { getparam(token,&coul_res,PARAM_REAL,1,1); } /* potential table resolution - backwards compatibility */ else if (strcasecmp(token,"dp_res")==0) { 
getparam(token,&coul_res,PARAM_REAL,1,1); } /* potential table resolution */ else if (strcasecmp(token,"coul_begin")==0) { getparam(token,&coul_begin,PARAM_REAL,1,1); } /* potential table resolution - backwards compatibility */ else if (strcasecmp(token,"dp_begin")==0) { getparam(token,&coul_begin,PARAM_REAL,1,1); } #endif /* EWALD or COULOMB */ #if defined(DIPOLE) || defined(KERMODE) /* dipole fixed? */ else if (strcasecmp(token,"dp_fix")==0) { getparam(token,&dp_fix,PARAM_INT,1,1); } /* dipole field mixing param */ else if (strcasecmp(token,"dp_mix")==0) { getparam(token,&dp_mix,PARAM_REAL,1,1); } /* dipole iteration precision */ else if (strcasecmp(token,"dp_tol")==0) { getparam(token,&dp_tol,PARAM_REAL,1,1); } /* polarisability */ else if (strcasecmp(token,"dp_alpha")==0) { if (ntypes==0) error("specify parameter ntypes before dp_alpha"); dp_alpha = (real *) malloc( ntypes*sizeof(real)); if (NULL==dp_alpha) error("cannot allocate dp_alpha"); getparam(token,dp_alpha,PARAM_REAL,ntypes,ntypes); } /* short-range dipole parameter b */ else if (strcasecmp(token,"dp_b")==0) { if (ntypepairs==0) error("specify parameter ntypes before dp_b"); dp_b = (real *) malloc( ntypepairs*sizeof(real)); if (NULL==dp_b) error("cannot allocate dp_b"); getparam(token,dp_b,PARAM_REAL,ntypepairs,ntypepairs); } /* short-range dipole parameter c */ else if (strcasecmp(token,"dp_c")==0) { if (ntypepairs==0) error("specify parameter ntypes before dp_c"); dp_c = (real *) malloc( ntypepairs*sizeof(real)); if (NULL==dp_c) error("cannot allocate dp_c"); getparam(token,dp_c,PARAM_REAL,ntypepairs,ntypepairs); } #endif /* DIPOLE */ #ifdef KERMODE /* Yukava screening factor 'beta' in KERMODE potential for silica */ else if (strcasecmp(token,"yuk_beta")==0) { getparam(token,&yuk_beta,PARAM_REAL,1,1); } else if (strcasecmp(token,"yuk_smoothlength")==0) { getparam(token,&yuk_smoothlength,PARAM_REAL,1,1); } else if (strcasecmp(token,"smoothlength_ms")==0) { 
getparam(token,&smoothlength_ms,PARAM_REAL,1,1); } #endif #if ((defined(DIPOLE) || defined(KERMODE) || defined(MORSE)) && !defined(BUCK)) /* Morse-Stretch parameter D */ else if (strcasecmp(token,"ms_D")==0) { if (ntypes==0) error("specify parameter ntypes before ms_D"); ms_D = (real *) malloc( ntypepairs*sizeof(real)); if (NULL==ms_D) error("cannot allocate ms_D"); getparam(token,ms_D,PARAM_REAL,ntypepairs,ntypepairs); have_pre_pot = 1; } /* Morse-Stretch parameter gamma */ else if (strcasecmp(token,"ms_gamma")==0) { if (ntypes==0) error("specify parameter ntypes before ms_gamma"); ms_gamma = (real *) malloc( ntypepairs*sizeof(real)); if (NULL==ms_gamma) error("cannot allocate ms_gamma"); getparam(token,ms_gamma,PARAM_REAL,ntypepairs,ntypepairs); } /* Morse-Stretch parameter r0 */ else if (strcasecmp(token,"ms_r0")==0) { if (ntypes==0) error("specify parameter ntypes before ms_r0"); ms_r0 = (real *) malloc( ntypepairs*sizeof(real)); if (NULL==ms_r0) error("cannot allocate ms_r0"); getparam(token,ms_r0,PARAM_REAL,ntypepairs,ntypepairs); } /* Morse-Stretch spring constant ms_harm_c */ else if (strcasecmp(token,"ms_harm_c")==0) { if (ntypes==0) error("specify parameter ntypes before ms_harm_c"); ms_harm_c = (real *) malloc( ntypepairs*sizeof(real)); if (NULL==ms_harm_c) error("cannot allocate ms_harm_c"); getparam(token,ms_harm_c,PARAM_REAL,ntypepairs,ntypepairs); } /* Morse-Stretch minimum distance ms_rmin */ else if (strcasecmp(token,"ms_rmin")==0) { if (ntypes==0) error("specify parameter ntypes before ms_rmin"); ms_r2_min = (real *) malloc( ntypepairs*sizeof(real)); if (NULL==ms_r2_min) error("cannot allocate ms_r2_min"); getparam(token,ms_r2_min,PARAM_REAL,ntypepairs,ntypepairs); for (i=0;i<ntypepairs;i++) { rtmp = SQR(ms_r2_min[i]); ms_r2_min[i] = rtmp; } } #endif /* DIPOLE or MORSE */ #ifdef EXTF /* external homogeneous electrostatic field */ else if (strcasecmp(token,"extf")==0) { getparam(token,&extf,PARAM_REAL,DIM,DIM); } #endif #ifdef EPITAX /* Parameters 
for option epitax */ else if (strcasecmp(token,"epitax_rate")==0) { /* rate of creation of particles */ if (ntypes==0) error("specify parameter ntypes before epitax_rate"); getparam("epitax_rate",epitax_rate,PARAM_INT,ntypes,ntypes); } else if (strcasecmp(token,"epitax_type")==0) { /* type of particles to be created */ if (ntypes==0) error("specify parameter ntypes before epitax_type"); getparam("epitax_type",epitax_type,PARAM_INT,ntypes,ntypes); } else if (strcasecmp(token,"epitax_mass")==0) { /* mass of particles to be created */ if (ntypes==0) error("specify parameter ntypes before epitax_mass"); getparam("epitax_mass",epitax_mass,PARAM_REAL,ntypes,ntypes); } else if (strcasecmp(token,"epitax_temp")==0) { /* temperature of particles to be created */ if (ntypes==0) error("specify parameter ntypes before epitax_temp"); getparam("epitax_temp",epitax_temp,PARAM_REAL,ntypes,ntypes); } else if (strcasecmp(token,"epitax_cutoff")==0) { /* parameter for cutoff */ getparam("epitax_cutoff",&epitax_cutoff,PARAM_REAL,1,1); } else if (strcasecmp(token,"epitax_maxsteps")==0) { /* maximal steps in epitax simulation */ getparam("epitax_maxsteps",&epitax_maxsteps,PARAM_INT,1,1); } else if (strcasecmp(token,"epitax_startstep")==0) { /* steps before atom creation starts */ getparam("epitax_startstep",&epitax_startstep,PARAM_INT,1,1); } else if (strcasecmp(token,"epitax_ctrl")==0) { /* parameter for change of integrator */ getparam("epitax_ctrl",&epitax_ctrl,PARAM_REAL,1,1); } else if (strcasecmp(token,"epitax_height")==0) { /* height of beam creation */ getparam("epitax_height",&epitax_height,PARAM_REAL,1,1); } else if (strcasecmp(token,"epitax_speed")==0) { /* height of beam creation */ getparam("epitax_speed",&epitax_speed,PARAM_REAL,1,1); } #endif #ifdef UNIAX else if (strcasecmp(token,"uniax_r_cut")==0) { /* UNIAX: cutoff radius of uniaxial molecules */ getparam("uniax_r_cut",&uniax_r_cut,PARAM_REAL,1,1); uniax_r2_cut = SQR(uniax_r_cut); cellsz = MAX(cellsz,uniax_r2_cut); } 
#endif #ifdef RELAX else if (strcasecmp(token,"max_sscount")==0) { /* max nr. of minimizations in quasistat sims */ getparam(token,&max_sscount,PARAM_INT,1,1); } #endif #ifdef EXTPOT else if (strcasecmp(token,"ep_n")==0) { /* EXTPOT: number of external potentials */ getparam(token,&ep_n,PARAM_INT,1,1); if (1 > ep_n) error("At least one extpot must be defined (ep_n < 1)"); for (i=0; i<ep_n; i++) { ep_pos[i] = nullv; ep_vel[i] = nullv; ep_dir[i] = nullv; } } else if (strcasecmp(token,"ep_nind")==0) { /* EXTPOT: number of indentors (remaining extpots are walls) */ getparam(token,&ep_nind,PARAM_INT,1,1); if (ep_nind > ep_n) error("Number of indeters exceeds ep_n"); } else if (strcasecmp(token,"ep_key")==0) { /* EXTPOT: potential key : which potential type to use*/ getparam(token,&ep_key,PARAM_INT,1,1); } else if (strcasecmp(token,"ep_a")==0) { /* EXTPOT: strength of external potential */ getparam(token,&ep_a,PARAM_REAL,1,1); } else if (strcasecmp(token,"extpot_file")==0) { /* EXTPOT: Filename for the tabulated external potential (r^2) */ getparam("expot_file",extpotfilename,PARAM_STR,1,255); if(ep_a!=0) error("either use expotfile or define ep_a"); have_extpotfile = 1; } else if (strcasecmp(token,"ep_max_int")==0) { /* EXTPOT: maximal wait steps during relaxation */ getparam(token,&ep_max_int,PARAM_INT,1,1); if ((ep_max_int > 1) && (ep_max_int < 10)) ep_max_int = 10; } else if (strcasecmp(token,"ep_rcut")==0) { /* EXTPOT: cutoff radius of external potential */ getparam(token,&ep_rcut,PARAM_REAL,1,1); } else if (strcasecmp(token,"ep_pos")==0) { /* EXTPOT: position of external potential */ getparam(token,&rtmp4,PARAM_REAL,4,4); i = (int) rtmp4[0]; if (i >= ep_n) error("Number of external potential exceeds ep_n"); ep_pos[i].x = rtmp4[1]; ep_pos[i].y = rtmp4[2]; ep_pos[i].z = rtmp4[3]; } else if (strcasecmp(token,"ep_vel")==0) { /* EXTPOT: velocity of external potential */ getparam(token,&rtmp4,PARAM_REAL,4,4); i = (int) rtmp4[0]; if (i >= ep_n) error("Number of external 
potential exceeds ep_n"); ep_vel[i].x = rtmp4[1]; ep_vel[i].y = rtmp4[2]; ep_vel[i].z = rtmp4[3]; } else if (strcasecmp(token,"ep_dir")==0) { /* EXTPOT: direction of external potential */ getparam(token,&rtmp4,PARAM_REAL,4,4); i = (int) rtmp4[0]; if (i >= ep_n) error("Number of external potential exceeds ep_n"); rtmp = SQRT( SQR(rtmp4[1]) + SQR(rtmp4[2]) + SQR(rtmp4[3]) ); if (rtmp < 1e-6) error("parameter ep_dir requires non-zero direction"); ep_dir[i].x = rtmp4[1] / rtmp; ep_dir[i].y = rtmp4[2] / rtmp; ep_dir[i].z = rtmp4[3] / rtmp; } #endif /* EXTPOT */ #ifdef CBE else if (strcasecmp(token,"num_spus")==0) { /* number of SPUs to be used */ getparam(token,&num_spus,PARAM_INT,1,1); /* Make sure parameter just read is in a valid range */ if ( (num_spus<1) || (num_spus>N_SPU_THREADS_MAX) ) { num_spus=N_SPU_THREADS_MAX; } } else if (strcasecmp(token, "num_bufs")==0) { /* Number of argument buffers per SPU */ getparam(token,&num_bufs,PARAM_INT,1,1); if ( (num_bufs<1) || (num_bufs>N_ARGBUF) ) { num_bufs=N_ARGBUF; } } else if (strcasecmp(token, "cbe_pot_steps")==0) { /* number of tabulation steps in potential table */ getparam(token,&cbe_pot_steps,PARAM_INT,1,1); } else if (strcasecmp(token, "cbe_pot_max")==0) { /* maximum value in potential table */ getparam(token,&cbe_pot_max,PARAM_REAL,1,1); } #endif #ifdef KIM else if (strcasecmp(token, "kim_model_name")==0) { /* name of the KIM model for force calculation */ getparam(token, kim_model_name, PARAM_STR, 1, 255); have_potfile = 1; } else if (strcasecmp(token, "kim_el_names")==0) { /* element names, needed for the KIM matching process */ if (ntypes == 0) error ("specify parameter ntypes before kim_elements\n"); getparam(token, kim_el_names, PARAM_CHARPTR, ntypes, 4); } #endif else if (strcasecmp(token,"use_header")==0) { /* shall a header be used */ getparam("use_header",&use_header,PARAM_INT,1,1); } else { char msg[255]; sprintf(msg,"****** Unknown TAG %s ignored ******",token); warning(msg); } } while (!feof(pf)); if 
(feof(pf)) finished=1;   /* tail of getparamfile(): loop above ends on EOF */
  fclose(pf);
  return finished;
} /* getparamfile */

/*****************************************************************
*
*  Check input for nonsense values
*
******************************************************************/

/* Validate the parameter set after all parameter files have been read.
   Fills in defaults where a sensible one exists (vtypes, restrictions,
   end_temp, correl_ts, avpos_end, ...) and aborts via error() on fatal
   inconsistencies.  Most checks are specific to compile-time features. */
void check_parameters_complete()
{
  real tmp;
  real norm_bend_axis;   /* NOTE(review): not used in this function -- TODO confirm it can be removed */
  int k;
#ifdef TWOD
  vektor einsv = {1.0,1.0};
#else
  vektor einsv = {1.0,1.0,1.0};
#endif
  vektor this_bend_axis;

  /* the absolute minimum every simulation needs */
  if (ensemble == 0) {
    error("missing or unknown ensemble parameter.");
  }
  if (timestep == (real)0) {
    error("timestep is missing or zero.");
  }
  if (ntypes == 0) {
    error("ntypes is missing or zero.");
  }

#ifdef BEND
  /* each bending moment needs both a non-zero axis and a non-zero force */
  if(bend_nmoments >0) {
    if(bend_nmoments >6)
      error("currently only 6 bending moments are supported");
    for (k=0;k<bend_nmoments;k++) {
      this_bend_axis.x = (bend_axis + k)->x;
      this_bend_axis.y = (bend_axis + k)->y;
      this_bend_axis.z = (bend_axis + k)->z;
      if(SPROD(this_bend_axis,this_bend_axis)==0)
        error("definition of bending moment without axis");
      /* tmp = squared norm of the force assigned to this moment's vtype */
#ifdef RELAX
      tmp=((fbc_bdforces + (bend_vtype_of_force[k]))->x)*
          ((fbc_bdforces + (bend_vtype_of_force[k]))->x) +
          ((fbc_bdforces + (bend_vtype_of_force[k]))->y)*
          ((fbc_bdforces + (bend_vtype_of_force[k]))->y) +
          ((fbc_bdforces + (bend_vtype_of_force[k]))->z)*
          ((fbc_bdforces + (bend_vtype_of_force[k]))->z);
#else
      tmp=((fbc_endbforces + (bend_vtype_of_force[k]))->x)*
          ((fbc_endbforces + (bend_vtype_of_force[k]))->x) +
          ((fbc_endbforces + (bend_vtype_of_force[k]))->y)*
          ((fbc_endbforces + (bend_vtype_of_force[k]))->y) +
          ((fbc_endbforces + (bend_vtype_of_force[k]))->z)*
          ((fbc_endbforces + (bend_vtype_of_force[k]))->z);
#endif
      if(tmp==0)
        error("definition of bending moment without force");
    }
  }
#endif

#ifdef EXTPOT
  if(ep_a !=0)
    printf("Usage of ep_a is depreciated, use extpot_file instead\n");
  if(ep_a !=0 && have_extpotfile==1)
    error("use either ep_a or extpotfile");
  if(ep_rcut <=0)
    error("need a value for ep_rcut");
#endif

#if defined(FBC) || defined(RIGID) || defined(DEFORM)
  if (vtypes == 0)
    error("FBC, RIGID, and DEFORM require parameter total_types to be set");
#endif
  /* default: virtual types coincide with real types, no motion restrictions */
  if (vtypes == 0) {
    vtypes = ntypes;
    restrictions = (vektor *) malloc( vtypes * sizeof(vektor) );
    if (NULL==restrictions)
      error("Cannot allocate memory for restriction vectors\n");
    for (k=0; k<vtypes; k++) restrictions[k] = einsv;
  }
  if (vtypes < ntypes)
    error("total_types must not be smaller than ntypes");

#if defined(PAIR) && !defined(VARCHG) && !defined(TERSOFFMOD)
  if ((have_potfile==0) && (have_pre_pot==0))
    error("You must specify a pair interaction!");
#endif

#ifdef TEMPCONTROL
  if (temperature == 0) {
    error("starttemp is missing or zero.");
  }
  /* default: hold the start temperature */
  if (end_temp == 0) {
    end_temp = temperature;
  }
#endif

#if defined(CORRELATE) || defined(MSQD)
  /* default correlation sampling interval: the energy output interval */
  if (correl_ts == 0) {
    if (eng_int != 0) correl_ts = eng_int;
    else {
      error("correl_ts is missing or zero.");
    }
  }
#endif

#ifdef CORRELATE
  if (ncorr_rmax == 0) {
    error("correl_rmax is missing or zero.");
  }
  if (ncorr_tmax == 0) {
    error("correl_tmax is zero.");
  }
#endif

#ifdef NVX
  if (ensemble==ENS_NVX) {
    if (hc_int == 0) error ("hc_int is zero.");
    if (hc_nlayers == 0) error ("hc_nlayers is zero.");
  }
#endif

#ifdef FTG
  if (nslices < 2){
    error ("nslices is missing or less than 2.");
  }
  if (Tleft == 0 ){
    error ("Tleft is missing or zero.");
  }
  if (Tright == 0 ){
    error ("Tright is missing or zero.");
  }
#endif

#ifdef LASER
  /* normalize laser_dir to a unit axis vector; exactly one component may
     be set.  NOTE: the #ifndef TWOD blocks below intentionally contain
     unbalanced braces so that both the 2D and 3D variants parse. */
  if (laser_dir.x!=0) {
    laser_dir.x=1;
    if (laser_dir.y!=0
#ifndef TWOD
        || laser_dir.z!=0
#endif
       ) error("Sorry: Laser incidence only along one coordinate axis.");
  }
  else if (laser_dir.y!=0) {
    laser_dir.y=1;
#ifndef TWOD
    if (laser_dir.z!=0){
      error("Sorry: Laser incidence only along one coordinate axis.");
    }
  } else if (laser_dir.z!=0) {
    laser_dir.z=1;
#endif
  } else {
    error("Parameter laser_dir (laser incidence direction) missing.");
  }
  if ( (laser_rescale_mode < 0) || (laser_rescale_mode > 4) ) {
    error("Parameter laser_rescale_mode must be a positive integer < 5 !");
  }
#endif /* LASER */

#ifdef LASERYZ
  if ( (laser_sigma_w_y == 0.0 ) || (laser_sigma_w_z == 0.0) ){
    warning("laser_sigma_w_y and / or laser_sigma_w_z is set to 0.0 - this seems to be a nonsense value!\n");
  }
  if ( (laser_sigma_w_y < 0.0) || (laser_sigma_w_z < 0.0)){
    error("laser_sigma_w_y and / or laser_sigma_w_z is smaller than zero - this is nonsense!\n");
  }
  if ( laser_sigma_w0 <= 0.0){
    error("laser_sigma_w0 is equal or less than zero - which is nonsense or means zero energy density.");
  }
  if ( (laser_tem_mode.x < 0 ) || (laser_tem_mode.x > 1 ) ) {
    error("Laser TEM Mode has to be either Gauss-Laguerre (0) or Gauss-Hermite (1).");
  }
  if ( (laser_tem_mode.y < 0) || (laser_tem_mode.y > 3) || (laser_tem_mode.z < 0 ) || (laser_tem_mode.z > 3) ) {
    error("Only Laser TEM_xy modes for x=1...3 and y=1...3 are supported yet.");
  }
#endif

#ifdef TTM
  if (fd_update_steps <= 0) {
    warning("Ignoring illegal value of fd_update_steps, using 1\n");
    fd_update_steps=1;
  }
  if (init_t_el<0) {
    warning("Ignoring illegal value of init_t_el, using lattice temp\n");
    init_t_el=0.0;
  }
  if (fix_t_el!=0 && init_t_el==0.0)
    error("You need to specify init_t_el for enabled fix_t_el!\n");
  /* map the fd_one_d string ("x"/"1", "y"/"2", "z"/"3") to an axis index */
  if (strcasecmp(fd_one_d_str,"x")==0 || strcasecmp(fd_one_d_str,"1")==0) {
    fd_one_d=1;
  } else if (strcasecmp(fd_one_d_str,"y")==0 || strcasecmp(fd_one_d_str,"2")==0){
    fd_one_d=2;
  } else if (strcasecmp(fd_one_d_str,"z")==0 || strcasecmp(fd_one_d_str,"3")==0){
    fd_one_d=3;
  } else if (strcasecmp(fd_one_d_str,"")!=0) {
    warning("Ignoring unknown value of fe_one_d\n");
  }
  /* no longer used: (translated from German "wird nicht mehr benutzt!") */
  // if ((fd_gamma==0.0 && fd_c==0.0)||(fd_gamma!=0.0 && fd_c!=0.0)) {
  //   error ("You must specify either fd_gamma or fd_c for TTM simulations.");
  // }
  // MY MOD:
  if(fd_min_atoms==0) error("You must specify fd_min_atoms");
#if defined(FDTD) || defined(TMM)
  if(I0==0) error("You must specify peak intensity I0 in W/m^2");
  if(lambda==0) error("You must specify lambda in meters");
#endif /* FDTD and TMM */
#if defined(FDTD) || defined(LASER) || defined(TMM)
  if(fd_n_timesteps==0) error("You must specify fd_n_timesteps");
#endif /* FDTD and LASER and TMM */
  if(atomic_weight==0) error("You must specify atomic_weight");
  if(atomic_charge==0) error("You must specify atomic_charge");
#ifdef TTM1D
  if(ttmdimx==0) error("You must specify ttmdimx");
#endif
#endif /* TTM */
#ifdef FILTER
  if(filter_int==0) error("You must specify filter_int");
#endif
//ENDOF MYMOD

#ifdef MPI
  /* fall back to an automatically computed CPU layout if the requested
     cpu_dim does not match the number of available MPI ranks */
  {
#ifdef TWOD
    int want_cpus = cpu_dim.x * cpu_dim.y;
#else
    int want_cpus = cpu_dim.x * cpu_dim.y * cpu_dim.z;
#endif
    if ( want_cpus != num_cpus) calc_cpu_dim();
    if ((want_cpus != num_cpus) && (want_cpus != 1))
      warning("cpu_dim incompatible with available CPUs, using default");
  }
#endif

#ifdef SOCKET_IO
  if ((!server_socket) && (display_host[0]=='\0')) {
    error("display_host name or IP address missing.");
  }
#endif

#ifdef UNIAX
  if (uniax_r_cut == 0) {
    error("uniax_r_cut is missing or zero.");
  }
#endif

#if defined(FRAC) || defined(FTG)
  /* default stadium: half the box */
  if (stadium2.x==0 && stadium2.y==0 ){
    stadium2.x = box_x.x/2.0;
    stadium2.y = box_y.y/2.0;
  }
#endif

#ifdef AVPOS
  fprintf(stdout, "avpos_start: %d imdrestart*checkpt_int: %d\n", avpos_start, imdrestart*checkpt_int);
  /* Default initialisation of end time */
  if (0==avpos_end) avpos_end = steps_max;
#ifdef STRESS_TENS
  if (press_int % avpos_int !=0 || avpos_res % eng_int !=0)
    error("For averaged pressure writes, press_int musst be an integer multiple of avpos_int and avpos_res must be an integer multiple of eng_int");
#endif
#endif

#ifdef ATDIST
  if (0==atdist_end) atdist_end = steps_max;
#endif

#ifdef CG
  if ((linmin_maxsteps==0) || (linmin_tol==0.0) )
    error("You have to set parameters for the linmin search");
#endif

#ifdef HOMDEF
  if (relax_rate > 0.0) {
#ifdef STRESS_TENS
    if (relax_mode == -1) relax_mode = RELAX_FULL;
#else
    if (relax_mode == -1) relax_mode = RELAX_ISO;
    if ((relax_mode == RELAX_FULL) || (relax_mode == RELAX_AXIAL))
      error("Pressure relaxation modes axial and full require option stress");
#endif
  }
#endif

#if defined(DIFFPAT) && defined(TWOD)
  error("Option DIFFPAT is not supported in 2D");
#endif

#ifdef KIM
  if (strcmp(kim_el_names[0],"\0")==0)
    error("kim_el_names is not properly set in parameter file");
#endif

#if defined(ADA) && defined(TWOD)
  error("Option ADA is not supported in 2D");
#endif

#ifdef ADA
  if (ada_nbr_r2cut == 0. && ada_latticeConst == 0.){
    error("Nearest neighbor cutoff distance \"ada_nbr_rcut\" or lattice constant \"ada_latticeConst\" is missing or zero in the parameter file");
  }
#endif

#ifdef NYETENSOR
  if (ada_latticeConst == 0.){
    error("Lattice constant \"ada_latticeConst\" is missing or zero in the parameter file");
  }
  if (SPROD(nye_rotationAxis_x, nye_rotationAxis_x) == 0.)
    error("Crystal orientation in x direction \"nye_rotationAxis_x\" is missing or zero in the parameter file");
  if (SPROD(nye_rotationAxis_y, nye_rotationAxis_y) == 0.)
    error("Crystal orientation in y direction \"nye_rotationAxis_y\" is missing or zero in the parameter file");
  if (SPROD(nye_rotationAxis_z, nye_rotationAxis_z) == 0.)
error("Crystal orientation in x direction \"nye_rotationAxis_z\" is missing or zero in the parameter file"); #endif } /***************************************************************** * * read command line on master process * ******************************************************************/ void read_command_line(int argc,char **argv) { if (0==myid) { /* check for restart, process options */ strcpy(progname,argv[0]); while ((argc > 1) && (argv[1][0] =='-')) { switch (argv[1][1]) { /* r - restart */ case 'r': if (argv[1][2]=='\0') { if (NULL != argv[2]) { imdrestart = atoi(argv[2]); --argc; ++argv; } } else imdrestart = atoi(&argv[1][2]); break; case 'p': if (argv[1][2]=='\0') { if (NULL != argv[2]) { strcpy(paramfilename,argv[2]); --argc; ++argv; } } else strcpy(paramfilename,&argv[1][2]); break; default: printf("Illegal option %s \n",argv[1]); usage(); exit(-1); } ++argv; --argc; } } #ifdef MPI /* broadcast everything */ MPI_Bcast( paramfilename, 255, MPI_CHAR, 0, MPI_COMM_WORLD); MPI_Bcast( progname, 255, MPI_CHAR, 0, MPI_COMM_WORLD); MPI_Bcast( &imdrestart, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif } /***************************************************************** * * read parameters * ******************************************************************/ int read_parameters(char *paramfname, int phase) { int i, finished = 0; str255 fname; FILE *testfile; if (0==myid) { /* write itr-file for the next phase */ if (phase > 1) write_itr_file(-1, steps_max,""); finished = getparamfile(paramfname, phase); /* read initial itr-file (if there is any), but keep steps_min value */ if ((phase == 1) && (0 < strlen(itrfilename))) { int tmp_steps = steps_min; getparamfile(itrfilename, 1); steps_min = tmp_steps; } /* read back itr-file for the next phase */ if (phase > 1) { #ifdef NEB sprintf(outfilename, "%s.%02d", neb_outfilename, myrank); #endif sprintf( itrfilename,"%s-final.itr", outfilename ); getparamfile(itrfilename, 1); } check_parameters_complete(); /* Get restart 
parameters if restart */
    if (0 != imdrestart) {
      /* read itr-file: try "<out>.<n>.itr", then zero-padded "<out>.%05d.itr" */
      sprintf(fname,"%s.%d.itr",outfilename,imdrestart);
      testfile = fopen(fname,"r");
      if (NULL==testfile) {
        sprintf(fname,"%s.%05d.itr",outfilename,imdrestart);
        testfile = fopen(fname,"r");
        if (NULL==testfile) {
          error_str("file %s not found", fname);
        } else {
          fclose(testfile);
        }
      } else {
        fclose(testfile);
      }
      getparamfile(fname,1);
      //MYMOD: adapt for MPIIO (translated from German "Fuer MPIIO anpassen")
      /* get restart configuration: same two-step name lookup as above,
         with the extension selected by the MPIIO build option */
      sprintf(infilename,"%s.%d.%s",outfilename,imdrestart,"chkpt");
#ifdef MPIIO
      sprintf(infilename,"%s.%d.%s",outfilename,imdrestart,"mpiio");
#endif
      testfile = fopen(infilename,"r");
      if (NULL==testfile) {
        sprintf(infilename,"%s.%05d.%s",outfilename,imdrestart,"chkpt");
#ifdef MPIIO
        sprintf(infilename,"%s.%05d.%s",outfilename,imdrestart,"mpiio");
#endif
        testfile = fopen(infilename,"r");
        if (NULL==testfile) {
          error_str("file %s not found", infilename);
        } else {
          fclose(testfile);
        }
      } else {
        fclose(testfile);
      }
      printf("Restarting from %s.\n",infilename);
    } else if (phase == 1) {
      /* if not restart and phase 1: delete files to which we append */
      sprintf(fname,"%s.eng", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.Ekin", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.Epot", outfilename); unlink(fname);
#ifdef STRESS_TENS
      sprintf(fname,"%s.minmax.press", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.presstens", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.presstens_xx", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.presstens_yy", outfilename); unlink(fname);
#ifndef TWOD
      sprintf(fname,"%s.minmax.presstens_zz", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.presstens_yz", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.presstens_zx", outfilename); unlink(fname);
#endif
      sprintf(fname,"%s.minmax.presstens_xy", outfilename); unlink(fname);
#endif /* STRESS_TENS */
#if defined(SHOCK) || defined(TTM)
      sprintf(fname,"%s.minmax.vxavg", outfilename); unlink(fname);
#endif
#ifdef SHOCK
      sprintf(fname,"%s.minmax.Ekin_long", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.Ekin_trans", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.Ekin_comp", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.shock_shear", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.shear_aniso", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.pressxy", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.pressyz", outfilename); unlink(fname);
      sprintf(fname,"%s.minmax.presszx", outfilename); unlink(fname);
#endif
      sprintf(fname,"%s.minmax.dens", outfilename); unlink(fname);
      sprintf(fname,"%s.tempdist", outfilename); unlink(fname);
      sprintf(fname,"%s.msqd", outfilename); unlink(fname);
    }
  }
#ifdef MPI
  /* propagate the parse result and all parameters to the other ranks */
  MPI_Bcast( &finished, 1, MPI_INT, 0, MPI_COMM_WORLD);
  broadcast_params();
#endif
  return finished;
}

#ifdef MPI

/****************************************************************************
*
*  Broadcast all parameters to other CPUs (MPI only)
*
*****************************************************************************/

/* Copy every run parameter from rank 0 to all other ranks; client ranks
   allocate the per-vtype arrays before receiving them. */
void broadcast_params() {
  int i, k;
#ifdef TWOD
  vektor nullv = {0,0};
#else
  vektor nullv = {0,0,0};
#endif
  MPI_Bcast( &ensemble     , 1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast( &maxwalltime  , 1, REAL, 0, MPI_COMM_WORLD);
  MPI_Bcast( &hyper_threads,1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast( &watch_int    , 1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast( &stop_int     , 1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast( &loop         , 1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast( &seed         , 1, MPI_LONG, 0, MPI_COMM_WORLD);
  MPI_Bcast( &do_maxwell   , 1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast( &steps_max    , 1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast( &steps_min    , 1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast( &checkpt_int  , 1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast( &eng_int      , 1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast( &flush_int    , 1, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast( &pic_int      , 1, MPI_INT, 0, MPI_COMM_WORLD);
#if defined(FORCE) || defined(WRITEF)
  MPI_Bcast( &force_int , 1, MPI_INT,
0, MPI_COMM_WORLD); #endif #ifdef WRITEF MPI_Bcast( &force_all , 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef DEBUG MPI_Bcast( &force_celldim_divisor, 3, MPI_INT, 0, MPI_COMM_WORLD); #endif MPI_Bcast( &dist_int, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_dim, DIM, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_ll, DIM, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_ur, DIM, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_Epot_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_Ekin_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_Ekin_long_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_Ekin_trans_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_Ekin_comp_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_press_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_pressoff_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_presstens_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_shock_shear_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_shear_aniso_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_dens_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_vxavg_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &box_from_header, 1, MPI_INT, 0, MPI_COMM_WORLD); //MYMOD MPI_Bcast( &dist_mdtemp_flag, 1, MPI_INT, 0, MPI_COMM_WORLD); //ENDOF MYMOD #ifdef TWOD /* MPI_Bcast( &pic_scale , 2, REAL, 0, MPI_COMM_WORLD); */ MPI_Bcast( &ecut_kin , 2, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ecut_pot , 2, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &pic_res , 2, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &pic_type , 1, MPI_INT, 0, MPI_COMM_WORLD); #endif MPI_Bcast( &pic_ll , DIM, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &pic_ur , DIM, REAL, 0, MPI_COMM_WORLD); #ifdef CLONE MPI_Bcast( &nclones, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif MPI_Bcast( &vtypes, 1, MPI_INT, 0, MPI_COMM_WORLD); #ifdef RELAX MPI_Bcast( &ekin_threshold, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &fnorm_threshold, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &f_max_threshold, 1, REAL, 0, 
MPI_COMM_WORLD); MPI_Bcast( &delta_epot_threshold, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &sscount, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &nfc, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef FBC if (NULL==fbc_forces) { fbc_forces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_forces) error("Cannot allocate memory for fbc_forces on client."); } MPI_Bcast( fbc_forces, vtypes * DIM, REAL, 0, MPI_COMM_WORLD); if (NULL==fbc_beginforces) { fbc_beginforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_beginforces) error("Cannot allocate memory for fbc_beginforces on client."); } MPI_Bcast( fbc_beginforces, vtypes * DIM, REAL, 0, MPI_COMM_WORLD); #ifdef RELAX MPI_Bcast( &max_fbc_int, 1, MPI_INT, 0, MPI_COMM_WORLD); if (NULL==fbc_dforces) { fbc_dforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_dforces) error("Cannot allocate memory for fbc_dforces on client."); } MPI_Bcast( fbc_dforces, vtypes * DIM, REAL, 0, MPI_COMM_WORLD); #else if (NULL==fbc_endforces) { fbc_endforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_endforces) error("Cannot allocate memory for fbc_endforces on client."); } MPI_Bcast( fbc_endforces, vtypes * DIM, REAL, 0, MPI_COMM_WORLD); #endif if (NULL==fbc_df) { fbc_df = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_df) error("Cannot allocate memory for fbc_df on client."); } #endif /*FBC*/ #ifdef BEND if (NULL==fbc_bforces) { fbc_bforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_bforces) error("Cannot allocate memory for fbc_bforces on client."); } MPI_Bcast( fbc_bforces, vtypes * DIM, REAL, 0, MPI_COMM_WORLD); if (NULL==fbc_beginbforces) { fbc_beginbforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_beginbforces) error("Cannot allocate memory for fbc_beginbforces on client."); } MPI_Bcast( fbc_beginbforces, vtypes * DIM, REAL, 0, MPI_COMM_WORLD); #ifdef RELAX MPI_Bcast( &max_bfbc_int, 1, MPI_INT, 0, MPI_COMM_WORLD); if 
(NULL==fbc_bdforces) { fbc_bdforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_bdforces) error("Cannot allocate memory for fbc_bdforces on client."); } MPI_Bcast( fbc_bdforces, vtypes * DIM, REAL, 0, MPI_COMM_WORLD); #else if (NULL==fbc_endbforces) { fbc_endbforces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_endbforces) error("Cannot allocate memory for fbc_endbforces on client."); } MPI_Bcast( fbc_endbforces, vtypes * DIM, REAL, 0, MPI_COMM_WORLD); #endif if (NULL==fbc_bdf) { fbc_bdf = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==fbc_bdf) error("Cannot allocate memory for fbc_bdf on client."); } if (NULL==bend_forces) { bend_forces = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==bend_forces) error("Cannot allocate memory for bend_forces on client."); } #endif /* BEND */ #ifdef ZAPP MPI_Bcast( &zapp_threshold, 1, REAL, 0, MPI_COMM_WORLD); #endif #ifdef FLAGEDATOMS MPI_Bcast( &flagedatomstype, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef BEND MPI_Bcast( &bend_nmoments, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &bend_vtype_of_origin, 6, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &bend_vtype_of_force, 6, MPI_INT, 0, MPI_COMM_WORLD); if (NULL==bend_axis) { bend_axis = (vektor *) malloc( bend_nmoments * sizeof(vektor) ); if (NULL==bend_axis) error("Cannot allocate memory for bend_axis vector on client.\n"); } MPI_Bcast( bend_axis, bend_nmoments * DIM, REAL, 0, MPI_COMM_WORLD); /* these variables will be calculated and do not need to be communicated here */ if (NULL==bend_origin) { bend_origin = (vektor *) malloc( bend_nmoments * sizeof(vektor) ); if (NULL==bend_origin) error("Cannot allocate memory for bend_origin vector on client.\n"); } if (NULL==bend_cog) { bend_cog = (vektor *) malloc( bend_nmoments * sizeof(vektor) ); if (NULL==bend_cog) error("Cannot allocate memory for bend_cog vector on client.\n"); } if (NULL==bend_vec) { bend_vec = (vektor *) malloc( bend_nmoments * sizeof(vektor) ); if (NULL==bend_vec) 
error("Cannot allocate memory for bend_vec vector on client.\n"); } #endif /* BEND */ if (NULL==restrictions) { restrictions = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==restrictions) error("Cannot allocate memory for restriction vectors on client."); } MPI_Bcast( restrictions, vtypes * DIM, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &pbc_dirs , DIM, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &box_x , DIM, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &box_y , DIM, REAL, 0, MPI_COMM_WORLD); #ifndef TWOD MPI_Bcast( &box_z , DIM, REAL, 0, MPI_COMM_WORLD); #endif MPI_Bcast( &box_param, DIM, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &size_per_cpu, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &box_unit, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ntypes, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &ntypepairs, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &ntypetriples, 1, MPI_INT, 0, MPI_COMM_WORLD); if (NULL==masses) { masses = (real *) malloc( ntypes * sizeof(real) ); if (NULL==masses) error("Cannot allocate memory for masses array\n"); } MPI_Bcast( masses, ntypes, REAL, 0, MPI_COMM_WORLD); if (NULL==gtypes) { gtypes = (int *) malloc( ntypes * sizeof(int) ); if (NULL==gtypes) error("Cannot allocate memory for types array\n"); } MPI_Bcast( gtypes, ntypes, MPI_INT, 0, MPI_COMM_WORLD); #ifdef NBLIST MPI_Bcast( &nbl_margin, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &nbl_size, 1, REAL, 0, MPI_COMM_WORLD); #endif #ifdef VEC MPI_Bcast( &atoms_per_cpu, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef EFILTER if (NULL==lower_e_pot) { lower_e_pot = (real *) calloc(ntypes, sizeof(real)); if (NULL==lower_e_pot) error("Cannot allocate memory for lower_e_pot\n"); } if (NULL==upper_e_pot) { upper_e_pot = (real *) calloc(ntypes, sizeof(real)); if (NULL==upper_e_pot) error("Cannot allocate memory for upper_e_pot\n"); } MPI_Bcast( lower_e_pot, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( upper_e_pot, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ef_checkpt_int, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif 
#ifdef NNBR if (NULL==lower_nb_cut) { lower_nb_cut = (int *) calloc(ntypes, sizeof(int)); if (NULL==lower_nb_cut) error("Cannot allocate memory for lower_nb_cut\n"); } if (NULL==upper_nb_cut) { upper_nb_cut = (int *) calloc(ntypes, sizeof(int)); if (NULL==upper_nb_cut) error("Cannot allocate memory for upper_nb_cut\n"); } if (NULL==nb_r2_cut) { nb_r2_cut = (real *) calloc(ntypes*ntypes, sizeof(real)); if (NULL==nb_r2_cut) error("Cannot allocate memory for nb_r2_cut\n"); } MPI_Bcast( lower_nb_cut, ntypes, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( upper_nb_cut, ntypes, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( nb_r2_cut, ntypes*ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &nb_checkpt_int, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif MPI_Bcast( &timestep , 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &temperature , 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &use_curr_temp, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &cpu_dim , DIM, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &parallel_output, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &outputgrpsize, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &parallel_input, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &msgbuf_size, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &binary_output, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( outfilename, 255, MPI_CHAR, 0, MPI_COMM_WORLD); MPI_Bcast( infilename, 255, MPI_CHAR, 0, MPI_COMM_WORLD); MPI_Bcast( potfilename, 255, MPI_CHAR, 0, MPI_COMM_WORLD); #ifdef TTBP MPI_Bcast( ttbp_potfilename, 255, MPI_CHAR, 0, MPI_COMM_WORLD); #endif #ifdef EAM2 MPI_Bcast( eam2_emb_E_filename, 255, MPI_CHAR, 0, MPI_COMM_WORLD); MPI_Bcast( eam2_at_rho_filename, 255, MPI_CHAR, 0, MPI_COMM_WORLD); #ifdef EEAM MPI_Bcast( eeam_mod_E_filename, 255, MPI_CHAR, 0, MPI_COMM_WORLD); #endif #endif #ifdef ADP MPI_Bcast( adp_upotfile, 255, MPI_CHAR, 0, MPI_COMM_WORLD); MPI_Bcast( adp_wpotfile, 255, MPI_CHAR, 0, MPI_COMM_WORLD); #endif #ifdef MEAM MPI_Bcast( meam_emb_E_filename, 255, MPI_CHAR, 0, MPI_COMM_WORLD); MPI_Bcast( 
meam_eldensity_filename,255, MPI_CHAR, 0, MPI_COMM_WORLD); MPI_Bcast( meam_t1, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_t2, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_t3, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_f0, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_r0, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_beta0, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_beta1, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_beta2, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_beta3, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_rcut_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_deltar_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_cmin_lin, ntypetriples, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_cmax_lin, ntypetriples, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_e, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_a, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( meam_rho0, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &meam_t_average, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &have_pre_embed_pot, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &have_potfile, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &have_eldensity_file, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &have_embed_potfile, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef TEMPCONTROL MPI_Bcast( &end_temp, 1, REAL, 0, MPI_COMM_WORLD); #endif MPI_Bcast( &cellsz, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &initsz, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &incrsz, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &outbuf_size, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &inbuf_size, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dist_chunk_size, 1, MPI_INT, 0, MPI_COMM_WORLD); #ifdef AND MPI_Bcast( &tempintv, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef BER MPI_Bcast( &tauber, 1, REAL, 0, MPI_COMM_WORLD); #endif #ifdef VISCOUS MPI_Bcast( &viscous_friction, 1, REAL, 0, MPI_COMM_WORLD); #endif #if defined(NVT) || defined(NPT) || defined(STM) MPI_Bcast( &eta , 1 , REAL, 
0, MPI_COMM_WORLD); MPI_Bcast( &isq_tau_eta , 1 , REAL, 0, MPI_COMM_WORLD); #ifdef UNIAX MPI_Bcast( &eta_rot , 1 , REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &isq_tau_eta_rot , 1 , REAL, 0, MPI_COMM_WORLD); #endif #endif #if defined(STM) || defined(FRAC) || defined(FTG) MPI_Bcast( &stadium, 2 , REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &stadium2, 2 , REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &center, 2 , REAL, 0, MPI_COMM_WORLD); #endif #ifdef DAMP MPI_Bcast( &stadium, 3 , REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &stadium2, 3 , REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &center, 3 , REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &damptemp, 1 , REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &delta_finnis, 1 , REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &zeta_0, 1 , REAL, 0, MPI_COMM_WORLD); #endif #ifdef NPT MPI_Bcast( &xi, DIM, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &isq_tau_xi, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &pressure_ext, DIM, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &use_curr_pressure, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &pressure_end, DIM, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &cell_size_tolerance, 1, REAL, 0, MPI_COMM_WORLD); #endif #if defined(CORRELATE) MPI_Bcast( &correl_omode, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &correl_tmax, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &correl_rmax, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #if defined(CORRELATE) || defined(MSQD) MPI_Bcast( &correl_start, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &correl_end, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &correl_ts, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &correl_int, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &msqd_ntypes, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &msqd_vtypes, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef NMOLDYN MPI_Bcast( &nmoldyn_int, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &nmoldyn_veloc, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef DSF MPI_Bcast( &dsf_int, 1, MPI_INT, 0, MPI_COMM_WORLD); if (NULL==dsf_weight) { dsf_weight = (real *) malloc( ntypes * sizeof(real) ); if 
(NULL==dsf_weight) error("cannot allocate dsf_weight"); } MPI_Bcast( dsf_weight, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &dsf_nk, 1, MPI_INT, 0, MPI_COMM_WORLD); if ((myid>0) && (dsf_nk>0)) { dsf_k0 = (int *) malloc( DIM * dsf_nk * sizeof(int) ); dsf_kdir = (int *) malloc( DIM * dsf_nk * sizeof(int) ); dsf_kmax = (int *) malloc( dsf_nk * sizeof(int) ); if ((NULL==dsf_k0) || (NULL==dsf_kdir) || (NULL==dsf_kmax)) error("cannot allocate dsf arrays"); } MPI_Bcast( dsf_k0, DIM*dsf_nk, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( dsf_kdir, DIM*dsf_nk, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( dsf_kmax, dsf_nk, MPI_INT, 0, MPI_COMM_WORLD); #endif #if defined(HC) || defined(NVX) MPI_Bcast( &hc_int, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &hc_start, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef HC MPI_Bcast( &hc_av_start, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef NVX MPI_Bcast( &hc_nlayers, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &hc_count, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &hc_heatcurr, 1, REAL, 0, MPI_COMM_WORLD); #endif #ifdef LASER MPI_Bcast( &laser_rescale_mode,1,MPI_INT,0,MPI_COMM_WORLD); MPI_Bcast( &laser_dir, DIM , MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &laser_mu, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &laser_delta_temp, 1, REAL, 0, MPI_COMM_WORLD); #ifndef FDTD MPI_Bcast( &laser_sigma_e, 1, REAL, 0, MPI_COMM_WORLD); #endif MPI_Bcast( &laser_sigma_t, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &laser_t_0, 1, REAL, 0, MPI_COMM_WORLD); //MPI_Bcast( &laser_sigma_e1, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &laser_sigma_t1, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &laser_t_1, 1, REAL, 0, MPI_COMM_WORLD); #ifdef LASERYZ MPI_Bcast( &laser_sigma_w_y, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &laser_sigma_w_z, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &laser_sigma_w0, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &laser_tem_mode, 3, MPI_INT, 0, MPI_COMM_WORLD); #endif #endif #ifdef PDECAY MPI_Bcast( &xipdecay, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ramp_start,1, REAL, 
0, MPI_COMM_WORLD); MPI_Bcast( &ramp_end,1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &pdecay_mode, 1, REAL, 0, MPI_COMM_WORLD); //MY MOD: Hotfix fuer pdecay für +/-y MPI_Bcast( &ramp_y0max, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ramp_y1max, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ramp_y0min, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ramp_y1min, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &pdecay_surfx, 1, REAL, 0, MPI_COMM_WORLD); #endif //MY MOD: Non-refl. bnd-conds. #ifdef NRB MPI_Bcast( &nrb_alat, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &nrb_eps, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &nrbk, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &nrb_overwrite, 1, MPI_INT, 0, MPI_COMM_WORLD); // MPI_Bcast( &nrb_input_file,255, MPI_CHAR, 0, MPI_COMM_WORLD); //nicht noetig, da sowieso nur proc 0 einliest! MPI_Bcast( &nrb_readfile, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif //MY MOD shiftx_front, shiftx_rear MPI_Bcast (&shiftx_front, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast (&shiftx_rear,1,REAL,0,MPI_COMM_WORLD); MPI_Bcast (&shifty_front, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast (&shifty_rear,1,REAL,0,MPI_COMM_WORLD); MPI_Bcast (&shiftz_front, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast (&shiftz_rear,1,REAL,0,MPI_COMM_WORLD); //MY MOD: filter auch in y-dir #ifdef FILTER MPI_Bcast( &filter_min_x, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &filter_max_x, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &filter_min_y, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &filter_max_y, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &filter_min_z, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &filter_max_z, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &filter_int, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef TTM //MPI_Bcast( &fd_g, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &fd_update_steps,1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fd_ext, DIM, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fd_one_d, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fd_c, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &fd_gamma, 1, REAL, 0, MPI_COMM_WORLD); //MPI_Bcast( &fd_k, 1, 
REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &fd_n_timesteps, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &ttm_int, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &init_t_el, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &fix_t_el, 1, MPI_INT, 0, MPI_COMM_WORLD); /////////MY MOD MPI_Bcast( &fd_min_atoms, 1, MPI_INT, 0, MPI_COMM_WORLD); #if defined(FDTD) || defined(LASER) || defined(TMM) MPI_Bcast( &lambda, 1, REAL,0, MPI_COMM_WORLD); #endif MPI_Bcast( &atomic_weight, 1, REAL,0, MPI_COMM_WORLD); MPI_Bcast( &atomic_charge, 1, REAL,0, MPI_COMM_WORLD); #if defined(FDTD) || defined(LASER) || defined(TMM) MPI_Bcast( &I0, 1, REAL,0, MPI_COMM_WORLD); MPI_Bcast( &laser_sigma_t, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &laser_t_0, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &laser_sigma_t1, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &laser_t_1, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &lambda, 1, REAL,0, MPI_COMM_WORLD); #ifdef TMM MPI_Bcast( &tmm_absorption_threshold, 1, REAL,0, MPI_COMM_WORLD); #endif #endif #ifdef TTM1D MPI_Bcast( &ttmdimx, 1, MPI_INT,0, MPI_COMM_WORLD); MPI_Bcast( &vlatdim, 1, MPI_INT,0, MPI_COMM_WORLD); MPI_Bcast( &vlatbuffer, 1, MPI_INT,0, MPI_COMM_WORLD); #endif #ifdef FDTD MPI_Bcast( &srcw, 1, REAL,0, MPI_COMM_WORLD); MPI_Bcast( &srcx, 1, REAL,0, MPI_COMM_WORLD); MPI_Bcast( &bw, 1, MPI_INT,0, MPI_COMM_WORLD); MPI_Bcast( &Sc, 1, REAL,0,MPI_COMM_WORLD); #endif //FDTD #endif /* TTM */ //MYMOD: LOCAL ORDER PARAM #ifdef LOD MPI_Bcast( &alat, 1, REAL,0, MPI_COMM_WORLD); MPI_Bcast( &lodnorm, 1, REAL,0, MPI_COMM_WORLD); #endif #ifdef DIRICHLET MPI_Bcast(&dirichlet_surfx,1,REAL,0,MPI_COMM_WORLD); #endif #ifdef STRESS_TENS MPI_Bcast( &press_int , 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &presstens_ext, DIM*(DIM+1)/2, REAL, 0, MPI_COMM_WORLD); #endif #if defined(FRAC) || defined(FTG) MPI_Bcast( &dotepsilon0 , 1, REAL , 0, MPI_COMM_WORLD); MPI_Bcast( &expansionmode , 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &gamma_bar , 1, REAL , 0, MPI_COMM_WORLD); MPI_Bcast( &gamma_min , 1, REAL , 0, 
MPI_COMM_WORLD); MPI_Bcast( &gamma_damp , 1, REAL , 0, MPI_COMM_WORLD); MPI_Bcast( &dampingmode , 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &delta_ftg , 1, REAL , 0, MPI_COMM_WORLD); #endif #ifdef FTG MPI_Bcast( &Tleft, 1, REAL , 0, MPI_COMM_WORLD); MPI_Bcast( &Tright, 1, REAL , 0, MPI_COMM_WORLD); MPI_Bcast( &nslices, 1, MPI_INT , 0, MPI_COMM_WORLD); MPI_Bcast( &nslices_Left, 1, MPI_INT , 0, MPI_COMM_WORLD); MPI_Bcast( &nslices_Right, 1, MPI_INT , 0, MPI_COMM_WORLD); if (NULL==ninslice) { ninslice = (int *) malloc(nslices*sizeof(int)); if (NULL==ninslice) error("Cannot allocate memory for ninslice vector on client.\n"); } if (NULL==E_kin_ftg) { E_kin_ftg = (real *) malloc(nslices*sizeof(real)); if (NULL==E_kin_ftg) error("Cannot allocate memory for E_kin_ftg vector on client.\n"); } if (NULL==gamma_ftg) { gamma_ftg = (real *) malloc(nslices*sizeof(real)); if (NULL==gamma_ftg) error("Cannot allocate memory for gamma_ftg vector on client.\n"); for (i=0;i<nslices;i++) gamma_ftg[i] = 0.0; } MPI_Bcast( gamma_ftg, nslices, REAL, 0, MPI_COMM_WORLD); #endif #ifdef FINNIS MPI_Bcast( &delta_finnis , 1, REAL , 0, MPI_COMM_WORLD); MPI_Bcast( &zeta_0 , 1, REAL , 0, MPI_COMM_WORLD); #endif #ifdef GLOK MPI_Bcast( &glok_ekin_threshold, 1, REAL, 0, MPI_COMM_WORLD); #endif #ifdef MIX MPI_Bcast( &glok_mix, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &glok_mixdec, 1, REAL, 0, MPI_COMM_WORLD); #endif #ifdef ADAPTGLOK MPI_Bcast( &glok_fmaxcrit, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &glok_incfac, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &glok_decfac, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &glok_maxtimestep, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &starttimestep, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &glok_minsteps, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &min_nPxF, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &glok_int, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef RIGID MPI_Bcast( &nsuperatoms, 1, MPI_INT, 0, MPI_COMM_WORLD); if (NULL==superatom) { superatom = (int *) malloc( vtypes 
* sizeof(int) ); if (NULL==superatom) error("Cannot allocate memory for superatom on client."); else for (k=0; k<vtypes; k++) superatom[k] = -1; } MPI_Bcast( superatom, vtypes, MPI_INT, 0, MPI_COMM_WORLD); if (NULL==superrestrictions) { superrestrictions = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==superrestrictions) error("Cannot allocate memory for superrestrictions on client."); else for (k=0; k<vtypes; k++) superrestrictions[k] = nullv; } MPI_Bcast( superrestrictions, vtypes * DIM, REAL, 0, MPI_COMM_WORLD); if (NULL==superforce) superforce = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==superforce) error("Cannot allocate memory for superforce on client."); #endif #ifdef DEFORM MPI_Bcast( &max_deform_int, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &deform_size, 1, REAL, 0, MPI_COMM_WORLD); if (NULL==deform_shift) { deform_shift = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==deform_shift) error("Cannot allocate memory for deform_shift on client."); } MPI_Bcast( deform_shift, vtypes * DIM, REAL, 0, MPI_COMM_WORLD); if (NULL==shear_def) { shear_def = (int *) malloc( vtypes * sizeof(int) ); if (NULL==shear_def) error("Cannot allocate memory for shear_def on client."); for (i=0; i<vtypes; i++) shear_def[i] = 0; } MPI_Bcast( shear_def, vtypes, MPI_INT, 0, MPI_COMM_WORLD); if (NULL==deform_shear) { deform_shear = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==deform_shear) error("Cannot allocate memory for deform_shear on client."); } MPI_Bcast( deform_shear, vtypes * DIM, REAL, 0, MPI_COMM_WORLD); if (NULL==deform_base) { deform_base = (vektor *) malloc( vtypes * sizeof(vektor) ); if (NULL==deform_base) error("Cannot allocate memory for deform_base on client."); } MPI_Bcast( deform_base, vtypes * DIM, REAL, 0, MPI_COMM_WORLD); #endif #ifdef CYCLE MPI_Bcast( &lindef_freq, 1, REAL, 0, MPI_COMM_WORLD); #endif #ifdef HOMDEF MPI_Bcast( &lindef_size, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &lindef_int , 1, MPI_INT, 0, 
MPI_COMM_WORLD); MPI_Bcast( &lindef_x, DIM, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &lindef_y, DIM, REAL, 0, MPI_COMM_WORLD); #ifndef TWOD MPI_Bcast( &lindef_z, DIM, REAL, 0, MPI_COMM_WORLD); #endif MPI_Bcast( &shear_module, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &bulk_module, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &relax_rate, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &relax_mode, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #if defined(HOMDEF) || defined(NPT_axial) MPI_Bcast( &relax_dirs, DIM, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef SHOCK MPI_Bcast( &shock_strip, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &shock_speed, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &shock_speed_l, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &shock_speed_r, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &shock_incr, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &shock_mode, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef CNA MPI_Bcast( &cna_start, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &cna_end, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &cna_int, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &cna_rcut, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &cna_ll, 3, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &cna_ur, 3, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &cna_writev, 8, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &cna_write_n, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &cna_write_statistics, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &cna_cristv, 4, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &cna_crist_n, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef DISLOC MPI_Bcast( &min_dpot, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &min_dsp2, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &dem_int, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dsp_int, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &calc_Epot_ref, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &reset_Epot_step, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &Epot_diff, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef AVPOS MPI_Bcast( &avpos_start, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &avpos_end, 1, 
MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &avpos_int, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &avpos_res, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &avpos_steps, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &avpos_nwrites, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &avpos_npwrites, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef ATDIST error("Option ATDIST is not supported under MPI"); #endif #ifdef DIFFPAT error("Option DIFFPAT is not supported under MPI"); #endif #ifdef ORDPAR if (NULL==op_r2_cut) { op_r2_cut = (real *) calloc(ntypes*ntypes, sizeof(real)); if (NULL==op_r2_cut) error("Cannot allocate memory for op_r2_cut\n"); } MPI_Bcast( &op_r2_cut, ntypes*ntypes, REAL, 0, MPI_COMM_WORLD); if (NULL==op_weight) { op_weight = (real *) calloc(ntypes*ntypes, sizeof(real)); if (NULL==op_weight) error("Cannot allocate memory for op_weight\n"); } MPI_Bcast( &op_weight, ntypes*ntypes, REAL, 0, MPI_COMM_WORLD); #endif #ifdef CG MPI_Bcast( &cg_fr, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &cg_reset_int, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &linmin_maxsteps, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &linmin_tol, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &linmin_dmax, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &linmin_dmin, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &cg_glimit, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &cg_zeps, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &cg_infolevel, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &cg_mode, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef ACG MPI_Bcast( &acg_init_alpha, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &acg_decfac, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &acg_incfac, 1, REAL, 0, MPI_COMM_WORLD); #endif #ifdef SOCKET_IO MPI_Bcast( &socket_int, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef UNIAX MPI_Bcast( &uniax_inert, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &uniax_sig, 3, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &uniax_eps, 3, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &uniax_r_cut, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &uniax_r2_cut, 1, 
REAL, 0, MPI_COMM_WORLD); #endif #ifdef PAIR /* analytically defined potentials */ MPI_Bcast( &have_pre_pot, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &have_potfile, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( r_cut_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( r_begin, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( pot_res, ntypepairs, REAL, 0, MPI_COMM_WORLD); /* Lennard-Jones */ MPI_Bcast( lj_epsilon_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( lj_sigma_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); /* Gauss Part of Lennard-Jones-Gauss */ MPI_Bcast( ljg_eps_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ljg_r0_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ljg_sig_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); /* Morse */ MPI_Bcast( morse_epsilon_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( morse_sigma_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( morse_alpha_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); /* Buckingham */ MPI_Bcast( buck_a_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( buck_c_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( buck_sigma_lin, ntypepairs, REAL, 0, MPI_COMM_WORLD); /* harmonic potential for shell model */ MPI_Bcast( spring_const, ntypepairs-ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &fix_bks, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef KIM MPI_Bcast( kim_model_name, 255, MPI_CHAR, 0, MPI_COMM_WORLD); if (NULL == kim_el_names) kim_el_names = (char **)calloc(ntypes, sizeof(char *)); for (i=0;i<ntypes;i++) { if (NULL==kim_el_names[i]) kim_el_names[i] = (char *)calloc(3, sizeof(char)); MPI_Bcast(kim_el_names[i], 3, MPI_CHAR, 0, MPI_COMM_WORLD); } #endif #ifdef FEFL /* harmonic potential for Einstein crystal */ MPI_Bcast( spring_rate, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &lambda, 1, REAL, 0, MPI_COMM_WORLD); #endif #if defined(COVALENT) || defined(NNBR_TABLE) MPI_Bcast( &neigh_len, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef TTBP MPI_Bcast( ttbp_constant, ntypes, REAL, 0, 
MPI_COMM_WORLD); MPI_Bcast( ttbp_constant2, 8, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ttbp_sp, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ttbp_cut, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ttbp_vas, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef SLLOD MPI_Bcast(&shear_rate, 2, REAL, 0, MPI_COMM_WORLD); #endif #ifdef STIWEB MPI_Bcast( stiweb_a, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( stiweb_b, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( stiweb_p, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( stiweb_q, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( stiweb_a1, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( stiweb_de, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( stiweb_a2, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( stiweb_ga, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( stiweb_la, ntypes*ntypepairs, REAL, 0, MPI_COMM_WORLD); #endif #if defined(TERSOFF) || defined(TERSOFFMOD) || defined(BRENNER) #if defined(TERSOFF2) || defined(TERSOFFMOD2) || defined(BRENNER) nvalues = ntypepairs; #else nvalues = ntypes; #endif MPI_Bcast( ters_r_cut, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_r0, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_a, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_b, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_la, ntypepairs, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_mu, ntypepairs, REAL, 0, MPI_COMM_WORLD); #if defined(TERSOFF) || defined(BRENNER) MPI_Bcast( ters_chi, ntypepairs-ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_om, ntypepairs-ntypes, REAL, 0, MPI_COMM_WORLD); /* nvalues is ntypes for TERSOFF and ntypepairs for TERSOFF2 */ MPI_Bcast( ters_ga, nvalues, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_n, nvalues, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_c, nvalues, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_d, nvalues, REAL, 0, MPI_COMM_WORLD); #else /* TERSOFFMOD */ MPI_Bcast( ters_eta, nvalues, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_delta, nvalues, REAL, 0, MPI_COMM_WORLD); 
MPI_Bcast( ters_alpha, nvalues, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_beta, nvalues, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_c1, nvalues, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_c2, nvalues, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_c3, nvalues, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_c4, nvalues, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ters_c5, nvalues, REAL, 0, MPI_COMM_WORLD); #endif MPI_Bcast( ters_h, nvalues, REAL, 0, MPI_COMM_WORLD); #endif #ifdef BRENNER #endif #ifdef KEATING MPI_Bcast( keating_r_cut, ntypepairs, REAL, 0,MPI_COMM_WORLD); MPI_Bcast( keating_alpha, ntypepairs, REAL, 0,MPI_COMM_WORLD); MPI_Bcast( keating_d, ntypepairs, REAL, 0,MPI_COMM_WORLD); MPI_Bcast( keating_beta, ntypes*ntypepairs, REAL, 0,MPI_COMM_WORLD); #endif #if defined(EWALD) || defined(COULOMB) || defined(USEFCS) || defined(SM) MPI_Bcast( charge, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &coul_eng, 1, REAL, 0, MPI_COMM_WORLD); #endif #ifdef SM MPI_Bcast( &sm_fixed_charges, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( sm_chi_0, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( sm_J_0, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( sm_Z, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( sm_zeta, ntypes, REAL, 0, MPI_COMM_WORLD); #endif #ifdef USEFCS MPI_Bcast( &fcs_method, 1, MPI_INT, 0, MPI_COMM_WORLD); #ifdef PAIR MPI_Bcast( &fcs_near_field_flag,1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_rcut, 1, REAL, 0, MPI_COMM_WORLD); #endif MPI_Bcast( &fcs_tolerance, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_grid_dim, 3, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_max_iter, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_iter_tolerance, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_pepc_eps, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_pepc_theta, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_pepc_nthreads, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_fmm_absrel, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_fmm_dcorr, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_fmm_do_tune , 1, 
MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_vmg_max_level, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_vmg_smooth_steps,1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_vmg_gamma, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_vmg_near_field_cells,1,MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_vmg_interpol_order,1,MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_vmg_discr_order,1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_pp3mg_ghosts, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_pp3mg_degree, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_pp3mg_max_part, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_p2nfft_intpol_order, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &fcs_p2nfft_epsI, 1, REAL, 0, MPI_COMM_WORLD); #endif #if defined(EWALD) || defined(COULOMB) MPI_Bcast( &ew_kappa, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ew_r2_cut, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ew_kcut, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ew_test, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &ew_nmax, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &coul_res, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &coul_begin, 1, REAL, 0, MPI_COMM_WORLD); #endif #if defined(DIPOLE) || defined(KERMODE) MPI_Bcast( &dp_fix, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &dp_mix, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &dp_tol, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &dp_self, 1, REAL, 0, MPI_COMM_WORLD); if (NULL==dp_b) { dp_b = (real *) malloc( ntypepairs * sizeof(real) ); if (NULL==dp_b) error("Cannot allocate memory for dp_b on client."); } MPI_Bcast( dp_b, ntypepairs, REAL, 0, MPI_COMM_WORLD); if (NULL==dp_c) { dp_c = (real *) malloc( ntypepairs * sizeof(real) ); if (NULL==dp_c) error("Cannot allocate memory for dp_c on client."); } MPI_Bcast( dp_c, ntypepairs, REAL, 0, MPI_COMM_WORLD); if (NULL==dp_alpha) { dp_alpha = (real *) malloc( ntypes * sizeof(real) ); if (NULL==dp_alpha) error("Cannot allocate memory for dp_alpha on client."); } MPI_Bcast( dp_alpha, ntypes, REAL, 0, MPI_COMM_WORLD); #endif #if 
defined(DIPOLE) || defined(KERMODE) || defined(MORSE) if (NULL==ms_D) { ms_D = (real *) malloc( ntypepairs * sizeof(real) ); if (NULL==ms_D) error("Cannot allocate memory for ms_D on client."); } MPI_Bcast( ms_D, ntypepairs, REAL, 0, MPI_COMM_WORLD); if (NULL==ms_gamma) { ms_gamma = (real *) malloc( ntypepairs * sizeof(real) ); if (NULL==ms_gamma) error("Cannot allocate memory for ms_gamma on client."); } MPI_Bcast( ms_gamma, ntypepairs, REAL, 0, MPI_COMM_WORLD); if (NULL==ms_harm_c) { ms_harm_c = (real *) malloc( ntypepairs * sizeof(real) ); if (NULL==ms_harm_c) error("Cannot allocate memory for ms_harm_c on client."); } MPI_Bcast( ms_harm_c, ntypepairs, REAL, 0, MPI_COMM_WORLD); if (NULL==ms_r2_min) { ms_r2_min = (real *) malloc( ntypepairs * sizeof(real) ); if (NULL==ms_r2_min) error("Cannot allocate memory for ms_r2_min on client."); } MPI_Bcast( ms_r2_min, ntypepairs, REAL, 0, MPI_COMM_WORLD); if (NULL==ms_r0) { ms_r0 = (real *) malloc( ntypepairs * sizeof(real) ); if (NULL==ms_r0) error("Cannot allocate memory for ms_r0 on client."); } MPI_Bcast( ms_r0, ntypepairs, REAL, 0, MPI_COMM_WORLD); #endif #ifdef KERMODE MPI_Bcast( &yuk_beta, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &yuk_smoothlength, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ke_rcut, 1, REAL, 0, MPI_COMM_WORLD); #endif #ifdef EPITAX MPI_Bcast( epitax_rate, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( epitax_type, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( epitax_mass, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( epitax_temp, ntypes, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &epitax_cutoff, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &epitax_maxsteps, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &epitax_startstep, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &epitax_ctrl, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &epitax_height, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &epitax_speed, 1, REAL, 0, MPI_COMM_WORLD); #endif #ifdef RELAX MPI_Bcast( &max_sscount, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif #ifdef EXTPOT 
MPI_Bcast( &have_extpotfile, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( extpotfilename, 255, MPI_CHAR, 0, MPI_COMM_WORLD); MPI_Bcast( &ep_n, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &ep_key, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &ep_nind, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &ep_max_int, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( ep_pos, 3*ep_n, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ep_vel, 3*ep_n, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( ep_dir, 3*ep_n, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ep_a, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ep_rcut, 1, REAL, 0, MPI_COMM_WORLD); #endif #ifdef CBE MPI_Bcast( &num_spus, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &num_bufs, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &cbe_pot_steps, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &cbe_pot_max, 1, REAL, 0, MPI_COMM_WORLD); #endif MPI_Bcast(&use_header,1, MPI_INT, 0, MPI_COMM_WORLD); /* broadcast integrator to other CPUs */ switch (ensemble) { case ENS_NVE: move_atoms = move_atoms_nve; break; case ENS_TTM: move_atoms = move_atoms_ttm; break; case ENS_MIK: move_atoms = move_atoms_mik; break; case ENS_NVT: move_atoms = move_atoms_nvt; break; case ENS_NPT_ISO: move_atoms = move_atoms_npt_iso; break; case ENS_NPT_AXIAL: move_atoms = move_atoms_npt_axial; break; case ENS_GLOK: move_atoms = move_atoms_nve; break; case ENS_FRAC: move_atoms = move_atoms_frac; break; case ENS_SLLOD: move_atoms = move_atoms_sllod; break; case ENS_NVX: move_atoms = move_atoms_nvx; break; case ENS_STM: move_atoms = move_atoms_stm; break; case ENS_FTG: move_atoms = move_atoms_ftg; break; case ENS_FINNIS: move_atoms = move_atoms_finnis; break; case ENS_CG: break; default: if (0==myid) error("unknown ensemble in broadcast"); break; } #ifdef LASER /* broadcast laser rescaling routine to other CPUs */ switch (laser_rescale_mode) { case 0: do_laser_rescale = laser_rescale_dummy; break; case 1: do_laser_rescale = laser_rescale_1; break; case 2: do_laser_rescale = laser_rescale_2; break; case 3: 
do_laser_rescale = laser_rescale_3; break; #ifdef TTM case 4: do_laser_rescale = laser_rescale_ttm; break; #endif default: if (0==myid) error("unknown laser rescaling mode in broadcast"); break; } #ifdef LASERYZ switch ( laser_tem_mode.x ) /* Gauss Laguerre = 0, Hermite = 1 */ { case 0: { switch ( laser_tem_mode.y ) { case 0: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_laguerre_00; break; case 1: laser_intensity_profile = laser_intensity_profile_laguerre_01; break; case 2: laser_intensity_profile = laser_intensity_profile_laguerre_02; break; case 3: laser_intensity_profile = laser_intensity_profile_laguerre_03; break; } } break; case 1: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_laguerre_10; break; case 1: laser_intensity_profile = laser_intensity_profile_laguerre_11; break; case 2: laser_intensity_profile = laser_intensity_profile_laguerre_12; break; case 3: laser_intensity_profile = laser_intensity_profile_laguerre_13; break; } } break; case 2: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_laguerre_20; break; case 1: laser_intensity_profile = laser_intensity_profile_laguerre_21; break; case 2: laser_intensity_profile = laser_intensity_profile_laguerre_22; break; case 3: laser_intensity_profile = laser_intensity_profile_laguerre_23; break; } } break; case 3: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_laguerre_30; break; case 1: laser_intensity_profile = laser_intensity_profile_laguerre_31; break; case 2: laser_intensity_profile = laser_intensity_profile_laguerre_32; break; case 3: laser_intensity_profile = laser_intensity_profile_laguerre_33; break; } } break; } } break; case 1: { switch ( laser_tem_mode.y ) { case 0: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_hermite_00; break; case 1: laser_intensity_profile = 
laser_intensity_profile_hermite_01; break; case 2: laser_intensity_profile = laser_intensity_profile_hermite_02; break; case 3: laser_intensity_profile = laser_intensity_profile_hermite_03; break; } } break; case 1: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_hermite_10; break; case 1: laser_intensity_profile = laser_intensity_profile_hermite_11; break; case 2: laser_intensity_profile = laser_intensity_profile_hermite_12; break; case 3: laser_intensity_profile = laser_intensity_profile_hermite_13; break; } } break; case 2: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_hermite_20; break; case 1: laser_intensity_profile = laser_intensity_profile_hermite_21; break; case 2: laser_intensity_profile = laser_intensity_profile_hermite_22; break; case 3: laser_intensity_profile = laser_intensity_profile_hermite_23; break; } } break; case 3: { switch ( laser_tem_mode.z ) { case 0: laser_intensity_profile = laser_intensity_profile_hermite_30; break; case 1: laser_intensity_profile = laser_intensity_profile_hermite_31; break; case 2: laser_intensity_profile = laser_intensity_profile_hermite_32; break; case 3: laser_intensity_profile = laser_intensity_profile_hermite_33; break; } } break; } } break; } #endif /* LASERYZ */ #endif /* LASER */ #ifdef ADA MPI_Bcast( &ada_nbr_r2cut, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &ada_write_int, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &ada_crystal_structure, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &ada_default_type, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &ada_latticeConst, 1, REAL, 0, MPI_COMM_WORLD); #endif #ifdef NYETENSOR MPI_Bcast( &nye_rotationAxis_x, 3, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &nye_rotationAxis_y, 3, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &nye_rotationAxis_z, 3, REAL, 0, MPI_COMM_WORLD); #endif #ifdef LOADBALANCE MPI_Bcast( &lb_contractionRate, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &lb_frequency, 1, MPI_INT, 0, 
MPI_COMM_WORLD); MPI_Bcast( &lb_writeStatus, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &lb_maxLoadTolerance, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &lb_maxLoadToleranceFactorForReset, 1, REAL, 0, MPI_COMM_WORLD); MPI_Bcast( &lb_preRuns, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &lb_iterationsPerReset, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &lb_minStepsBetweenReset, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast( &lb_balancingType, 1, MPI_INT, 0, MPI_COMM_WORLD); #endif } #endif /* MPI */
37.618855
148
0.634155
[ "geometry", "vector", "model" ]
a2dfb11ccd04f61fe7f1cb96dddec3037c4358d4
14,609
h
C
src/navier_stokes/impose_impenetrability_element.h
pkeuchel/oomph-lib
37c1c61425d6b9ea1c2ddceef63a68a228af6fa4
[ "RSA-MD" ]
4
2020-11-16T12:25:09.000Z
2021-06-29T08:53:25.000Z
src/navier_stokes/impose_impenetrability_element.h
pkeuchel/oomph-lib
37c1c61425d6b9ea1c2ddceef63a68a228af6fa4
[ "RSA-MD" ]
2
2020-05-05T22:41:37.000Z
2020-05-10T14:14:17.000Z
src/navier_stokes/impose_impenetrability_element.h
pkeuchel/oomph-lib
37c1c61425d6b9ea1c2ddceef63a68a228af6fa4
[ "RSA-MD" ]
3
2021-01-31T14:09:20.000Z
2021-06-07T07:20:51.000Z
// LIC// ==================================================================== // LIC// This file forms part of oomph-lib, the object-oriented, // LIC// multi-physics finite-element library, available // LIC// at http://www.oomph-lib.org. // LIC// // LIC// Copyright (C) 2006-2021 Matthias Heil and Andrew Hazel // LIC// // LIC// This library is free software; you can redistribute it and/or // LIC// modify it under the terms of the GNU Lesser General Public // LIC// License as published by the Free Software Foundation; either // LIC// version 2.1 of the License, or (at your option) any later version. // LIC// // LIC// This library is distributed in the hope that it will be useful, // LIC// but WITHOUT ANY WARRANTY; without even the implied warranty of // LIC// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // LIC// Lesser General Public License for more details. // LIC// // LIC// You should have received a copy of the GNU Lesser General Public // LIC// License along with this library; if not, write to the Free Software // LIC// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA // LIC// 02110-1301 USA. // LIC// // LIC// The authors may be contacted at oomph-lib@maths.man.ac.uk. // LIC// // LIC//==================================================================== #ifndef OOMPH_IMPOSE_IMPENETRABLE_ELEMENTS_HEADER #define OOMPH_IMPOSE_IMPENETRABLE_ELEMENTS_HEADER // Config header generated by autoconfig #ifdef HAVE_CONFIG_H #include <oomph-lib-config.h> #endif namespace oomph { //======================================================================== /// ImposeImpenetrabilityElement /// are elements that coincide with the faces of /// higher-dimensional "bulk" elements. They are used on /// boundaries where we would like to impose impenetrability. 
//======================================================================== template<class ELEMENT> class ImposeImpenetrabilityElement : public virtual FaceGeometry<ELEMENT>, public virtual FaceElement { private: /// Lagrange Id unsigned Id; public: /// Constructor takes a "bulk" element, the /// index that identifies which face the /// ImposeImpenetrabilityElement is supposed /// to be attached to, and the face element ID ImposeImpenetrabilityElement(FiniteElement* const& element_pt, const int& face_index, const unsigned& id = 0) : FaceGeometry<ELEMENT>(), FaceElement() { // set the Id Id = id; // Build the face element element_pt->build_face_element(face_index, this); // we need 1 additional values for each FaceElement node Vector<unsigned> n_additional_values(nnode(), 1); // add storage for lagrange multipliers and set the map containing // the position of the first entry of this face element's // additional values. add_additional_values(n_additional_values, id); } /// Fill in the residuals void fill_in_contribution_to_residuals(Vector<double>& residuals) { // Call the generic routine with the flag set to 0 fill_in_generic_contribution_to_residuals_parall_lagr_multiplier( residuals, GeneralisedElement::Dummy_matrix, 0); } /// Fill in contribution from Jacobian void fill_in_contribution_to_jacobian(Vector<double>& residuals, DenseMatrix<double>& jacobian) { // Call the generic routine with the flag set to 1 fill_in_generic_contribution_to_residuals_parall_lagr_multiplier( residuals, jacobian, 1); } /// Overload the output function void output(std::ostream& outfile) { FiniteElement::output(outfile); } /// Output function: x,y,[z],u,v,[w],p in tecplot format void output(std::ostream& outfile, const unsigned& nplot) { FiniteElement::output(outfile, nplot); } /// The "global" intrinsic coordinate of the element when /// viewed as part of a geometric object should be given by /// the FaceElement representation, by default /// This final over-ride is required because 
both SolidFiniteElements /// and FaceElements overload zeta_nodal double zeta_nodal(const unsigned& n, const unsigned& k, const unsigned& i) const { return FaceElement::zeta_nodal(n, k, i); } protected: /// Helper function to compute the residuals and, if flag==1, the /// Jacobian void fill_in_generic_contribution_to_residuals_parall_lagr_multiplier( Vector<double>& residuals, DenseMatrix<double>& jacobian, const unsigned& flag) { // Find out how many nodes there are unsigned n_node = nnode(); // Dimension of element unsigned dim_el = dim(); // Set up memory for the shape functions Shape psi(n_node); // Set the value of n_intpt unsigned n_intpt = integral_pt()->nweight(); // to store local equation number int local_eqn = 0; int local_unknown = 0; // to store normal vector Vector<double> norm_vec(dim_el + 1); // get the value at which the velocities are stored Vector<unsigned> u_index(dim_el + 1); ELEMENT* el_pt = dynamic_cast<ELEMENT*>(this->bulk_element_pt()); for (unsigned i = 0; i < dim_el + 1; i++) { u_index[i] = el_pt->u_index_nst(i); } // Loop over the integration points for (unsigned ipt = 0; ipt < n_intpt; ipt++) { // Get the integral weight double w = integral_pt()->weight(ipt); // Jacobian of mapping double J = J_eulerian_at_knot(ipt); // Premultiply the weights and the Jacobian double W = w * J; // Calculate the shape functions shape_at_knot(ipt, psi); // compute the velocity and the Lagrange multiplier Vector<double> interpolated_u(dim_el + 1, 0.0); double lambda = 0.0; // Loop over nodes for (unsigned j = 0; j < n_node; j++) { // Assemble the velocity component for (unsigned i = 0; i < dim_el + 1; i++) { interpolated_u[i] += nodal_value(j, u_index[i]) * psi(j); } // Cast to a boundary node BoundaryNodeBase* bnod_pt = dynamic_cast<BoundaryNodeBase*>(node_pt(j)); // get the node Node* nod_pt = node_pt(j); // Get the index of the first nodal value associated with // this FaceElement unsigned first_index = 
bnod_pt->index_of_first_value_assigned_by_face_element(Id); // Assemble the Lagrange multiplier lambda += nod_pt->value(first_index) * psi(j); } // compute the normal vector outer_unit_normal(ipt, norm_vec); // Assemble residuals and jacobian // Loop over the nodes for (unsigned j = 0; j < n_node; j++) { // Cast to a boundary node BoundaryNodeBase* bnod_pt = dynamic_cast<BoundaryNodeBase*>(node_pt(j)); // Get the index of the first nodal value associated with // this FaceElement unsigned first_index = bnod_pt->index_of_first_value_assigned_by_face_element(Id); // Local eqn number for the l-th component of lamdba // in the j-th element local_eqn = nodal_local_eqn(j, first_index); if (local_eqn >= 0) { for (unsigned i = 0; i < dim_el + 1; i++) { // Assemble residual for lagrange multiplier residuals[local_eqn] += interpolated_u[i] * norm_vec[i] * psi(j) * W; // Assemble Jacobian for Lagrange multiplier: if (flag == 1) { // Loop over the nodes again for unknowns for (unsigned jj = 0; jj < n_node; jj++) { // Local eqn number for the i-th component // of the velocity in the jj-th element local_unknown = nodal_local_eqn(jj, u_index[i]); if (local_unknown >= 0) { jacobian(local_eqn, local_unknown) += norm_vec[i] * psi(jj) * psi(j) * W; } } } } } // Loop over the directions for (unsigned i = 0; i < dim_el + 1; i++) { // Local eqn number for the i-th component of the // velocity in the j-th element local_eqn = nodal_local_eqn(j, u_index[i]); if (local_eqn >= 0) { // Add lagrange multiplier contribution to the bulk equation // Add to residual residuals[local_eqn] += norm_vec[i] * psi(j) * lambda * W; // Do Jacobian too? 
if (flag == 1) { // Loop over the nodes again for unknowns for (unsigned jj = 0; jj < n_node; jj++) { // Cast to a boundary node BoundaryNodeBase* bnode_pt = dynamic_cast<BoundaryNodeBase*>(node_pt(jj)); // Local eqn number for the l-th component of lamdba // in the jj-th element local_unknown = nodal_local_eqn( jj, bnode_pt->index_of_first_value_assigned_by_face_element( Id)); if (local_unknown >= 0) { jacobian(local_eqn, local_unknown) += norm_vec[i] * psi(jj) * psi(j) * W; } } } } } } } } /// The number of "DOF types" that degrees of freedom in this element /// are sub-divided into: Just the solid degrees of freedom themselves. unsigned ndof_types() const { // There is only ever one normal. Plus the constrained velocities. // unsigned ndofndof = 1 + additional_ndof_types(); // std::cout << "ndof: " << ndofndof << std::endl; return (1 + additional_ndof_types()); } unsigned additional_ndof_types() const { // Additional dof types for the constained bulk velocities // two velocities for a 2D problem, 3 for 3D. return (this->dim() + 1); } /// Create a list of pairs for all unknowns in this element, /// so that the first entry in each pair contains the global equation /// number of the unknown, while the second one contains the number /// of the "DOF type" that this unknown is associated with. /// (Function can obviously only be called if the equation numbering /// scheme has been set up.) 
void get_dof_numbers_for_unknowns( std::list<std::pair<unsigned long, unsigned>>& dof_lookup_list) const { // temporary pair (used to store dof lookup prior to // being added to list) std::pair<unsigned, unsigned> dof_lookup; // number of nodes const unsigned n_node = this->nnode(); // Loop over directions in this Face(!)Element unsigned dim_el = this->dim(); // for(unsigned i=0;i<dim_el;i++) { unsigned i = 0; // Loop over the nodes for (unsigned j = 0; j < n_node; j++) { // Cast to a boundary node BoundaryNodeBase* bnod_pt = dynamic_cast<BoundaryNodeBase*>(node_pt(j)); // Local eqn number: int local_eqn = nodal_local_eqn( j, bnod_pt->index_of_first_value_assigned_by_face_element(Id) + i); if (local_eqn >= 0) { // store dof lookup in temporary pair: First entry in pair // is global equation number; second entry is dof type dof_lookup.first = this->eqn_number(local_eqn); dof_lookup.second = i + additional_ndof_types(); // add to list dof_lookup_list.push_front(dof_lookup); } } } //* // Now we do the bulk elements. Each velocity component of a constrained // dof of a different type of FaceElement has a different dof_type. E.g. // Consider the Navier Stokes equations in three spatial dimensions with // parallel outflow (using ImposeParallelOutflowElement with Boundary_id = // 1) and tangential flow (using ImposeTangentialFlowElement with // Boundary_id = 2) imposed along two different boundaries. There will be // 10 dof types: 0 1 2 3 4 5 6 7 8 9 u v w p u1 v1 w1 u2 v2 w2 // Loop over only the nodes of the "bulk" element that are associated // with this "face" element. // cout << "n_node: " << n_node << endl; unsigned const bulk_dim = dim_el + 1; // cout << "bulk_dim: " << bulk_dim << endl; for (unsigned node_i = 0; node_i < n_node; node_i++) { // Loop over the velocity components for (unsigned velocity_i = 0; velocity_i < bulk_dim; velocity_i++) { // Calculating the offset for this Boundary_id. 
// 0 1 2 3 4 5 6 7 8 9 // u v w p u1 v1 w1 u2 v2 w2 // // for the first surface mesh, offset = 4 // for the second surface mesh, offset = 7 // unsigned offset = bulk_dim * Boundary_id + 1; // The local equation number is required to check if the value is // pinned, if it is not pinned, the local equation number is required // to get the global equation number. int local_eqn = Bulk_element_pt->nodal_local_eqn( Bulk_node_number[node_i], velocity_i); // ignore pinned values if (local_eqn >= 0) { // store the dof lookup in temporary pair: First entry in pair // is the global equation number; second entry is the dof type dof_lookup.first = Bulk_element_pt->eqn_number(local_eqn); dof_lookup.second = velocity_i; dof_lookup_list.push_front(dof_lookup); // RRRcout << "Face v: " << dof_lookup.first // RRR << ", doftype: " << dof_lookup.second << endl; } // ignore pinned nodes "if(local-eqn>=0)" } // for loop over the velocity components } // for loop over bulk nodes only } // get unknowns... }; } // namespace oomph #endif
36.25062
80
0.578205
[ "mesh", "object", "shape", "vector", "3d", "solid" ]
a2dfffdd0309cca57c3a037a06b9b708979158f7
2,430
h
C
ios/chrome/browser/ui/table_view/cells/table_view_detail_icon_item.h
chromium/chromium
df46e572c3449a4b108d6e02fbe4f6d24cf98381
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
14,668
2015-01-01T01:57:10.000Z
2022-03-31T23:33:32.000Z
ios/chrome/browser/ui/table_view/cells/table_view_detail_icon_item.h
chromium/chromium
df46e572c3449a4b108d6e02fbe4f6d24cf98381
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
86
2015-10-21T13:02:42.000Z
2022-03-14T07:50:50.000Z
ios/chrome/browser/ui/table_view/cells/table_view_detail_icon_item.h
chromium/chromium
df46e572c3449a4b108d6e02fbe4f6d24cf98381
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
5,941
2015-01-02T11:32:21.000Z
2022-03-31T16:35:46.000Z
// Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef IOS_CHROME_BROWSER_UI_TABLE_VIEW_CELLS_TABLE_VIEW_DETAIL_ICON_ITEM_H_ #define IOS_CHROME_BROWSER_UI_TABLE_VIEW_CELLS_TABLE_VIEW_DETAIL_ICON_ITEM_H_ #import <UIKit/UIKit.h> #import "ios/chrome/browser/ui/table_view/cells/table_view_item.h" // TableViewDetailIconItem is a model class that uses TableViewDetailIconCell. @interface TableViewDetailIconItem : TableViewItem // The filename for the leading icon. If empty, no icon will be shown. @property(nonatomic, copy) NSString* iconImageName; // The main text string. @property(nonatomic, copy) NSString* text; // The detail text string. @property(nonatomic, copy) NSString* detailText; // The layout constraint axis at which |text| and |detailText| should be // aligned. In the case of a vertical layout, the text will adapt its font // size to a title/subtitle style. // Defaults to UILayoutConstraintAxisHorizontal. @property(nonatomic, assign) UILayoutConstraintAxis textLayoutConstraintAxis; @end // TableViewDetailIconCell implements an TableViewCell subclass containing an // optional leading icon and two text labels: a "main" label and a "detail" // label. The layout of the two labels is based on |textLayoutConstraintAxis| // defined as either (1) horizontally laid out side-by-side and filling the full // width of the cell or (2) vertically laid out and filling the full height of // the cell. Labels are truncated as needed to fit in the cell. @interface TableViewDetailIconCell : TableViewCell // UILabels corresponding to |text| and |detailText| from the item. @property(nonatomic, readonly, strong) UILabel* textLabel; // The layout constraint axis of the text labels within the cell. Defaults // to a horizontal, edge aligned layout. 
@property(nonatomic, readwrite, assign) UILayoutConstraintAxis textLayoutConstraintAxis; // Sets the image that should be displayed at the leading edge of the cell. If // set to nil, the icon will be hidden and the text labels will expand to fill // the full width of the cell. - (void)setIconImage:(UIImage*)image; // Sets the detail text. |detailText| can be nil (or empty) to hide the detail // text. - (void)setDetailText:(NSString*)detailText; @end #endif // IOS_CHROME_BROWSER_UI_TABLE_VIEW_CELLS_TABLE_VIEW_DETAIL_ICON_ITEM_H_
40.5
80
0.787654
[ "model" ]
a2e03dd370e16d6cefa7c761a6ad941c4efb5bf0
4,431
h
C
programl/ir/llvm/internal/program_graph_builder.h
xshaun/compiler-programl
f90bcd84700d0f245c80440a3d5fd29370d2f973
[ "Apache-2.0" ]
null
null
null
programl/ir/llvm/internal/program_graph_builder.h
xshaun/compiler-programl
f90bcd84700d0f245c80440a3d5fd29370d2f973
[ "Apache-2.0" ]
null
null
null
programl/ir/llvm/internal/program_graph_builder.h
xshaun/compiler-programl
f90bcd84700d0f245c80440a3d5fd29370d2f973
[ "Apache-2.0" ]
null
null
null
// This file defines a class for constructing program graphs from LLVM modules. // // Copyright 2019-2020 the ProGraML authors. // // Contact Chris Cummins <chrisc.101@gmail.com>. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <utility> #include <vector> #include "absl/container/flat_hash_map.h" #include "labm8/cpp/status.h" #include "labm8/cpp/statusor.h" #include "labm8/cpp/string.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Function.h" #include "llvm/IR/Module.h" #include "programl/graph/program_graph_builder.h" #include "programl/ir/llvm/internal/text_encoder.h" #include "programl/proto/program_graph.pb.h" #include "programl/proto/program_graph_options.pb.h" namespace programl { namespace ir { namespace llvm { namespace internal { // An <entry, exits> pair which records the node numbers for a function's entry // and exit statement nodes, respectively. using FunctionEntryExits = std::pair<Node*, std::vector<Node*>>; // An <entry, exit> pair which records the node numbers of a basic block's entry // and exit statement nodes, respectively. using BasicBlockEntryExit = std::pair<Node*, Node*>; // A <node, position> pair which records a node number and a position argument. using PositionalNode = std::pair<Node*, int32_t>; // A <source_instruction, destination_node> pair which records a data flow // relation from a producer instruction to data element produced. 
Since // LLVM does not have multi-assignment, the position of all DataEdges is assumed // to be zero. using DataEdge = std::pair<const ::llvm::Instruction*, Node*>; // A map from instructions to their node. using InstructionMap = absl::flat_hash_map<const ::llvm::Instruction*, Node*>; using ArgumentConsumerMap = absl::flat_hash_map<const ::llvm::Argument*, std::vector<PositionalNode>>; // A specialized program graph builder for LLVM-IR. class ProgramGraphBuilder : public programl::graph::ProgramGraphBuilder { public: explicit ProgramGraphBuilder(const ProgramGraphOptions& options) : programl::graph::ProgramGraphBuilder(), options_(options), blockCount_(0){} [[nodiscard]] labm8::StatusOr<ProgramGraph> Build( const ::llvm::Module& module); void Clear(); protected: [[nodiscard]] labm8::StatusOr<FunctionEntryExits> VisitFunction( const ::llvm::Function& function, const Function* functionMessage); [[nodiscard]] labm8::StatusOr<BasicBlockEntryExit> VisitBasicBlock( const ::llvm::BasicBlock& block, const Function* functionMessage, InstructionMap* instructionMap, ArgumentConsumerMap* argumentConsumers, std::vector<DataEdge>* dataEdgesToAdd); [[nodiscard]] labm8::Status AddCallSite(const Node* source, const FunctionEntryExits& target); Node* AddLlvmInstruction(const ::llvm::Instruction* instruction, const Function* function); Node* AddLlvmVariable(const ::llvm::Instruction* operand, const Function* function); Node* AddLlvmVariable(const ::llvm::Argument* argument, const Function* function); Node* AddLlvmConstant(const ::llvm::Constant* constant); private: const ProgramGraphOptions options_; TextEncoder textEncoder_; int32_t blockCount_; // A map from nodes to functions, used to track call sites. This map is // populated by VisitBasicBlock() and consumed once all functions have been // visited. absl::flat_hash_map<Node*, const ::llvm::Function*> callSites_; // A map from constant values to <node, position> uses. 
This map is // populated by VisitBasicBlock() and consumed once all functions have been // visited. absl::flat_hash_map<const ::llvm::Constant*, std::vector<PositionalNode>> constants_; }; } // namespace internal } // namespace llvm } // namespace ir } // namespace programl
37.235294
80
0.722862
[ "vector" ]
a2e9c788f93858bccfe577eac5c711d26998c11f
4,720
h
C
Code/Projects/Eld/src/Components/wbcompeldplayer.h
xordspar0/eldritch
895639bbb7ccf72e0e8e7a4c5cf72a7915467679
[ "Zlib" ]
4
2019-09-15T20:00:23.000Z
2021-08-27T23:32:53.000Z
Code/Projects/Eld/src/Components/wbcompeldplayer.h
xordspar0/eldritch
895639bbb7ccf72e0e8e7a4c5cf72a7915467679
[ "Zlib" ]
11
2019-01-04T19:50:48.000Z
2021-04-26T08:31:45.000Z
Code/Projects/Eld/src/Components/wbcompeldplayer.h
xordspar0/eldritch
895639bbb7ccf72e0e8e7a4c5cf72a7915467679
[ "Zlib" ]
4
2018-02-27T07:10:51.000Z
2021-04-22T09:38:13.000Z
#ifndef WBCOMPELDPLAYER_H #define WBCOMPELDPLAYER_H #include "wbeldcomponent.h" #include "wbevent.h" #include "vector.h" #include "iinputsystemobserver.h" #include "interpolator.h" class WBEvent; class WBCompEldPlayer : public WBEldComponent, public IInputSystemObserver { public: WBCompEldPlayer(); virtual ~WBCompEldPlayer(); DEFINE_WBCOMP( EldPlayer, WBEldComponent ); virtual bool BelongsInComponentArray() { return true; } virtual void Tick( const float DeltaTime ); virtual int GetTickOrder() { return ETO_TickFirst; } // Should tick before motion is integrated so input is applied ASAP. virtual void HandleEvent( const WBEvent& Event ); virtual void AddContextToEvent( WBEvent& Event ) const; #if BUILD_DEV virtual void DebugRender() const; #endif virtual uint GetSerializationSize(); virtual void Save( const IDataStream& Stream ); virtual void Load( const IDataStream& Stream ); // IInputSystemObserver virtual void OnInputContextsChanged(); SimpleString GetLastDamageDesc() const; bool IsDisablingPause() const { return m_IsDisablingPause; } float GetPlayTime() const; protected: virtual void InitializeFromDefinition( const SimpleString& DefinitionName ); private: #if BUILD_DEV void TEMPHACKInput(); #endif void ConditionalApplyRunningStatMods(); void UseJumpPower( const float JumpPowerImpulse ); void Crouch(); void BeginUncrouch(); void CancelUncrouch(); void TryUncrouch(); bool CanUncrouch(); void Uncrouch(); void RestoreCrouch(); void SetCrouchOverlayHidden( const bool Hidden ); void BeginPowerSlide( const Vector& PowerSlideY ); void EndPowerSlide(); void RestorePowerSlide(); bool ShouldAttachToClimbable( const WBEvent& ClimbEvent ); void IncrementClimbRefs( const WBEvent& ClimbEvent ); void DecrementClimbRefs( const bool AddClimbOffImpulse ); void ZeroClimbRefs(); // Immediately end climb void BeginClimb( const WBEvent& ClimbEvent ); void EndClimb( const bool AddClimbOffImpulse ); void RestoreClimb(); void TryBeginMantle( const Vector& CollisionNormal ); void 
BeginMantle( const Vector& MantleDestination ); void EndMantle(); void SetSpawnPoint(); void RestoreSpawnPoint(); SimpleString GetDamageDesc( WBEntity* const pDamager, const HashedString& DamageType ) const; void PushPersistence() const; void PullPersistence(); float m_UnlandedJumpWindow; float m_UnlandedLeanWindow; float m_LandAcceleration; float m_AirAcceleration; float m_TurnSpeed; float m_JumpImpulse; bool m_UncrouchOnSprint; // Config bool m_IsCrouched; bool m_IsUncrouching; float m_StandExtentsZ; float m_CrouchExtentsZ; float m_StandViewOffsetZ; float m_CrouchViewOffsetZ; float m_CrouchViewInterpTime; // Config Interpolator<float> m_ViewOffsetZInterpolator; // Transient Interpolator<float> m_ViewOffsetAngleRollInterpolator; // Transient bool m_IsPowerSliding; // Serialized float m_PowerSlideDuration; // Config float m_PowerSlideEndTime; // Serialized (as time remaining) Vector m_PowerSlideY; // Serialized HashedString m_PowerSlideInputContext; // Config float m_PowerSlideReqVelocitySq; // Config float m_PowerSlideRoll; // Config float m_PowerSlideRollInterpTime; // Config int m_ClimbRefs; // Serialized; refcount climbing so we can transfer climbables without issue bool m_Climbing; // Transient HashedString m_ClimbInputContext; float m_UnclimbingGravity; // Serialized float m_ClimbOffImpulse; // Config float m_ClimbFacingBiasAngle; // Config float m_ClimbFacingBiasScale; // Config bool m_IsMantling; // Transient HashedString m_MantleInputContext; // Config float m_MantleVelocity; // Config Vector m_MantleVector; // Transient Vector m_MantleDestination; // Transient bool m_CanMantle; // Transient bool m_IsDisablingPause; // Serialized bool m_HasSetSpawnPoint; // Serialized Vector m_SpawnLocation; // Serialized Angles m_SpawnOrientation; // Serialized SimpleString m_LastDamageDesc; // Transient SimpleString m_DefaultDamageDesc; // Config float m_PreviousPlayTime; // Serialized; the amount of time spent in prior levels, added to GetTime() to get total play time 
#if BUILD_DEV Array<Vector> m_TEMPHACKPath; bool m_TEMPHACKCamActive; float m_TEMPHACKCamVelocity; float m_TEMPHACKCamGravity; Interpolator<Vector> m_TEMPHACKCamLocation; Interpolator<Angles> m_TEMPHACKCamOrientation; Vector m_TEMPHACKCamStartLocation; Vector m_TEMPHACKCamEndLocation; Angles m_TEMPHACKCamStartOrientation; Angles m_TEMPHACKCamEndOrientation; #endif }; #endif // WBCOMPELDPLAYER_H
29.5
129
0.762076
[ "vector" ]
a2ee8eb1100a0d4596664cc36ba6ccc7e7b94815
13,446
c
C
src/loader.c
mkeeter/hedgehog
4e5a78fc87cde21f945579e6fbfae69b5f86734b
[ "Apache-2.0", "MIT" ]
114
2019-10-08T04:18:19.000Z
2022-01-20T05:53:56.000Z
src/loader.c
mkeeter/hedgehog
4e5a78fc87cde21f945579e6fbfae69b5f86734b
[ "Apache-2.0", "MIT" ]
7
2021-03-01T00:35:38.000Z
2021-08-05T13:59:01.000Z
src/loader.c
mkeeter/hedgehog
4e5a78fc87cde21f945579e6fbfae69b5f86734b
[ "Apache-2.0", "MIT" ]
9
2020-05-08T00:57:27.000Z
2022-02-12T15:51:59.000Z
#include "camera.h" #include "icosphere.h" #include "loader.h" #include "log.h" #include "mat.h" #include "model.h" #include "object.h" #include "platform.h" #include "worker.h" struct loader_ { const char* filename; /* Model parameters */ GLuint vbo; GLuint ibo; uint32_t tri_count; uint32_t vert_count; vec3_t center; float scale; /* GPU-mapped buffers, populated by main thread */ float* vertex_buf; uint32_t* index_buf; /* Synchronization system */ struct platform_thread_* thread; loader_state_t state; struct platform_mutex_* mutex; struct platform_cond_* cond; /* Each worker thread increments count when they are * done building their vertex set, using the same mutex * and condition variable. */ unsigned count; }; static void* loader_run(void* loader_); void loader_wait(loader_t* loader, loader_state_t target) { platform_mutex_lock(loader->mutex); while (loader->state < target) { platform_cond_wait(loader->cond, loader->mutex); } platform_mutex_unlock(loader->mutex); } void loader_next(loader_t* loader, loader_state_t target) { platform_mutex_lock(loader->mutex); loader->state = target; platform_cond_broadcast(loader->cond); platform_mutex_unlock(loader->mutex); } loader_t* loader_new(const char* filename) { OBJECT_ALLOC(loader); loader->mutex = platform_mutex_new(); loader->cond = platform_cond_new(); loader->filename = filename; loader->thread = platform_thread_new(loader_run, loader); return loader; } static const char* loader_parse_ascii(const char* data, size_t* size) { size_t buf_size = 256; size_t buf_count = 0; float* buffer = (float*)malloc(sizeof(float) * buf_size); #define ABORT_IF(cond, msg) \ if (cond) { \ free(buffer); \ log_error(msg); \ return NULL; \ } #define SKIP_WHILE(cond) \ while (*data && cond(*data)) { \ data++; \ } \ ABORT_IF(*data == 0, "Unexpected file end"); /* The most liberal ASCII STL parser: Ignore everything except * the word 'vertex', then read three floats after each one. 
*/ const char VERTEX_STR[] = "vertex "; while (1) { data = strstr(data, VERTEX_STR); if (!data) { break; } /* Skip to the first character after 'vertex' */ data += strlen(VERTEX_STR); for (unsigned i=0; i < 3; ++i) { /* errno can be set by realloc even on non-failures, * so clear it before the call to strtof */ errno = 0; char* end_ptr = NULL; const float f = strtof(data, &end_ptr); ABORT_IF(errno != 0, "Failed to parse float"); if (buf_size == buf_count) { buf_size *= 2; buffer = (float*)realloc(buffer, buf_size * sizeof(float)); } buffer[buf_count++] = f; data = end_ptr; } } log_trace("Parsed ASCII STL"); ABORT_IF(buf_count % 9 != 0, "Total vertex count isn't divisible by 9"); const uint32_t triangle_count = buf_count / 9; *size = 84 + 50 * triangle_count; char* out = (char*)malloc(*size); /* Copy triangle count into the buffer */ memcpy(&out[80], &triangle_count, 4); /* Copy all of the raw triangles into the buffer, spaced out * like a binary STL file (so that we can re-use the reader) */ for (unsigned i=0; i < buf_count / 9; i++) { memcpy(&out[84 + i*50 + 12], &buffer[i*9], 36); } free(buffer); return out; } static void* loader_run(void* loader_) { loader_t* loader = (loader_t*)loader_; loader_next(loader, LOADER_START); platform_mmap_t* mapped = NULL; const char* data = NULL; size_t size; /* filesize in bytes */ /* This magic filename tells us to load a builtin array, * rather than something in the filesystem */ if (!strcmp(loader->filename, ":/sphere")) { data = icosphere_stl(1, &size); } else { mapped = platform_mmap(loader->filename); if (mapped) { data = platform_mmap_data(mapped); size = platform_mmap_size(mapped); } else { log_error("Could not open %s", loader->filename); loader_next(loader, LOADER_ERROR_NO_FILE); return NULL; } } /* Check whether this is an ASCII stl. Some binary STL files * still start with the word 'solid', so we check the file size * as a second heuristic. 
*/ bool is_ascii = (size >= 6 && !strncmp("solid ", data, 6)); if (is_ascii && size >= 84) { uint32_t tentative_tri_count; memcpy(&tentative_tri_count, &data[80], sizeof(tentative_tri_count)); if (size == tentative_tri_count * 50 + 84) { log_warn("File begins with 'solid' but appears to be " "a binary STL file"); is_ascii = false; } } /* Convert from an ASCII STL to a binary STL so that the rest of the * loader can run unobstructed. */ if (is_ascii) { size_t new_size; const char* new_data = loader_parse_ascii(data, &new_size); if (new_data) { platform_munmap(mapped); mapped = NULL; data = new_data; size = new_size; } else { loader_next(loader, LOADER_ERROR_BAD_ASCII_STL); platform_munmap(mapped); return NULL; } } /* Check whether the file is a valid size. */ if (size < 84) { log_error("File is too small to be an STL (%u < 84)", (unsigned)size); loader_next(loader, LOADER_ERROR_WRONG_SIZE); platform_munmap(mapped); return NULL; } /* Pull the number of triangles from the raw STL data */ memcpy(&loader->tri_count, &data[80], sizeof(loader->tri_count)); /* Compare the actual file size with the expected size */ const uint32_t expected_size = loader->tri_count * 50 + 84; if (expected_size != size) { log_error("Invalid file size for %u triangles (expected %u, got %u)", loader->tri_count, expected_size, (unsigned)size); loader_next(loader, LOADER_ERROR_WRONG_SIZE); platform_munmap(mapped); return NULL; } /* The worker threads deduplicate a subset of the vertices, then * increment loader->count to indicate that they're done. 
*/ const size_t NUM_WORKERS = 6; worker_t workers[NUM_WORKERS]; for (unsigned i=0; i < NUM_WORKERS; ++i) { const size_t start = i * loader->tri_count / NUM_WORKERS; const size_t end = (i + 1) * loader->tri_count / NUM_WORKERS; workers[i].loader = loader; workers[i].tri_count = end - start; workers[i].stl = (const char (*)[50])&data[80 + 4 + 12 + 50 * start]; worker_start(&workers[i]); } /* Wait for all of the worker threads to finish deduplicating vertices */ platform_mutex_lock(loader->mutex); while (loader->count != NUM_WORKERS) { platform_cond_wait(loader->cond, loader->mutex); } platform_mutex_unlock(loader->mutex); log_trace("Workers have deduplicated vertices"); /* Accumulate the total vertex count, then wait for the OpenGL thread * to allocate the vertex and triangle buffers */ loader->vert_count = 0; for (unsigned i=0; i < NUM_WORKERS; ++i) { workers[i].tri_offset = loader->vert_count; loader->vert_count += workers[i].vert_count; } log_trace("Got %u vertices (%u triangles)", loader->vert_count, loader->tri_count); loader_next(loader, LOADER_MODEL_SIZE); log_trace("Waiting for buffer..."); loader_wait(loader, LOADER_GPU_BUFFER); /* Populate GPU pointers, then kick off workers copying triangles */ size_t tri_offset = 0; size_t vert_offset = 0; for (unsigned i=0; i < NUM_WORKERS; ++i) { workers[i].vertex_buf = &loader->vertex_buf[vert_offset]; workers[i].index_buf = &loader->index_buf[tri_offset]; vert_offset += workers[i].vert_count * 3; tri_offset += workers[i].tri_count * 3; } loader_next(loader, LOADER_WORKER_GPU); log_trace("Sent buffers to worker threads"); for (unsigned i=0; i < NUM_WORKERS; ++i) { worker_finish(&workers[i]); } log_trace("Joined worker threads"); /* Reduce min / max arrays from worker subprocesses */ for (unsigned v=0; v < 3; ++v) { for (unsigned i=1; i < NUM_WORKERS; ++i) { if (workers[i].max[v] > workers[0].max[v]) { workers[0].max[v] = workers[i].max[v]; } if (workers[i].min[v] < workers[0].min[v]) { workers[0].min[v] = 
workers[i].min[v]; } } loader->center.v[v] = (workers[0].max[v] + workers[0].min[v]) / 2.0f; const float d = workers[0].max[v] - workers[0].min[v]; if (d > loader->scale) { loader->scale = d; } } /* Mark the load as done and post an empty event, to make sure that * the main loop wakes up and checks the loader */ log_trace("Loader thread done"); loader_next(loader, LOADER_DONE); glfwPostEmptyEvent(); /* Release any allocated file data */ if (mapped) { platform_munmap(mapped); } else { free((void*)data); } return NULL; } void loader_allocate_vbo(loader_t* loader) { glGenBuffers(1, &loader->vbo); glGenBuffers(1, &loader->ibo); glBindBuffer(GL_ARRAY_BUFFER, loader->vbo); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, loader->ibo); loader_wait(loader, LOADER_MODEL_SIZE); /* Early return if there is an error in the loader; * we leave the buffer allocated so it can be cleaned * up as usual later. */ if (loader->state >= LOADER_ERROR) { return; } /* Allocate and map index buffer */ const size_t ibo_bytes = loader->tri_count * 3 * sizeof(uint32_t); glBufferData(GL_ELEMENT_ARRAY_BUFFER, ibo_bytes, NULL, GL_STATIC_DRAW); loader->index_buf = (uint32_t*)glMapBufferRange( GL_ELEMENT_ARRAY_BUFFER, 0, ibo_bytes, GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT | GL_MAP_UNSYNCHRONIZED_BIT); /* Allocate and map vertex buffer */ const size_t vbo_bytes = loader->vert_count * 3 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, vbo_bytes, NULL, GL_STATIC_DRAW); loader->vertex_buf = (float*)glMapBufferRange( GL_ARRAY_BUFFER, 0, vbo_bytes, GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT | GL_MAP_INVALIDATE_BUFFER_BIT | GL_MAP_UNSYNCHRONIZED_BIT); loader_next(loader, LOADER_GPU_BUFFER); log_trace("Allocated buffer"); } void loader_finish(loader_t* loader, model_t* model, camera_t* camera) { if (!loader->vbo) { log_error_and_abort("Invalid loader VBO"); } else if (!loader->ibo) { log_error_and_abort("Invalid loader IBO"); } else if (!model->vao) { 
log_error_and_abort("Invalid model VAO"); } loader_wait(loader, LOADER_DONE); /* If the loader succeeded, then set up all of the * GL buffers, matrices, etc. */ if (loader->state == LOADER_DONE) { glBindVertexArray(model->vao); glBindBuffer(GL_ARRAY_BUFFER, loader->vbo); glUnmapBuffer(GL_ARRAY_BUFFER); glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, loader->ibo); glUnmapBuffer(GL_ELEMENT_ARRAY_BUFFER); glEnableVertexAttribArray(0); glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, 0); model->vbo = loader->vbo; model->ibo = loader->ibo; model->tri_count = loader->tri_count; camera_set_model(camera, (float*)&loader->center, loader->scale); log_trace("Copied model from loader"); } else { log_error("Loading failed"); } } void loader_delete(loader_t* loader) { if (platform_thread_join(loader->thread)) { log_error_and_abort("Failed to join loader thread"); } platform_mutex_delete(loader->mutex); platform_cond_delete(loader->cond); platform_thread_delete(loader->thread); free(loader); log_trace("Destroyed loader"); } const char* loader_error_string(loader_t* loader) { switch(loader->state) { case LOADER_START: case LOADER_MODEL_SIZE: case LOADER_GPU_BUFFER: case LOADER_WORKER_GPU: return "Invalid state"; case LOADER_DONE: return NULL; case LOADER_ERROR: return "Generic error"; case LOADER_ERROR_NO_FILE: return "File not found"; case LOADER_ERROR_BAD_ASCII_STL: return "Failed to parse ASCII stl"; case LOADER_ERROR_WRONG_SIZE: return "File size does not match triangle count"; } log_error_and_abort("Invalid state %i", loader->state); return NULL; } void loader_increment_count(loader_t* loader) { platform_mutex_lock(loader->mutex); loader->count++; platform_cond_broadcast(loader->cond); platform_mutex_unlock(loader->mutex); }
33.2
78
0.609177
[ "object", "model", "solid" ]
a2f13a7139b1a878a6d671f4b46626c7256ea00d
2,036
h
C
pandatool/src/egg-palettize/txaFileFilter.h
cmarshall108/panda3d-python3
8bea2c0c120b03ec1c9fd179701fdeb7510bb97b
[ "PHP-3.0", "PHP-3.01" ]
3
2020-01-02T08:43:36.000Z
2020-07-05T08:59:02.000Z
pandatool/src/egg-palettize/txaFileFilter.h
cmarshall108/panda3d-python3
8bea2c0c120b03ec1c9fd179701fdeb7510bb97b
[ "PHP-3.0", "PHP-3.01" ]
null
null
null
pandatool/src/egg-palettize/txaFileFilter.h
cmarshall108/panda3d-python3
8bea2c0c120b03ec1c9fd179701fdeb7510bb97b
[ "PHP-3.0", "PHP-3.01" ]
1
2020-03-11T17:38:45.000Z
2020-03-11T17:38:45.000Z
/** * PANDA 3D SOFTWARE * Copyright (c) Carnegie Mellon University. All rights reserved. * * All use of this software is subject to the terms of the revised BSD * license. You should have received a copy of this license along * with this source code in a file named "LICENSE." * * @file txaFileFilter.h * @author drose * @date 2006-07-27 */ #ifndef TXAFILEFILTER_H #define TXAFILEFILTER_H #include "pandatoolbase.h" #include "texturePoolFilter.h" #include "pt_EggTexture.h" class TxaFile; /** * This is an abstract base class, a placeholder for any number of different * classes that may wish to implement an effect on every texture loaded from * disk via the TexturePool. * * In practice, as of the time of this writing, only the TxaFileFilter (in * pandatool) actually implements this. But other kinds of filters are * possible. * * This filter, once registered, will get a callback and a chance to modify * each texture as it is loaded from disk the first time. If more than one * filter is registered, each will be called in sequence, in the order in * which they were registered. * * The filter does not get called again if the texture is subsequently * reloaded from disk. It is suggested that filters for which this might be a * problem should call tex->set_keep_ram_image(true). */ class EXPCL_MISC TxaFileFilter : public TexturePoolFilter { public: virtual PT(Texture) post_load(Texture *tex); private: static void read_txa_file(); private: static TxaFile *_txa_file; static bool _got_txa_file; public: static TypeHandle get_class_type() { return _type_handle; } static void init_type() { TexturePoolFilter::init_type(); register_type(_type_handle, "TxaFileFilter", TexturePoolFilter::get_class_type()); } virtual TypeHandle get_type() const { return get_class_type(); } virtual TypeHandle force_init_type() {init_type(); return get_class_type();} private: static TypeHandle _type_handle; }; #include "txaFileFilter.I" #endif
27.890411
78
0.735265
[ "3d" ]
a2f54f57563076590338deb708302e9a7e7f0c71
4,480
h
C
Platform/OSX/Frameworks/OgreRelease.framework/Versions/A/Headers/OgreRay.h
unconed/NFSpace
bbd544afb32a10bc4ee497e1d58cefe4bbbe7953
[ "BSD-3-Clause" ]
91
2015-01-19T11:03:56.000Z
2022-03-12T15:54:06.000Z
Platform/OSX/Frameworks/OgreDebug.framework/Versions/A/Headers/OgreRay.h
unconed/NFSpace
bbd544afb32a10bc4ee497e1d58cefe4bbbe7953
[ "BSD-3-Clause" ]
null
null
null
Platform/OSX/Frameworks/OgreDebug.framework/Versions/A/Headers/OgreRay.h
unconed/NFSpace
bbd544afb32a10bc4ee497e1d58cefe4bbbe7953
[ "BSD-3-Clause" ]
9
2015-03-16T03:36:50.000Z
2021-06-17T09:47:26.000Z
/* ----------------------------------------------------------------------------- This source file is part of OGRE (Object-oriented Graphics Rendering Engine) For the latest info, see http://www.ogre3d.org/ Copyright (c) 2000-2006 Torus Knot Software Ltd Also see acknowledgements in Readme.html This program is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA, or go to http://www.gnu.org/copyleft/lesser.txt. You may alternatively use this source under the terms of a specific version of the OGRE Unrestricted License provided you have obtained such a license from Torus Knot Software Ltd. ----------------------------------------------------------------------------- */ #ifndef __Ray_H_ #define __Ray_H_ // Precompiler options #include "OgrePrerequisites.h" #include "OgreVector3.h" #include "OgrePlaneBoundedVolume.h" namespace Ogre { /** Representation of a ray in space, i.e. a line with an origin and direction. */ class _OgreExport Ray { protected: Vector3 mOrigin; Vector3 mDirection; public: Ray():mOrigin(Vector3::ZERO), mDirection(Vector3::UNIT_Z) {} Ray(const Vector3& origin, const Vector3& direction) :mOrigin(origin), mDirection(direction) {} /** Sets the origin of the ray. */ void setOrigin(const Vector3& origin) {mOrigin = origin;} /** Gets the origin of the ray. 
*/ const Vector3& getOrigin(void) const {return mOrigin;} /** Sets the direction of the ray. */ void setDirection(const Vector3& dir) {mDirection = dir;} /** Gets the direction of the ray. */ const Vector3& getDirection(void) const {return mDirection;} /** Gets the position of a point t units along the ray. */ Vector3 getPoint(Real t) const { return Vector3(mOrigin + (mDirection * t)); } /** Gets the position of a point t units along the ray. */ Vector3 operator*(Real t) const { return getPoint(t); }; /** Tests whether this ray intersects the given plane. @returns A pair structure where the first element indicates whether an intersection occurs, and if true, the second element will indicate the distance along the ray at which it intersects. This can be converted to a point in space by calling getPoint(). */ std::pair<bool, Real> intersects(const Plane& p) const { return Math::intersects(*this, p); } /** Tests whether this ray intersects the given plane bounded volume. @returns A pair structure where the first element indicates whether an intersection occurs, and if true, the second element will indicate the distance along the ray at which it intersects. This can be converted to a point in space by calling getPoint(). */ std::pair<bool, Real> intersects(const PlaneBoundedVolume& p) const { return Math::intersects(*this, p.planes, p.outside == Plane::POSITIVE_SIDE); } /** Tests whether this ray intersects the given sphere. @returns A pair structure where the first element indicates whether an intersection occurs, and if true, the second element will indicate the distance along the ray at which it intersects. This can be converted to a point in space by calling getPoint(). */ std::pair<bool, Real> intersects(const Sphere& s) const { return Math::intersects(*this, s); } /** Tests whether this ray intersects the given box. 
@returns A pair structure where the first element indicates whether an intersection occurs, and if true, the second element will indicate the distance along the ray at which it intersects. This can be converted to a point in space by calling getPoint(). */ std::pair<bool, Real> intersects(const AxisAlignedBox& box) const { return Math::intersects(*this, box); } }; } #endif
38.62069
88
0.683259
[ "object" ]
a2f5a9767af6df2f82b4368948a179cd568d4efc
3,806
h
C
MaterialLib/SolidModels/CreepBGRa.h
Bernie2019/ogs
80b66724d72d8ce01e02ddcd1fb6866c90b41c1d
[ "BSD-4-Clause" ]
1
2021-06-25T13:43:06.000Z
2021-06-25T13:43:06.000Z
MaterialLib/SolidModels/CreepBGRa.h
Bernie2019/ogs
80b66724d72d8ce01e02ddcd1fb6866c90b41c1d
[ "BSD-4-Clause" ]
null
null
null
MaterialLib/SolidModels/CreepBGRa.h
Bernie2019/ogs
80b66724d72d8ce01e02ddcd1fb6866c90b41c1d
[ "BSD-4-Clause" ]
2
2018-03-01T13:07:12.000Z
2018-03-01T13:16:22.000Z
/** * \copyright * Copyright (c) 2012-2017, OpenGeoSys Community (http://www.opengeosys.org) * Distributed under a Modified BSD License. * See accompanying file LICENSE.txt or * http://www.opengeosys.org/project/license * * \file * Created on July 6, 2018, 9:53 AM */ #pragma once #include <cmath> #include <memory> #include <tuple> #include "LinearElasticIsotropic.h" #include "MathLib/KelvinVector.h" #include "NumLib/NewtonRaphson.h" #include "ParameterLib/Parameter.h" namespace MaterialLib { namespace Solids { namespace Creep { /** * \brief A class for computing the BGRa creep model, which reads * \f[ * \dot {\mathbf{\epsilon}}^{cr}=\sqrt{\frac{3}{2}}A \mathrm{e}^{-\frac{Q}{RT}} * \left(\frac{\sigma_{eff}}{\sigma_0}\right)^n\frac{\mathbf{s}}{||\mathbf{s}||} * \f] * where \f$\sigma_{eff}=\sqrt{\frac{3}{2}}||\mathbf{s}||\f$, \f$A, \sigma_0, n, * Q\f$ are parameter, and \f$R\f$ is the gas constant. */ template <int DisplacementDim> class CreepBGRa final : public LinearElasticIsotropic<DisplacementDim> { public: using LinearElasticIsotropic<DisplacementDim>::KelvinVectorSize; using ResidualVectorType = Eigen::Matrix<double, KelvinVectorSize, 1>; using JacobianMatrix = Eigen::Matrix<double, KelvinVectorSize, KelvinVectorSize, Eigen::RowMajor>; using KelvinVector = MathLib::KelvinVector::KelvinVectorType<DisplacementDim>; using KelvinMatrix = MathLib::KelvinVector::KelvinMatrixType<DisplacementDim>; using Parameter = ParameterLib::Parameter<double>; std::unique_ptr< typename MechanicsBase<DisplacementDim>::MaterialStateVariables> createMaterialStateVariables() const override { return LinearElasticIsotropic< DisplacementDim>::createMaterialStateVariables(); } CreepBGRa( typename LinearElasticIsotropic<DisplacementDim>::MaterialProperties mp, NumLib::NewtonRaphsonSolverParameters nonlinear_solver_parameters, Parameter const& A, Parameter const& n, Parameter const& sigma_f, Parameter const& Q) : LinearElasticIsotropic<DisplacementDim>(std::move(mp)), 
_nonlinear_solver_parameters(std::move(nonlinear_solver_parameters)), _a(A), _n(n), _sigma_f(sigma_f), _q(Q) { } boost::optional<std::tuple<KelvinVector, std::unique_ptr<typename MechanicsBase< DisplacementDim>::MaterialStateVariables>, KelvinMatrix>> integrateStress( double const t, ParameterLib::SpatialPosition const& x, double const dt, KelvinVector const& eps_prev, KelvinVector const& eps, KelvinVector const& sigma_prev, typename MechanicsBase<DisplacementDim>::MaterialStateVariables const& material_state_variables, double const T) const override; ConstitutiveModel getConstitutiveModel() const override { return ConstitutiveModel::CreepBGRa; } double getTemperatureRelatedCoefficient( double const t, double const dt, ParameterLib::SpatialPosition const& x, double const T, double const deviatoric_stress_norm) const override; private: NumLib::NewtonRaphsonSolverParameters const _nonlinear_solver_parameters; Parameter const& _a; /// A parameter determined by experiment. Parameter const& _n; /// Creep rate exponent n. Parameter const& _sigma_f; /// A stress scaling factor. Parameter const& _q; /// Activation energy }; extern template class CreepBGRa<2>; extern template class CreepBGRa<3>; } // end of namespace Creep } // end of namespace Solids } // namespace MaterialLib
33.982143
80
0.67341
[ "model" ]
0c02cac1e39872d05c67fc8d9f00677c33d17e96
3,560
h
C
src/gl/VertexBuffer.h
UnrealKaraulov/bspguy_alt
c55cc7fea671bb0d4df1cb193cb6348b694c4844
[ "Unlicense" ]
8
2022-01-25T07:54:29.000Z
2022-01-29T06:52:25.000Z
src/gl/VertexBuffer.h
Ariper999/newbspguy
c662462dd612b85b6ea4507bc462681351d84f73
[ "Unlicense" ]
10
2022-02-09T19:56:03.000Z
2022-03-20T04:06:26.000Z
src/gl/VertexBuffer.h
Ariper999/newbspguy
c662462dd612b85b6ea4507bc462681351d84f73
[ "Unlicense" ]
2
2022-01-21T13:52:59.000Z
2022-02-20T17:12:56.000Z
#pragma once #include <GL/glew.h> #include <vector> #include "ShaderProgram.h" #include "util.h" // Combinable flags for setting common vertex attributes #define TEX_2B (1 << 0) // 2D unsigned char texture coordinates #define TEX_2S (1 << 1) // 2D short texture coordinates #define TEX_2F (1 << 2) // 2D float texture coordinates #define COLOR_3B (1 << 3) // RGB unsigned char color values #define COLOR_3F (1 << 4) // RGB float color values #define COLOR_4B (1 << 5) // RGBA unsigned char color values #define COLOR_4F (1 << 6) // RGBA float color values #define NORM_3B (1 << 7) // 3D unsigned char normal coordinates #define NORM_3F (1 << 8) // 3D float normal coordinates #define POS_2B (1 << 9) // 2D unsigned char position coordinates #define POS_2S (1 << 10) // 2D short position coordinates #define POS_2I (1 << 11) // 2D integer position coordinates #define POS_2F (1 << 12) // 2D float position coordinates #define POS_3S (1 << 13) // 3D short position coordinates #define POS_3F (1 << 14) // 3D float position coordinates // starting bits for the different types of vertex attributes #define VBUF_TEX_START 0 // first bit for texture flags #define VBUF_COLOR_START 3 // first bit for color flags #define VBUF_NORM_START 7 // first bit for normals flags #define VBUF_POS_START 9 // first bit for position flags #define VBUF_FLAGBITS 15 // number of settable bits #define VBUF_TEX_MASK 0x7 // mask for all texture flags #define VBUF_COLOR_MASK 0x78 // mask for all color flags #define VBUF_NORM_MASK 0x180 // mask for all normal flags struct VertexAttr { int numValues; int valueType; // Ex: GL_FLOAT int handle; // location in shader program (-1 indicates invalid attribute) int size; // size of the attribute in bytes int normalized; // GL_TRUE/GL_FALSE Ex: unsigned char color values are normalized (0-255 = 0.0-1.0) const char* varName; VertexAttr() : handle(-1) {} VertexAttr(int numValues, int valueType, int handle, int normalized, const char* varName); }; class VertexBuffer { 
public: unsigned char* data = NULL; std::vector<VertexAttr> attribs; int elementSize; GLsizei numVerts; bool ownData = false; // set to true if buffer should delete data on destruction // Specify which common attributes to use. They will be located in the // shader program. If passing data, note that data is not copied, but referenced VertexBuffer(ShaderProgram* shaderProgram, int attFlags); VertexBuffer(ShaderProgram* shaderProgram, int attFlags, const void* dat, GLsizei numVerts); ~VertexBuffer(); // Note: Data is not copied into the class - don't delete your data. // Data will be deleted when the buffer is destroyed. void setData(const void* data, GLsizei numVerts); void upload(); void deleteBuffer(); void setShader(ShaderProgram* program, bool hideErrors = false); void drawRange(int primitive, GLint start, GLsizei end); void draw(int primitive); void addAttribute(int numValues, int valueType, int normalized, const char* varName); void addAttribute(int type, const char* varName); void bindAttributes(bool hideErrors = false); // find handles for all vertex attributes (call from main thread only) private: ShaderProgram* shaderProgram = NULL; // for getting handles to vertex attributes unsigned int vboId = -1; bool attributesBound = false; // add attributes according to the attribute flags void addAttributes(int attFlags); };
40.454545
118
0.709831
[ "vector", "3d" ]
0c03cb2e4837143e2e7567a8b2ddcbc196dc7b35
4,946
h
C
cpp/util/json_wrapper.h
laiqu/certificate-transparency
9e1bd563bf19188373edee5ae7195f9cdced7a74
[ "Apache-2.0" ]
1
2015-04-09T15:34:59.000Z
2015-04-09T15:34:59.000Z
cpp/util/json_wrapper.h
laiqu/certificate-transparency
9e1bd563bf19188373edee5ae7195f9cdced7a74
[ "Apache-2.0" ]
null
null
null
cpp/util/json_wrapper.h
laiqu/certificate-transparency
9e1bd563bf19188373edee5ae7195f9cdced7a74
[ "Apache-2.0" ]
null
null
null
/* -*- mode: c++; indent-tabs-mode: nil -*- */ #ifndef JSON_WRAPPER_H #define JSON_WRAPPER_H #include <glog/logging.h> #include <json.h> #undef TRUE // json.h pollution #undef FALSE // json.h pollution #include <event2/buffer.h> #include <sstream> #include <string> #include "base/macros.h" #include "proto/serializer.h" #include "util/util.h" class JsonArray; // It appears that a new object, e.g. from a string, has a reference count // of 1, and that any objects "got" from it will get freed when it is freed. // Note that a JsonObject that is not Ok() should not be used for anything. class JsonObject { public: explicit JsonObject(json_object* obj) : obj_(obj) { } explicit JsonObject(const std::ostringstream& response) { obj_ = json_tokener_parse(response.str().c_str()); } explicit JsonObject(const std::string& response) { obj_ = json_tokener_parse(response.c_str()); } // This constructor is destructive: if a JSON object is parsed // correctly, it will remove it from the front of the buffer. In // case of an error, the buffer is left unchanged. explicit JsonObject(evbuffer* buffer); JsonObject(const JsonArray& from, int offset, json_type type = json_type_object); JsonObject() : obj_(json_object_new_object()) { } ~JsonObject() { if (obj_) json_object_put(obj_); } // Get the object out, and stop tracking it so we _won't_ put() it // when we are destroyed. The caller needs to ensure it is freed. 
json_object* Extract() { json_object* tmp = obj_; obj_ = NULL; return tmp; } bool Ok() const { return obj_ != NULL; } bool IsType(json_type type) const { return json_object_is_type(obj_, type); } const char* ToJson() const { return json_object_to_json_string(obj_); } void Add(const char* name, const JsonObject& addand) { Add(name, addand.obj_); } void Add(const char* name, int64_t value) { Add(name, json_object_new_int64(value)); } void Add(const char* name, const std::string& value) { Add(name, json_object_new_string(value.c_str())); } void AddBase64(const char* name, const std::string& value) { Add(name, util::ToBase64(value)); } void Add(const char* name, const ct::DigitallySigned& ds) { std::string signature; CHECK_EQ(Serializer::SerializeDigitallySigned(ds, &signature), Serializer::OK); AddBase64(name, signature); } void AddBoolean(const char* name, bool b) { Add(name, json_object_new_boolean(b)); } const char* ToString() const { return json_object_to_json_string(obj_); } std::string DebugString() const { return json_object_to_json_string_ext(obj_, JSON_C_TO_STRING_PRETTY); } protected: JsonObject(const JsonObject& from, const char* field, json_type type) { if (json_object_object_get_ex(from.obj_, field, &obj_)) { if (!json_object_is_type(obj_, type)) { LOG(ERROR) << "Don't understand " << field << " field: " << from.ToJson(); obj_ = NULL; return; } } else { LOG(ERROR) << "No " << field << " field"; return; } // Increment reference count json_object_get(obj_); } json_object* obj_; private: void Add(const char* name, json_object* obj) { json_object_object_add(obj_, name, obj); } DISALLOW_COPY_AND_ASSIGN(JsonObject); }; class JsonBoolean : public JsonObject { public: JsonBoolean(const JsonObject& from, const char* field) : JsonObject(from, field, json_type_boolean) { } bool Value() { return json_object_get_boolean(obj_); } }; class JsonString : public JsonObject { public: JsonString(const JsonObject& from, const char* field) : JsonObject(from, field, 
json_type_string) { } JsonString(const JsonArray& from, int offset) : JsonObject(from, offset, json_type_string) { } const char* Value() { return json_object_get_string(obj_); } std::string FromBase64() { return util::FromBase64(Value()); } }; class JsonInt : public JsonObject { public: explicit JsonInt(json_object* jint) : JsonObject(jint) { } JsonInt(const JsonObject& from, const char* field) : JsonObject(from, field, json_type_int) { } int64_t Value() const { return json_object_get_int64(obj_); } }; class JsonArray : public JsonObject { public: JsonArray(const JsonObject& from, const char* field) : JsonObject(from, field, json_type_array) { } JsonArray() : JsonObject(json_object_new_array()) { } void Add(json_object* addand) { json_object_array_add(obj_, addand); } void Add(const std::string& addand) { Add(json_object_new_string(addand.c_str())); } void Add(JsonObject* addand) { Add(addand->Extract()); } void AddBase64(const std::string& addand) { Add(util::ToBase64(addand)); } int Length() const { return json_object_array_length(obj_); } }; #endif
23.665072
76
0.668217
[ "object" ]
0c065ecc348ccbcae9d18ccdef1c9c25158092f3
6,596
h
C
examples/scriptable/duality/scripting_bridge.h
nachooya/naclports-1
43e9f13a836c1f6c609bb1418f672bd2041c3888
[ "BSD-3-Clause" ]
1
2019-01-17T23:49:48.000Z
2019-01-17T23:49:48.000Z
examples/scriptable/duality/scripting_bridge.h
nachooya/naclports-1
43e9f13a836c1f6c609bb1418f672bd2041c3888
[ "BSD-3-Clause" ]
null
null
null
examples/scriptable/duality/scripting_bridge.h
nachooya/naclports-1
43e9f13a836c1f6c609bb1418f672bd2041c3888
[ "BSD-3-Clause" ]
null
null
null
// Copyright 2010 The Native Client SDK Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can // be found in the LICENSE file. #ifndef EXAMPLES_SCRIPTABLE_DUALITY_SCRIPTING_BRIDGE_H_ #define EXAMPLES_SCRIPTABLE_DUALITY_SCRIPTING_BRIDGE_H_ #include <examples/scriptable/duality/scriptable.h> #include <nacl/nacl_npapi.h> #include <map> // General scripting bridge class that uses an implementable interface to // provide NaCl scriptability. class ScriptingBridge : public NPObject { public: explicit ScriptingBridge(Scriptable * scriptable_instance); virtual ~ScriptingBridge(); // Creates the plugin-side instance of NPObject. // Called by NPN_CreateObject, declared in npruntime.h // Documentation URL: https://developer.mozilla.org/en/NPClass template<typename ScriptableType> static NPObject* AllocateCallback(NPP npp, NPClass* npclass); // Called by NPP_GetScriptableInstance to get the scripting interface for // a plugin object of ScriptableType. The browser may dereference the // returned pointer any time after it gets it, until the plugin is cleaned // up. It should clean it up whenever it decides to collect garbage and // there are no references remaining. template<typename ScriptableType> static NPClass* GetNPSimpleClass(); private: // Cleans up the plugin-side instance of an NPObject. // Called by NPN_ReleaseObject, declared in npruntime.h // Documentation URL: https://developer.mozilla.org/en/NPClass static void DeallocateCallback(NPObject* object); // Returns the value of the property called |name| in |result| and true. // Returns false if |name| is not a property on this object or something else // goes wrong. // Called by NPN_GetProperty, declared in npruntime.h // Documentation URL: https://developer.mozilla.org/en/NPClass static bool GetPropertyCallback(NPObject* object, NPIdentifier name, NPVariant* result); // Returns |true| if |method_name| is a recognized method. 
// Called by NPN_HasMethod, declared in npruntime.h // Documentation URL: https://developer.mozilla.org/en/NPClass static bool HasMethodCallback(NPObject* object, NPIdentifier name); // Returns true if |name| is actually the name of a public property on the // plugin class being queried. // Called by NPN_HasProperty, declared in npruntime.h // Documentation URL: https://developer.mozilla.org/en/NPClass static bool HasPropertyCallback(NPObject* object, NPIdentifier name); // Not public because it is called by Allocate void Init(); // Called by the browser when a plugin is being destroyed to clean up any // remaining instances of NPClass. // Documentation URL: https://developer.mozilla.org/en/NPClass static void InvalidateCallback(NPObject* object); void Invalidate(); // Called by the browser to invoke a function object whose name is |name|. // Called by NPN_Invoke, declared in npruntime.h // Documentation URL: https://developer.mozilla.org/en/NPClass static bool InvokeCallback(NPObject* object, NPIdentifier name, const NPVariant* args, uint32_t arg_count, NPVariant* result); // Called by the browser to invoke the default method on an NPObject. // In this case the default method just returns false. // Apparently the plugin won't load properly if we simply // tell the browser we don't have this method. // Called by NPN_InvokeDefault, declared in npruntime.h // Documentation URL: https://developer.mozilla.org/en/NPClass static bool InvokeDefaultCallback(NPObject* object, const NPVariant* args, uint32_t arg_count, NPVariant* result); // Removes the property |name| from |object| and returns true. // Returns false if it can't be removed for some reason. // Called by NPN_RemoveProperty, declared in npruntime.h // Documentation URL: https://developer.mozilla.org/en/NPClass static bool RemovePropertyCallback(NPObject* object, NPIdentifier name); // Sets the property |name| of |object| to |value| and return true. 
// Returns false if |name| is not the name of a settable property on |object| // or if something else goes wrong. // Called by NPN_SetProperty, declared in npruntime.h // Documentation URL: https://developer.mozilla.org/en/NPClass static bool SetPropertyCallback(NPObject* object, NPIdentifier name, const NPVariant* value); // Not called GetInstance because that would be misleading. // GetInstance would imply a singleton design pattern. ToTYPE implies // the type cast which is appropriate. static ScriptingBridge * ToInstance(NPObject * object); Scriptable * scriptable_instance_; }; template<typename ScriptableType> NPClass* ScriptingBridge::GetNPSimpleClass() { printf("Duality: ScriptingBridge::GetNPSimpleClass was called!\n"); fflush(stdout); void* np_class_pointer = NPN_MemAlloc(sizeof(NPClass)); NPClass* np_class = static_cast<NPClass *>(np_class_pointer); np_class->structVersion = NP_CLASS_STRUCT_VERSION; np_class->allocate = ScriptingBridge::AllocateCallback<ScriptableType>; np_class->deallocate = ScriptingBridge::DeallocateCallback; np_class->invalidate = ScriptingBridge::InvalidateCallback; np_class->hasMethod = ScriptingBridge::HasMethodCallback; np_class->invoke = ScriptingBridge::InvokeCallback; np_class->invokeDefault = ScriptingBridge::InvokeDefaultCallback; np_class->hasProperty = ScriptingBridge::HasPropertyCallback; np_class->getProperty = ScriptingBridge::GetPropertyCallback; np_class->setProperty = ScriptingBridge::SetPropertyCallback; np_class->removeProperty = ScriptingBridge::RemovePropertyCallback; np_class->enumerate = NULL; np_class->construct = NULL; return np_class; } template<typename ScriptableType> NPObject* ScriptingBridge::AllocateCallback(NPP npp, NPClass* npclass) { printf("Duality: ScriptingBridge::Allocate was called!\n"); fflush(stdout); ScriptableType* scriptable_object = new ScriptableType(); ScriptingBridge* bridge = new ScriptingBridge(scriptable_object); bridge->Init(); scriptable_object->Init(npp); printf("Duality: 
ScriptingBridge::Allocate returning a bridge!\n"); fflush(stdout); return bridge; } #endif // EXAMPLES_SCRIPTABLE_DUALITY_SCRIPTING_BRIDGE_H_
43.111111
79
0.728169
[ "object" ]
0c0bc323a2945fcd55c048f01b701c54f933b7b9
1,079
h
C
Gems/GraphCanvas/Code/Source/Components/Nodes/Comment/CommentLayerControllerComponent.h
cypherdotXd/o3de
bb90c4ddfe2d495e9c00ebf1e2650c6d603a5676
[ "Apache-2.0", "MIT" ]
11
2021-07-08T09:58:26.000Z
2022-03-17T17:59:26.000Z
Gems/GraphCanvas/Code/Source/Components/Nodes/Comment/CommentLayerControllerComponent.h
RoddieKieley/o3de
e804fd2a4241b039a42d9fa54eaae17dc94a7a92
[ "Apache-2.0", "MIT" ]
29
2021-07-06T19:33:52.000Z
2022-03-22T10:27:49.000Z
Gems/GraphCanvas/Code/Source/Components/Nodes/Comment/CommentLayerControllerComponent.h
RoddieKieley/o3de
e804fd2a4241b039a42d9fa54eaae17dc94a7a92
[ "Apache-2.0", "MIT" ]
4
2021-07-06T19:24:43.000Z
2022-03-31T12:42:27.000Z
/* * Copyright (c) Contributors to the Open 3D Engine Project. * For complete copyright and license terms please see the LICENSE at the root of this distribution. * * SPDX-License-Identifier: Apache-2.0 OR MIT * */ #pragma once #include <Source/Components/LayerControllerComponent.h> namespace GraphCanvas { class CommentLayerControllerComponent : public LayerControllerComponent { public: static void Reflect(AZ::ReflectContext* context) { AZ::SerializeContext* serializeContext = azrtti_cast<AZ::SerializeContext*>(context); if (serializeContext) { serializeContext->Class<CommentLayerControllerComponent, LayerControllerComponent>() ->Version(0) ; } } AZ_COMPONENT(CommentLayerControllerComponent, "{E6E6A329-40DA-4F95-B42E-6843DF2B6E2E}", LayerControllerComponent); CommentLayerControllerComponent() : LayerControllerComponent("CommentLayer", CommentOffset) { } }; }
27.666667
122
0.650602
[ "3d" ]
0c0fd024149f6845d1eb851d2023c5ae04f9dead
8,061
h
C
src/cpIpcNode.h
asc135/CodePort
306d40d0a6d5ccb249b22249f2b3702ac09c021b
[ "BSD-3-Clause" ]
null
null
null
src/cpIpcNode.h
asc135/CodePort
306d40d0a6d5ccb249b22249f2b3702ac09c021b
[ "BSD-3-Clause" ]
null
null
null
src/cpIpcNode.h
asc135/CodePort
306d40d0a6d5ccb249b22249f2b3702ac09c021b
[ "BSD-3-Clause" ]
null
null
null
// ---------------------------------------------------------------------------- // CodePort++ // // A Portable Operating System Abstraction Library // Copyright 2012 Amardeep S. Chana. All rights reserved. // Use of this software is bound by the terms of the Modified BSD License. // // Module Name: cpIpcNode.h // // Description: IPC endpoint node. // // Platform: common // // History: // 2012-09-28 asc Creation. // 2012-12-13 asc Added multiple dispatch thread capability. // 2013-01-17 asc Added exit control semaphore. // 2013-02-01 asc Changed validation mechanism. // 2013-02-13 asc Added SendCtl() and adjusted message field usage. // 2013-03-11 asc Added Connect() and Disconnect() methods. // 2013-03-21 asc Changed Transport object to a pointer to enable inheritance. // 2013-03-22 asc Added support for accumulator timeout handling. // 2013-04-24 asc Added StopNode() method. // 2013-05-16 asc Added CheckForExit() method. // 2013-08-27 asc Refactored name resolver management. // 2013-09-30 asc Added support for node startup sync. // 2013-10-14 asc Added support for watchdog control message. 
// ---------------------------------------------------------------------------- #ifndef CP_IPCNODE_H #define CP_IPCNODE_H #include "cpIpcNodeUtil.h" #include "cpIpcResolver.h" namespace cp { // forward references class Buffer; class Datum; class IoDev; class IpcSegment; class IpcTransport; // ---------------------------------------------------------------------------- // the IPC node interface class IpcNode : public Base { public: // local types typedef void *(*WatchDogFunc_t)(void *pContext); // thread enumerations enum Threads { IpcReceive, IpcTransmit }; // constructor IpcNode(String const &NodeName); // destructor ~IpcNode(); // accessors uint32_t NodeAddr() { return m_NodeAddr; } // return the current node address IpcResolver &Resolver() { return m_Resolver; } // return address resolver instance bool CheckForExit() { return m_Flag1; } // return true if exit has been signalled bool WaitForSync(uint32_t Timeout = k_InfiniteTimeout) { return m_SemStart.Take(Timeout); } // block until start sync condition bool WaitForExit(uint32_t Timeout = k_InfiniteTimeout) { return m_SemExit.Take(Timeout); } // block until node exit condition // send and receive methods uint32_t SendCtl(uint32_t Address, // forward a message to the router uint8_t CtlCode, uint32_t Context = 0, uint8_t Priority = k_IpcDefaultPriority); uint32_t SendBuf(uint32_t Address, // forward a message to the router char const *pBuf, size_t MsgLen, uint8_t MsgType, uint8_t CtlCode = 0, uint32_t Context = 0, uint8_t Priority = k_IpcDefaultPriority); uint32_t SendDat(uint32_t Address, // forward a message to the router Datum &Dat, uint8_t CtlCode = 0, uint32_t Context = 0, uint8_t Priority = k_IpcDefaultPriority); uint32_t SendSeg(IpcSegment *pSeg); // forward a message to the router bool GetResponse(uint32_t MsgId, // get a response IpcSegment *&pResponse, uint32_t Timeout = k_ResponseTimeout); // manipulators bool Connect(uint32_t Address); // sets up a persistent IPC channel bool Disconnect(uint32_t 
Address); // tears down a persistent IPC channel void WatchDogSet(WatchDogFunc_t pFunc, void *pContext); // configure the watchdog function void WatchDog(); // invoke the watch dog operation void StartSync() { m_SemStart.Give(); } // give the start sync semaphore void SignalExit() { m_Flag1 = m_SemExit.Give(); } // signal node to exit void NodeAddrSet(uint32_t Addr) { m_NodeAddr = Addr; } // set the node address void TransportSet(IpcTransport *pTransport); // sets the transport device bool DevicesSet(IoDev *pSendDevice, IoDev *pRecvDevice, uint32_t ValidateNode = 0); // set send and receive device void ResolverNameSet(String const &Name) // set the resolver node name { m_ResolverNodeName = Name; } void ResolverAddrSet(uint32_t Address) // set the resolver node address { m_ResolverNodeAddr = Address; } bool StartNode(); // start I/O operations void StopNode(); // stop I/O operations bool RegisterHandler(DispatchHandler_t pHandler, uint32_t MsgId, uint32_t NumThreads, void *pContext = NULL); // register a handler function to process received messages bool RemoveHandler(DispatchHandler_t pHandler, uint32_t MsgId); // remove a handler function // system management methods void ExpiredAccumNotify(IpcSegment const *pSeg); // notify node of an expired accumulator void ValidMessageNotify(IpcSegment const *pSeg); // notify node of a valid message void FlushAddrCache(); // flush the address cache private: IpcNode(IpcNode const &rhs); // copy constructor (disabled) IpcNode &operator=(IpcNode const &rhs); // assignment operator (disabled) bool CreateSegment(IpcSegment *&pSeg, uint32_t DestAddr, uint8_t Priority, uint32_t Context, uint8_t MsgType, uint8_t OpCode); // create a segment and populate fields uint32_t TransmitMessage(IpcSegment *pSeg) // queue up a message for transmission { return m_TransmitQueue.TransmitMessage(pSeg); } void ProcessTimeouts(); // process timeouts and cleanup stale resources void RecvThread(); // communications receive thread function void 
XmitThread(); // communications transmit thread function static void *ThreadFunction(Thread *pThread); // static thread trampoline function uint32_t m_NodeAddr; // the address of the node using this interface uint32_t m_ResolverNodeAddr; // address of the resolver node String m_ResolverNodeName; // name of the resolver node SemLite m_SemStart; // start sync semaphore SemLite m_SemExit; // exit control semaphore Thread m_RecvThread; // receive thread object Thread m_XmitThread; // transmit thread object IpcTransport *m_PtrTransport; // IPC transport layer object IpcAccumMap m_AccumMap; // map of IPC message segment accumulators IpcTransmitQueue m_TransmitQueue; // queue of outgoing messages IpcResolver m_Resolver; // address resolver and cache WatchDogFunc_t m_PtrWatchDogFunc; // pointer to the watchdog function void *m_PtrWatchDogParam; // parameter to the watchdog function }; } // namespace cp #endif // CP_IPCNODE_H
45.03352
119
0.557871
[ "object" ]
0c20f162888dd8d51a26df1348fd18a446debbe5
2,848
h
C
src/cluster.h
ChrisZMF/kvrocks
0d91bff3b534d7e86b40a5ffe3a9749378b1a9cd
[ "BSD-3-Clause" ]
null
null
null
src/cluster.h
ChrisZMF/kvrocks
0d91bff3b534d7e86b40a5ffe3a9749378b1a9cd
[ "BSD-3-Clause" ]
null
null
null
src/cluster.h
ChrisZMF/kvrocks
0d91bff3b534d7e86b40a5ffe3a9749378b1a9cd
[ "BSD-3-Clause" ]
null
null
null
#pragma once #include <string> #include <vector> #include <bitset> #include <memory> #include <algorithm> #include <unordered_map> #include "status.h" #include "rw_lock.h" #include "redis_cmd.h" #include "redis_slot.h" #include "redis_connection.h" enum { kClusterMaster = 1, kClusterSlave = 2, kClusetNodeIdLen = 40, kClusterPortIncr = 10000, kClusterSlots = HASH_SLOTS_SIZE, }; class ClusterNode { public: explicit ClusterNode(std::string id, std::string host, int port, int role, std::string master_id, std::bitset<kClusterSlots> slots); std::string id_; std::string host_; int port_; int role_; std::string master_id_; std::string slots_info_; std::bitset<kClusterSlots> slots_; std::vector<std::string> replicas; int importing_slot_ = -1; }; struct SlotInfo { int start; int end; struct NodeInfo { std::string host; int port; std::string id; }; std::vector<NodeInfo> nodes; }; typedef std::unordered_map<std::string, std::shared_ptr<ClusterNode>> ClusterNodes; class Server; class Cluster { public: explicit Cluster(Server *svr, std::vector<std::string> binds, int port); Status SetClusterNodes(const std::string &nodes_str, int64_t version, bool force); Status GetClusterNodes(std::string *nodes_str); Status SetNodeId(std::string node_id); Status SetSlot(int slot, std::string node_id); Status GetSlotsInfo(std::vector<SlotInfo> *slot_infos); Status GetClusterInfo(std::string *cluster_infos); uint64_t GetVersion() { return version_; } bool IsValidSlot(int slot) { return slot >= 0 && slot < kClusterSlots; } Status CanExecByMySelf(const Redis::CommandAttributes *attributes, const std::vector<std::string> &cmd_tokens, Redis::Connection *conn); void SetMasterSlaveRepl(); Status MigrateSlot(std::string dst_node, int slot); Status ImportSlot(Redis::Connection *conn, int slot, int state); Status GetMigrateInfo(int slot, std::vector<std::string> *info); Status GetImportInfo(int slot, std::vector<std::string> *info); Status GetSlotKeys(Redis::Connection *conn, int slot, int count, 
std::string *output); std::string GetMyId() const { return myid_; } static bool SubCommandIsExecExclusive(const std::string &subcommand); private: std::string GenNodesDescription(); SlotInfo GenSlotNodeInfo(int start, int end, std::shared_ptr<ClusterNode> n); Status ParseClusterNodes(const std::string &nodes_str, ClusterNodes *nodes, std::unordered_map<int, std::string> *slots_nodes); Server *svr_; std::vector<std::string> binds_; int port_; int size_; int64_t version_; std::string myid_; std::shared_ptr<ClusterNode> myself_; ClusterNodes nodes_; std::shared_ptr<ClusterNode> slots_nodes_[kClusterSlots]; std::mutex cluster_mutex_; };
29.978947
88
0.711728
[ "vector" ]
0c2a6d9642ea40af41af29becce6954781d309f4
5,399
c
C
d/retired/rolon.c
gesslar/shadowgate
97ce5d33a2275bb75c0cf6556602564b7870bc77
[ "MIT" ]
13
2019-07-19T05:24:44.000Z
2021-11-18T04:08:19.000Z
d/retired/rolon.c
gesslar/shadowgate
97ce5d33a2275bb75c0cf6556602564b7870bc77
[ "MIT" ]
null
null
null
d/retired/rolon.c
gesslar/shadowgate
97ce5d33a2275bb75c0cf6556602564b7870bc77
[ "MIT" ]
13
2019-09-12T06:22:38.000Z
2022-01-31T01:15:12.000Z
#include <std.h> #include "/d/islands/pirates/piratedefs.h" inherit MONSTER; void create() { object equip; ::create(); set_id(({"rolon","human","Rolon","pirate"})); set_name("Rolon"); set_short("%^BOLD%^%^BLACK%^Rolon %^BOLD%^%^RED%^Garrote%^BOLD%^%^BLACK%^, %^BOLD%^%^WHITE%^pirate extraordinaire%^RESET%^"); set_long("%^BOLD%^%^BLACK%^Rolon appears to be a massive-sized human. He is wearing %^YELLOW%^flashy " "clothing%^BLACK%^, including a %^BLUE%^luxurious coat %^BLACK%^lined with %^YELLOW%^gold piping%^BLACK%^. " "This man appears to be a %^CYAN%^man of the seas %^BLACK%^somehow, due to the assorted jewelry adorning his " "body, the eye patch that covers a %^RED%^gruesome scar %^BLACK%^over his right eye, and the sailor's trousers " "that adorn his legs. A %^WHITE%^gleaming suit of chainmail %^BLACK%^is strapped across his torso, while, " "lastly, a pair of dashing %^YELLOW%^boots made for a swashbuckler%^BLACK%^, with the initials %^RED%^RG" "%^BLACK%^ inscribed on the side of each boot, cover his feet.%^RESET%^"); set_race("human"); set_body_type("human"); set_gender("male"); set_alignment(3); set_diety("bane"); set_class("fighter"); set_hd(26,10); set_guild_level("fighter",26); set_mlevel("fighter",26); set_max_hp(1000); set_hp(1000); set_exp(8000); set_overall_ac(0); set("aggressive",3); set_stats("strength",18); set_stats("intelligence",14); set_stats("wisdom",14); set_stats("dexterity",16); set_stats("constitution",17); set_stats("charisma",13); set_property("full attacks",1); set_wielding_limbs(({"left hand","right hand"})); command("message in %^RESET%^enters nimbly and nonchalantly."); command("message out %^RESET%^saunters $D %^RESET%^nonchalantly."); command("speech %^BOLD%^%^GREEN%^state with a %^MAGENTA%^swashbuckling tone%^RESET%^"); equip = new(OBJ"boots"); equip->set_property("monsterweapon",1); equip->move(TO); command("wear boots"); equip = new("/d/deku/sanctuary/obj/lightningbolt"); equip->set_property("monsterweapon",1); equip->move(TO); 
command("wield rapier"); equip = new(OBJ"rapier"); equip->set_property("monsterweapon",1); equip->move(TO); command("wield rapier"); equip = new(OBJ"newcoat"); if(!random(3)) equip->set_property("monsterweapon",1); equip->move(TO); command("wear coat"); equip = new("/d/retired/obj/rolonchain"); equip->move(TO); command("wear chain"); equip = new("/d/dagger/Torm/obj/eyepatch"); if(random(3)) equip->set_property("monsterweapon",1); equip->move(TO); command("wear eyepatch"); equip = new("/d/dagger/Torm/obj/pantsc"); if(random(3)) equip->set_property("monsterweapon",1); equip->move(TO); command("wear pants"); set_funcs(({"slash","slash","rushem","flashem","flashem","shatterit","sunderit"})); set_func_chance(80); set_fighter_style("swashbuckler"); set("aggresive","agg_fun"); set_property("swarm",1); add_money("silver", random(300)+100); add_money("copper", random(30)+10); set_mob_magic_resistance("average"); set_monster_feats(({ "ambidexterity", "two weapon fighting", "improved two weapon fighting", "whirl", "unassailable parry", "toughness", "improved toughness", "damage resistance", "regeneration", "powerattack", "shatter", "sunder", "rush", })); set_emotes(1, ({"%^BOLD%^%^GREEN%^Rolon states with a %^BOLD%^%^MAGENTA%^swashbuckling tone:%^BOLD%^" "%^GREEN%^ Arghh.%^RESET%^","%^BOLD%^%^GREEN%^Rolon %^RESET%^swears like a %^BOLD%^%^CYAN%^drunken sailor" "%^RESET%^.","%^BOLD%^%^GREEN%^Rolon %^BOLD%^%^CYAN%^breathes %^RESET%^in nonchalantly and as he %^BOLD%^" "%^CYAN%^exhales%^RESET%^, a %^BOLD%^%^CYAN%^stiff breeze of the sea %^RESET%^can be smelled in the %^BOLD%^" "%^CYAN%^air%^RESET%^.","%^BOLD%^%^GREEN%^Rolon %^RESET%^flips a %^BOLD%^%^YELLOW%^golden coin %^RESET%^into " "the %^BOLD%^%^CYAN%^air%^RESET%^, catching it in his hands as it falls back down quickly.%^RESET%^","%^BOLD%^" "%^GREEN%^Rolon %^RESET%^begins to emit a %^BOLD%^%^GREEN%^bright green aura %^RESET%^momentarily for some " "reason, before it slowly fades away.%^RESET%^" }), 0); } int agg_fun() { if 
(TP->query_invis() && !TP->query_true_invis()) TP->set_invis(0); if (sizeof(TO->query_attackers()) < 1) force_me("say En Garde!"); force_me("kill "+TP->query_name()); } void slash(object targ){ tell_object(targ,"%^BOLD%^%^MAGENTA%^Rolon leaps forward in a riposte immediately following your attack, " "giving you no chance to deflect the blow!%^RESET%^"); tell_room(environment(targ),"%^BOLD%^%^BLUE%^Rolon leaps forward in a riposte immediately following " +targ->QCN+"'s attack, giving "+targ->QO+" no chance to deflect the blow!%^RESET%^",targ); targ->do_damage("torso",random(15)+20); } void rushem(object targ){ command("rush "+targ->query_cap_name()); } void flashem(object targ){ command("flash "+targ->query_cap_name()); } void sunderit(object targ){ if(!objectp(targ)) return; if(!objectp(TO)) return; TO->force_me("sunder "+targ->query_name()); } void shatterit(object targ){ if(!objectp(targ)) return; if(!objectp(TO)) return; TO->force_me("shatter "+targ->query_name()); }
39.992593
129
0.642712
[ "object" ]
0c3a4355e85b26057b64e8e4de1a4fc42d4469b0
34,798
h
C
media_driver/agnostic/common/codec/hal/codechal_decode_vc1.h
xinfengz/media-driver
310104a4693c476a215de13e7e9fabdf2afbad0a
[ "Intel", "BSD-3-Clause", "MIT" ]
1
2019-09-26T23:48:34.000Z
2019-09-26T23:48:34.000Z
media_driver/agnostic/common/codec/hal/codechal_decode_vc1.h
xinfengz/media-driver
310104a4693c476a215de13e7e9fabdf2afbad0a
[ "Intel", "BSD-3-Clause", "MIT" ]
null
null
null
media_driver/agnostic/common/codec/hal/codechal_decode_vc1.h
xinfengz/media-driver
310104a4693c476a215de13e7e9fabdf2afbad0a
[ "Intel", "BSD-3-Clause", "MIT" ]
1
2017-12-11T03:28:35.000Z
2017-12-11T03:28:35.000Z
/* * Copyright (c) 2011-2017, Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ //! //! \file codechal_decode_vc1.h //! \brief Defines the decode interface extension for VC1. //! \details Defines all types, macros, and functions required by CodecHal for VC1 decoding. //! Definitions are not externally facing. //! #ifndef __CODECHAL_DECODER_VC1_H__ #define __CODECHAL_DECODER_VC1_H__ #include "codechal_decoder.h" //! //! \def CODECHAL_DECODE_VC1_UNEQUAL_FIELD_WA_SURFACES //! Unequal Field Surface max index //! #define CODECHAL_DECODE_VC1_UNEQUAL_FIELD_WA_SURFACES 4 //! //! \def CODECHAL_DECODE_VC1_NUM_SYNC_TAGS //! Sync Tags Number for StateHeap Settings //! #define CODECHAL_DECODE_VC1_NUM_SYNC_TAGS 36 //! //! \def CODECHAL_DECODE_VC1_INITIAL_DSH_SIZE //! Initial Dsh Size for StateHeap Settings //! #define CODECHAL_DECODE_VC1_INITIAL_DSH_SIZE (MHW_PAGE_SIZE * 2) //! //! \def CODECHAL_DECODE_VC1_FAST_CHROMA_MV //! Fast Chroma Mv calculation //! 
#define CODECHAL_DECODE_VC1_FAST_CHROMA_MV(cmv) ((cmv) - ((cmv) % 2)) //! //! \def CODECHAL_DECODE_VC1_CHROMA_MV //! Chroma Mv calculation //! #define CODECHAL_DECODE_VC1_CHROMA_MV(lmv) (((lmv) + CODECHAL_DECODE_VC1_RndTb[(lmv) & 3]) >> 1) //! //! \def CODECHAL_DECODE_VC1_BITSTRM_BUF_LEN //! Bitstream Buffer Length //! #define CODECHAL_DECODE_VC1_BITSTRM_BUF_LEN 8 //! //! \def CODECHAL_DECODE_VC1_STUFFING_BYTES //! #define CODECHAL_DECODE_VC1_STUFFING_BYTES 64 //! //! \def CODECHAL_DECODE_VC1_SC_PREFIX_LENGTH //! Sc prefix lengith //! #define CODECHAL_DECODE_VC1_SC_PREFIX_LENGTH 3 //! //! \enum CODECHAL_DECODE_VC1_BINDING_TABLE_OFFSET_OLP //! VC1 OLP Binding Table Offset //! typedef enum _CODECHAL_DECODE_VC1_BINDING_TABLE_OFFSET_OLP { CODECHAL_DECODE_VC1_OLP_SRC_Y = 0, CODECHAL_DECODE_VC1_OLP_SRC_UV = 1, CODECHAL_DECODE_VC1_OLP_DST_Y = 3, CODECHAL_DECODE_VC1_OLP_DST_UV = 4, CODECHAL_DECODE_VC1_OLP_NUM_SURFACES = 6 }CODECHAL_DECODE_VC1_BINDING_TABLE_OFFSET_OLP; //! //! \enum CODECHAL_DECODE_VC1_DMV_INDEX //! VC1 DMV index //! typedef enum _CODECHAL_DECODE_VC1_DMV_INDEX { CODECHAL_DECODE_VC1_DMV_EVEN = 0, CODECHAL_DECODE_VC1_DMV_ODD = 1, CODECHAL_DECODE_VC1_DMV_MAX = 2 }CODECHAL_DECODE_VC1_DMV_INDEX; //! //! \struct CODECHAL_DECODE_VC1_I_LUMA_BLOCKS //! \brief Define Look Up Table Structure for Luma Polarity of Interlaced Picture //! typedef struct _CODECHAL_DECODE_VC1_I_LUMA_BLOCKS { uint8_t u8NumSamePolarity; union { uint8_t u8Polarity; uint8_t u8MvIndex0; }; uint8_t u8MvIndex1; uint8_t u8MvIndex2; uint8_t u8MvIndex3; }CODECHAL_DECODE_VC1_I_LUMA_BLOCKS; //! //! \struct CODECHAL_DECODE_VC1_P_LUMA_BLOCKS //! \brief Define Look Up Table for Luma Inter-coded Blocks of Progressive Picture //! typedef struct _CODECHAL_DECODE_VC1_P_LUMA_BLOCKS { uint8_t u8NumIntercodedBlocks; uint8_t u8MvIndex1; uint8_t u8MvIndex2; uint8_t u8MvIndex3; }CODECHAL_DECODE_VC1_P_LUMA_BLOCKS; //! //! \struct CODECHAL_DECODE_VC1_BITSTREAM //! \brief Define variables for VC1 bitstream //! 
typedef struct _CODECHAL_DECODE_VC1_BITSTREAM { uint8_t* pOriginalBitBuffer; // pointer to the original capsuted bitstream uint8_t* pOriginalBufferEnd; // pointer to the end of the original uncapsuted bitstream uint32_t u32ZeroNum; // number of continuous zeros before the current bype. uint32_t u32ProcessedBitNum; // number of bits being processed from initiation uint8_t CacheBuffer[CODECHAL_DECODE_VC1_BITSTRM_BUF_LEN + 4]; // cache buffer of uncapsuted raw bitstream uint32_t* pu32Cache; // pointer to the cache buffer uint32_t* pu32CacheEnd; // pointer to the updating end of the cache buffer uint32_t* pu32CacheDataEnd; // pointer to the last valid uint32_t of the cache buffer int32_t iBitOffset; // offset = 32 is the MSB, offset = 1 is the LSB. int32_t iBitOffsetEnd; // bit offset of the last valid uint32_t bool bIsEBDU; // 1 if it is EBDU and emulation prevention bytes are present. } CODECHAL_DECODE_VC1_BITSTREAM, *PCODECHAL_DECODE_VC1_BITSTREAM; //! //! \struct CODECHAL_DECODE_VC1_OLP_PARAMS //! \brief Define variables of VC1 Olp params for hw cmd //! typedef struct _CODECHAL_DECODE_VC1_OLP_PARAMS { PMOS_COMMAND_BUFFER pCmdBuffer; PMHW_PIPE_CONTROL_PARAMS pPipeControlParams; PMHW_STATE_BASE_ADDR_PARAMS pStateBaseAddrParams; PMHW_VFE_PARAMS pVfeParams; PMHW_CURBE_LOAD_PARAMS pCurbeLoadParams; PMHW_ID_LOAD_PARAMS pIdLoadParams; }CODECHAL_DECODE_VC1_OLP_PARAMS, *PCODECHAL_DECODE_VC1_OLP_PARAMS; //! //! \struct CODECHAL_DECODE_VC1_OLP_STATIC_DATA //! \brief Define VC1 OLP Static Data //! 
typedef struct _CODECHAL_DECODE_VC1_OLP_STATIC_DATA { // uint32_t 0 union { struct { uint32_t Reserved; }; struct { uint32_t Value; }; } DW0; // uint32_t 1 union { struct { uint32_t BlockWidth : 16; // in byte uint32_t BlockHeight : 16; // in byte }; struct { uint32_t Value; }; } DW1; // uint32_t 2 union { struct { uint32_t Profile : 1; uint32_t RangeExpansionFlag : 1; // Simple & Main Profile only uint32_t PictureUpsamplingFlag : 2; // 2:H, 3:V uint32_t : 1; uint32_t InterlaceFieldFlag : 1; uint32_t : 2; uint32_t RangeMapUV : 3; uint32_t RangeMapUVFlag : 1; uint32_t RangeMapY : 3; uint32_t RangeMapYFlag : 1; uint32_t : 4; uint32_t ComponentFlag : 1; uint32_t : 11; }; struct { uint32_t Value; }; } DW2; // uint32_t 3 union { struct { uint32_t Reserved; }; struct { uint32_t Value; }; } DW3; // uint32_t 4 union { struct { uint32_t SourceDataBindingIndex; }; struct { uint32_t Value; }; } DW4; // uint32_t 5 union { struct { uint32_t DestDataBindingIndex; }; struct { uint32_t Value; }; } DW5; // uint32_t 6 union { struct { uint32_t Reserved; }; struct { uint32_t Value; }; } DW6; // uint32_t 7 union { struct { uint32_t Reserved; }; struct { uint32_t Value; }; } DW7; } CODECHAL_DECODE_VC1_OLP_STATIC_DATA, *PCODECHAL_DECODE_VC1_OLP_STATIC_DATA; //! //! \def CODECHAL_DECODE_VC1_CURBE_SIZE_OLP //! VC1 Curbe Size for Olp //! #define CODECHAL_DECODE_VC1_CURBE_SIZE_OLP (sizeof(CODECHAL_DECODE_VC1_OLP_STATIC_DATA)) //! //! \struct CODECHAL_DECODE_VC1_KERNEL_HEADER_CM //! \brief Define VC1 Kernel Header CM //! typedef struct _CODECHAL_DECODE_VC1_KERNEL_HEADER_CM { int nKernelCount; CODECHAL_KERNEL_HEADER OLP; CODECHAL_KERNEL_HEADER IC; } CODECHAL_DECODE_VC1_KERNEL_HEADER_CM, *PCODECHAL_DECODE_VC1_KERNEL_HEADER_CM; //*------------------------------------------------------------------------------ //* Codec Definitions //*------------------------------------------------------------------------------ //! //! \class CodechalDecodeVc1 //! 
\brief This class defines the member fields, functions etc used by VC1 decoder. //! class CodechalDecodeVc1 : public CodechalDecode { public: //! //! \brief Constructor //! \param [in] hwInterface //! Hardware interface //! \param [in] debugInterface //! Debug interface //! \param [in] standardInfo //! The information of decode standard for this instance //! CodechalDecodeVc1( CodechalHwInterface *hwInterface, CodechalDebugInterface* debugInterface, PCODECHAL_STANDARD_INFO standardInfo); //! //! \brief Destructor //! ~CodechalDecodeVc1(); //! //! \brief Allocate and initialize VC1 decoder standard //! \param [in] settings //! Pointer to CODECHAL_SETTINGS //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS AllocateStandard( PCODECHAL_SETTINGS settings) override; //! //! \brief Set states for each frame to prepare for VC1 decode //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS SetFrameStates() override; //! //! \brief VC1 decoder state level function //! \details State level function for VC1 decoder //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS DecodeStateLevel() override; //! //! \brief VC1 decoder primitive level function //! \details Primitive level function for GEN specific VC1 decoder //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS DecodePrimitiveLevel() override; MOS_STATUS InitMmcState() override; //! //! \brief VC1 decoder primitive level function for VLD mode //! \details Primitive level function for GEN specific VC1 decoder for VLD mode //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! virtual MOS_STATUS DecodePrimitiveLevelVLD(); //! //! \brief VC1 decoder primitive level function for IT mode //! \details Primitive level function for GEN specific VC1 decoder for IT mode //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! 
virtual MOS_STATUS DecodePrimitiveLevelIT(); // no downsampling //! //! \brief Allocate resources for VC1 decoder //! \details Allocate resources for VC1 decoder //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! virtual MOS_STATUS AllocateResources(); //! //! \brief Set GEN specific Curbe data for VC1 OLP //! \details Configure Curbe data for VC1 OLP Y / UV component //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! virtual MOS_STATUS SetCurbeOlp(); //! //! \brief Update VC1 Kernel State //! \details Get Decode Kernel and Update Kernel State //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! virtual MOS_STATUS UpdateVc1KernelState(); virtual MOS_STATUS AddVc1OlpCmd( PCODECHAL_DECODE_VC1_OLP_PARAMS vc1OlpParams); //! //! \brief Return if Olp needed //! \details Return value of member bOlpNeeded //! \return bool //! true if Olp needed, else false //! bool IsOlpNeeded() { return bOlpNeeded; }; PCODEC_VC1_PIC_PARAMS pVc1PicParams = nullptr; //!< VC1 Picture Params MOS_SURFACE sDestSurface; //!< Pointer to MOS_SURFACE of render surface PMOS_RESOURCE presReferences[CODEC_MAX_NUM_REF_FRAME_NON_AVC]; //!< Reference Resources Handle list bool bDeblockingEnabled = false; //!< Indicator of deblocking enabling bool bUnequalFieldWaInUse = false; //!< Indicator of Unequal Field WA protected: //! //! \brief Construct VC1 decode bitstream buffer //! \details For WaVC1ShortFormat. Construct VC1 decode bistream buffer by // adding a stuffing byte ahead of frame bitstream data. It's for // simple & main profile short format only. //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS ConstructBistreamBuffer(); //! //! \brief Handle VC1 skipped frame //! \details For skipped frame, use reference frame instead //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! virtual MOS_STATUS HandleSkipFrame(); //! //! 
\brief Initialize Unequal Field Surface //! \details Initialize Unequal Field Surface for VC1 decoder //! \param [in] refListIdx //! Index for pic in RefList //! \param [in] nullHwInUse //! Indicate if null HW is in use or not //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS InitializeUnequalFieldSurface( uint8_t refListIdx, bool nullHwInUse); //! //! \brief Formats destination surface for VC1 decoder //! \details Formats the destination surface, in the pack case the UV surface // is moved to be adjacent to the UV surface such that NV12 // formatting is maintained when the surface is returned to SW, // in the unpack case the UV surface is moved to be 32 - pixel rows // away from the Y surface so that during decoding HW will not // overwrite the UV surface //! \param [in] srcSurface //! Source Surface //! \param [in] dstSurface //! Destiny Surface //! \param [in] pack //! Indicate pack case or unpack case //! \param [in] nullHwInUse //! Indicate if null HW is in use or not //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS FormatUnequalFieldPicture( MOS_SURFACE srcSurface, MOS_SURFACE dstSurface, bool pack, bool nullHwInUse); //! //! \brief Parse Picture Header for VC1 decoder //! \details Parse Picture Header in bitstream for VC1 decoder //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS ParsePictureHeader(); //! //! \brief Parse Picture Header for VC1 decoder Advanced profile //! \details Parse Picture Header in bitstream for VC1 decoder Advaced profile //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS ParsePictureHeaderAdvanced(); //! //! \brief Parse Picture Header for VC1 decoder Simple profile //! \details Parse Picture Header in bitstream for VC1 decoder Simple profile //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS ParsePictureHeaderMainSimple(); //! //! 
\brief Initialise bitstream for VC1 decoder //! \details Initialise members' value of bitstream struct for VC1 decoder //! \param [in] buffer //! Original bitstream buffer //! \param [in] length //! Original bitstream length //! \param [in] isEBDU //! Indicate if it is EBDU //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS InitialiseBitstream( uint8_t* buffer, uint32_t length, bool isEBDU); //! //! \brief Parse bitplane for VC1 decoder //! \details Parse bitplane according to bitplane mode for VC1 decoder //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS ParseBitplane(); //! //! \brief Parse bitplane in Norm2 Mode for VC1 decoder //! \details Parse bitplane in Norm2 Mode for VC1 decoder //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS BitplaneNorm2Mode(); //! //! \brief Parse bitplane in Norm6 Mode for VC1 decoder //! \details Parse bitplane in Norm6 Mode for VC1 decoder //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS BitplaneNorm6Mode(); //! //! \brief Parse bitplane in Rowskip Mode for VC1 decoder //! \details Parse bitplane in Rowskip Mode for VC1 decoder //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS BitplaneRowskipMode(); //! //! \brief Parse bitplane in Colskip Mode for VC1 decoder //! \details Parse bitplane in Colskip Mode for VC1 decoder //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS BitplaneColskipMode(); //! //! \brief Parse bitplane quantization for VC1 decoder //! \details Parse bitplane quantization for VC1 decoder //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS ParseVopDquant(); //! //! \brief Parse Mv Range for VC1 decoder //! \details Parse Mv Range for VC1 decoder //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! 
MOS_STATUS ParseMvRange(); //! //! \brief Parse Progressive Mv Mode for VC1 decoder //! \details Parse Progressive Mv Mode for VC1 decoder //! \param [in] MvModeTable[] //! const MV Mode Table //! \param [out] pu32MvMode //! pointer to Mv Mode //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS ParseProgressiveMvMode( const uint32_t MvModeTable[], uint32_t* pu32MvMode); //! //! \brief Parse Interlace Mv Mode for VC1 decoder //! \details Parse Interlace Mv Mode for VC1 decoder //! \param [in] isPPicture //! indicate if it is P picture //! \param [out] mvmode //! pointer to Mv Mode //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS ParseInterlaceMVMode( bool isPPicture, uint32_t* mvmode); //! //! \brief Parse I Picture Layer for VC1 decoder //! \details Parse I Picture Layer for VC1 decoder advanced profile //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS ParsePictureLayerIAdvanced(); //! //! \brief Parse P Picture Layer for VC1 decoder //! \details Parse P Picture Layer for VC1 decoder advanced profile //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS ParsePictureLayerPAdvanced(); //! //! \brief Parse B Picture Layer for VC1 decoder //! \details Parse B Picture Layer for VC1 decoder advanced profile //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS ParsePictureLayerBAdvanced(); //! //! \brief Parse P Field Picture Layer for VC1 decoder //! \details Parse P Field Picture Layer for VC1 decoder advanced profile //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS ParseFieldPictureLayerPAdvanced(); //! //! \brief Parse B Field Picture Layer for VC1 decoder //! \details Parse B Field Picture Layer for VC1 decoder advanced profile //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! 
MOS_STATUS ParseFieldPictureLayerBAdvanced(); //! //! \brief Get Macroblock Offset for VC1 decoder //! \details Get Macroblock Offset for VC1 decoder slice params //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS GetSliceMbDataOffset(); //! //! \brief Perform Olp for VC1 decoder //! \details Perform Olp for VC1 decoder //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS PerformVc1Olp(); //! //! \brief Initializes the VC1 OLP state //! \details Initializes the VC1 OLP state based on parameters saved in InitInterface //! command buffer or indirect state //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS InitKernelStateVc1Olp(); //! //! \brief Pack Motion Vectors in Macro Block State //! \param [in] vc1MbState //! Pointer to Vc1 Macro Block State //! \param [in] mv //! Pointer to Motion Vector //! \param [out] packedLumaMvs //! Pointer to Packed Luma Motion Vectors //! \param [out] packedChromaMv //! Pointer to Packed Chroma Motion Vectors //! \return void //! virtual void PackMotionVectors( PMHW_VDBOX_VC1_MB_STATE vc1MbState, int16_t *mv, int16_t *packedLumaMvs, int16_t *packedChromaMv); // Parameters passed by application uint16_t u16PicWidthInMb = 0; //!< Picture Width in MB width count uint16_t u16PicHeightInMb = 0; //!< Picture Height in MB height count bool bIntelProprietaryFormatInUse = false; //!< Indicator of using a Intel proprietary entrypoint. 
bool bShortFormatInUse = false; //!< Short format slice data bool bVC1OddFrameHeight = false; //!< VC1 Odd Frame Height uint32_t u32DataSize = 0; //!< Size of the data contained in presDataBuffer uint32_t u32DataOffset = 0; //!< Offset of the data contained in presDataBuffer uint32_t u32NumSlices = 0; //!< [VLD mode] Number of slices to be decoded uint32_t u32NumMacroblocks = 0; //!< [IT mode] Number of MBs to be decoded uint32_t u32NumMacroblocksUV = 0; //!< [IT mode] Number of UV MBs to be decoded PCODEC_VC1_SLICE_PARAMS pVc1SliceParams = nullptr; //!< VC1 Slice Params PCODEC_VC1_MB_PARAMS pVc1MbParams = nullptr; //!< VC1 Macro Block Params MOS_SURFACE sDeblockSurface; //!< Deblock Surface MOS_RESOURCE resDataBuffer; //!< Handle of residual difference surface MOS_RESOURCE resBitplaneBuffer; //!< Handle of Bitplane buffer uint8_t* pDeblockDataBuffer = nullptr; //!< Pointer to the deblock data // Internally maintained MOS_RESOURCE resMfdDeblockingFilterRowStoreScratchBuffer; //!< Handle of MFD Deblocking Filter Row Store Scratch data surface MOS_RESOURCE resBsdMpcRowStoreScratchBuffer; //!< Handle of BSD/MPC Row Store Scratch data surface MOS_RESOURCE resVc1BsdMvData[CODECHAL_DECODE_VC1_DMV_MAX]; //!< Handle of VC1 BSD MV Data PCODECHAL_VC1_VLD_SLICE_RECORD pVldSliceRecord = nullptr; //!< [VLD mode] Slice record PCODEC_REF_LIST pVc1RefList[CODECHAL_NUM_UNCOMPRESSED_SURFACE_VC1]; //!< VC1 Reference List MOS_RESOURCE resSyncObject; //!< Handle of Sync Object MOS_RESOURCE resPrivateBistreamBuffer; //!< Handle of Private Bistream Buffer uint32_t u32PrivateBistreamBufferSize = 0; //!< Size of Private Bistream Buffer CODECHAL_DECODE_VC1_BITSTREAM Bitstream; //!< VC1 Bitstream // PCODECHAL_DECODE_VC1_BITSTREAM pBitstream; //!< Pointer to Bitstream uint16_t u16PrevAnchorPictureTFF = 0; //!< Previous Anchor Picture Top Field First(TFF) bool bPrevEvenAnchorPictureIsP = false; //!< Indicator of Previous Even Anchor Picture P frame bool bPrevOddAnchorPictureIsP = false; //!< 
Indicator of Previous Odd Anchor Picture P frame uint16_t u16ReferenceDistance = 0; //!< REFDIST. // OLP related MHW_KERNEL_STATE OlpKernelState; //!< Olp Kernel State uint8_t* OlpKernelBase = nullptr; //!< Pointer to Kernel Base Address uint32_t OlpKernelSize = 0; //!< Olp Kernel Size bool bOlpNeeded = false; //!< Indicator if Olp Needed uint16_t u16OlpPicWidthInMb = 0; //!< Width of Olp Pic in Macro block uint16_t u16OlpPicHeightInMb = 0; //!< Height of Olp Pic in Macro block uint32_t u32OlpCurbeStaticDataLength = 0; //!< Olp Curbe Static Data Length uint32_t u32OlpDshSize = 0; //!< Olp DSH Size // IT mode related MHW_BATCH_BUFFER ItObjectBatchBuffer; //!< IT mode Object Batch Buffer uint8_t bFieldPolarity = 0; //!< Field Polarity Offset MOS_SURFACE sUnequalFieldSurface[CODECHAL_DECODE_VC1_UNEQUAL_FIELD_WA_SURFACES]; //!< Handle of Unequal Field Surface uint8_t u8UnequalFieldRefListIdx[CODECHAL_DECODE_VC1_UNEQUAL_FIELD_WA_SURFACES]; //!< Reference list of Unequal Field Surface uint8_t u8UnequalFieldSurfaceForBType = 0; //!< Unequal Field Surface Index for B frame uint8_t u8CurrUnequalFieldSurface = 0; //!< Current Unequal Field Surface Index // HuC copy related bool bHuCCopyInUse; //!< a sync flag used when huc copy and decoder run in the different VDBOX MOS_RESOURCE resSyncObjectWaContextInUse; //!< signals on the video WA context MOS_RESOURCE resSyncObjectVideoContextInUse; //!< signals on the video context private: //! //! \brief Wrapper function to read bits from VC1 bitstream //! \param [in] bitsRead //! Number of bits to be read //! \param [out] value //! VC1 bitstream status, EOS if reaching end of stream, else bitstream value //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS GetBits(uint32_t bitsRead, uint32_t &value); //! //! \brief Wrapper function to get VLC from VC1 bitstream according to VLC Table //! \param [in] table //! Pointer to VLC Table //! \param [out] value //! 
VC1 bitstream status, EOS if reaching end of stream, else bitstream value //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS GetVLC(const uint32_t* table, uint32_t & value); //! //! \brief Wrapper function to skip words from VC1 bitstream //! \param [in] dwordNumber //! Number of Dword to be skipped //! \param [out] value //! VC1 bitstream status, EOS if reaching end of stream, else bitstream value //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS SkipWords(uint32_t dwordNumber, uint32_t & value); //! //! \brief Wrapper function to skip bits from VC1 bitstream //! \param [in] bits //! Number of bits to be skipped //! \param [out] value //! VC1 bitstream status, EOS if reaching end of stream, else bitstream value //! \return MOS_STATUS //! MOS_STATUS_SUCCESS if success, else fail reason //! MOS_STATUS SkipBits(uint32_t bits, uint32_t & value); //! //! \brief Read bits from VC1 bitstream //! \param [in] u32BitsRead //! Number of bits to be read //! \return uint32_t //! EOS if reaching end of stream, else bitstream value //! uint32_t GetBits(uint32_t bitsRead); //! //! \brief Update VC1 bitstream memeber value //! \return uint32_t //! EOS if reaching end of stream, else bitstream value //! uint32_t UpdateBitstreamBuffer(); //! //! \brief Get VLC from VC1 bitstream according to VLC Table //! \param [in] table //! Pointer to VLC Table //! \return uint32_t //! EOS if reaching end of stream, else bitstream value //! uint32_t GetVLC(const uint32_t *table); //! //! \brief Read bits from VC1 bitstream and don't update bitstream pointer //! \param [in] u32BitsRead //! Number of bits to be read //! \return uint32_t //! EOS if reaching end of stream, else bitstream value //! uint32_t PeekBits(uint32_t bitsRead); //! //! \brief Skip bits from VC1 bitstream //! \param [in] u32Bits //! Number of bits to be skipped //! \return uint32_t //! EOS if reaching end of stream, else bitstream value //! 
uint32_t SkipBits(uint32_t bitsRead); //! //! \brief Pack Chroma/Luma Motion Vectors for Interlaced frame //! \param [in] u16FieldSelect //! Field Select Index //! \param [in] u16CurrentField //! Current Filed Indicator //! \param [in] bFastUVMotionCompensation //! Fast UV Motion Compensation Indicator //! \param [out] pLmv //! Pointer to Adjusted Luma Motion Vectors //! \param [out] pCmv //! Pointer to Adjusted Chroma Motion Vectors //! \return void //! uint8_t PackMotionVectors_Chroma4MvI( uint16_t fieldSelect, uint16_t currentField, bool fastUVMotionCompensation, int16_t *lmv, int16_t *cmv); //! //! \brief Pack Chroma/Luma Motion Vectors for Picture frame //! \param [in] intraFlags //! Intra Flag Index //! \param [out] lmv //! Pointer to Adjusted Luma Motion Vectors //! \param [out] cmv //! Pointer to Adjusted Chroma Motion Vectors //! \return void //! void PackMotionVectors_Chroma4MvP(uint16_t intraFlags, int16_t *lmv, int16_t *cmv); //! //! \brief Find Median for 3 MVs //! \param [in] mv# //! Motion Vectors //! \return int16_t //! return median for 3 MVs //! int16_t PackMotionVectors_Median3(int16_t mv1, int16_t mv2, int16_t mv3); //! //! \brief Find Median for 4 MVs //! \param [in] mv# //! Motion Vectors //! \return int16_t //! return median for 4 MVs //! int16_t PackMotionVectors_Median4(int16_t mv1, int16_t mv2, int16_t mv3, int16_t mv4); #if USE_CODECHAL_DEBUG_TOOL MOS_STATUS DumpPicParams( PCODEC_VC1_PIC_PARAMS vc1PicParams); MOS_STATUS DumpSliceParams( PCODEC_VC1_SLICE_PARAMS sliceControl); MOS_STATUS DumpMbParams( PCODEC_VC1_MB_PARAMS mbParams); #endif }; #endif // __CODECHAL_DECODER_VC1_H__
38.197585
156
0.574861
[ "render", "object", "vector" ]
0c3fa81b2b684fc605d700b07d66c6c308631f2a
7,925
c
C
tools/tpm2_create.c
asac/tpm2-tools
8e3c266f4de942ae6ab1bfbe0cb947ce36237746
[ "BSD-3-Clause" ]
1
2020-11-03T21:27:05.000Z
2020-11-03T21:27:05.000Z
tools/tpm2_create.c
braincorp/tpm2-tools
b9c8108561237a344b063e9c45e5353dc9114276
[ "BSD-3-Clause" ]
null
null
null
tools/tpm2_create.c
braincorp/tpm2-tools
b9c8108561237a344b063e9c45e5353dc9114276
[ "BSD-3-Clause" ]
null
null
null
/* SPDX-License-Identifier: BSD-3-Clause */ #include <stdlib.h> #include <string.h> #include "files.h" #include "log.h" #include "tpm2.h" #include "tpm2_alg_util.h" #include "tpm2_auth_util.h" #include "tpm2_options.h" #define DEFAULT_ATTRS \ TPMA_OBJECT_DECRYPT|TPMA_OBJECT_SIGN_ENCRYPT|TPMA_OBJECT_FIXEDTPM \ |TPMA_OBJECT_FIXEDPARENT|TPMA_OBJECT_SENSITIVEDATAORIGIN \ |TPMA_OBJECT_USERWITHAUTH typedef struct tpm_create_ctx tpm_create_ctx; struct tpm_create_ctx { struct { const char *ctx_path; const char *auth_str; tpm2_loaded_object object; } parent; struct { TPM2B_SENSITIVE_CREATE sensitive; TPM2B_PUBLIC public; char *sealed_data; char *public_path; char *private_path; char *auth_str; const char *ctx_path; char *alg; char *attrs; char *name_alg; char *policy; } object; struct { UINT8 b :1; UINT8 i :1; UINT8 L :1; UINT8 u :1; UINT8 r :1; UINT8 G :1; } flags; }; #define DEFAULT_KEY_ALG "rsa2048" static tpm_create_ctx ctx = { .object = { .alg = DEFAULT_KEY_ALG }, }; static tool_rc create(ESYS_CONTEXT *ectx) { tool_rc rc = tool_rc_general_error; TPM2B_DATA outside_info = TPM2B_EMPTY_INIT; TPML_PCR_SELECTION creation_pcr = { .count = 0 }; TPM2B_PUBLIC *out_public; TPM2B_PRIVATE *out_private; ESYS_TR object_handle = ESYS_TR_NONE; if (ctx.object.ctx_path) { size_t offset = 0; TPM2B_TEMPLATE template = { .size = 0 }; tool_rc tmp_rc = tpm2_mu_tpmt_public_marshal( &ctx.object.public.publicArea, &template.buffer[0], sizeof(TPMT_PUBLIC), &offset); if (tmp_rc != tool_rc_success) { return tmp_rc; } template.size = offset; tmp_rc = tpm2_create_loaded(ectx, &ctx.parent.object, &ctx.object.sensitive, &template, &object_handle, &out_private, &out_public); if (tmp_rc != tool_rc_success) { return tmp_rc; } } else { TPM2B_CREATION_DATA *creation_data; TPM2B_DIGEST *creation_hash; TPMT_TK_CREATION *creation_ticket; tool_rc tmp_rc = tpm2_create(ectx, &ctx.parent.object, &ctx.object.sensitive, &ctx.object.public, &outside_info, &creation_pcr, &out_private, &out_public, &creation_data, 
&creation_hash, &creation_ticket); if (tmp_rc != tool_rc_success) { return tmp_rc; } free(creation_data); free(creation_hash); free(creation_ticket); } tpm2_util_public_to_yaml(out_public, NULL); if (ctx.flags.u) { bool res = files_save_public(out_public, ctx.object.public_path); if (!res) { goto out; } } if (ctx.flags.r) { bool res = files_save_private(out_private, ctx.object.private_path); if (!res) { goto out; } } if (ctx.object.ctx_path) { rc = files_save_tpm_context_to_path(ectx, object_handle, ctx.object.ctx_path); } else { rc = tool_rc_success; } out: free(out_private); free(out_public); return rc; } static bool on_option(char key, char *value) { switch (key) { case 'P': ctx.parent.auth_str = value; break; case 'p': ctx.object.auth_str = value; break; case 'g': ctx.object.name_alg = value; break; case 'G': ctx.object.alg = value; ctx.flags.G = 1; break; case 'a': ctx.object.attrs = value; ctx.flags.b = 1; break; case 'i': ctx.object.sealed_data = strcmp("-", value) ? value : NULL; ctx.flags.i = 1; break; case 'L': ctx.object.policy = value; ctx.flags.L = 1; break; case 'u': ctx.object.public_path = value; ctx.flags.u = 1; break; case 'r': ctx.object.private_path = value; ctx.flags.r = 1; break; case 'C': ctx.parent.ctx_path = value; break; case 'c': ctx.object.ctx_path = value; break; }; return true; } bool tpm2_tool_onstart(tpm2_options **opts) { static struct option topts[] = { { "parent-auth", required_argument, NULL, 'P' }, { "key-auth", required_argument, NULL, 'p' }, { "hash-algorithm", required_argument, NULL, 'g' }, { "key-algorithm", required_argument, NULL, 'G' }, { "attributes", required_argument, NULL, 'a' }, { "sealing-input", required_argument, NULL, 'i' }, { "policy", required_argument, NULL, 'L' }, { "public", required_argument, NULL, 'u' }, { "private", required_argument, NULL, 'r' }, { "parent-context", required_argument, NULL, 'C' }, { "key-context", required_argument, NULL, 'c' }, }; *opts = tpm2_options_new("P:p:g:G:a:i:L:u:r:C:c:", 
ARRAY_LEN(topts), topts, on_option, NULL, 0); return *opts != NULL; } static bool load_sensitive(void) { ctx.object.sensitive.sensitive.data.size = BUFFER_SIZE( typeof(ctx.object.sensitive.sensitive.data), buffer); return files_load_bytes_from_buffer_or_file_or_stdin(NULL, ctx.object.sealed_data, &ctx.object.sensitive.sensitive.data.size, ctx.object.sensitive.sensitive.data.buffer); } static tool_rc check_options(void) { if (!ctx.parent.ctx_path) { LOG_ERR("Must specify parent object via -C."); return tool_rc_option_error; } if (ctx.flags.i && ctx.flags.G) { LOG_ERR("Cannot specify -G and -i together."); return tool_rc_option_error; } return tool_rc_success; } tool_rc tpm2_tool_onrun(ESYS_CONTEXT *ectx, tpm2_option_flags flags) { UNUSED(flags); TPMA_OBJECT attrs = DEFAULT_ATTRS; tool_rc rc = check_options(); if (rc != tool_rc_success) { return rc; } if (ctx.flags.i) { bool res = load_sensitive(); if (!res) { return tool_rc_general_error; } ctx.object.alg = "keyedhash"; if (!ctx.flags.b) { attrs &= ~TPMA_OBJECT_SIGN_ENCRYPT; attrs &= ~TPMA_OBJECT_DECRYPT; attrs &= ~TPMA_OBJECT_SENSITIVEDATAORIGIN; } } else if (!ctx.flags.b && !strncmp("hmac", ctx.object.alg, 4)) { attrs &= ~TPMA_OBJECT_DECRYPT; } bool result = tpm2_alg_util_public_init(ctx.object.alg, ctx.object.name_alg, ctx.object.attrs, ctx.object.policy, NULL, attrs, &ctx.object.public); if (!result) { return tool_rc_general_error; } if (ctx.flags.L && !ctx.object.auth_str) { ctx.object.public.publicArea.objectAttributes &= ~TPMA_OBJECT_USERWITHAUTH; } if (ctx.flags.i && ctx.object.public.publicArea.type != TPM2_ALG_KEYEDHASH) { LOG_ERR("Only TPM2_ALG_KEYEDHASH algorithm is allowed when sealing data"); return tool_rc_general_error; } rc = tpm2_util_object_load_auth(ectx, ctx.parent.ctx_path, ctx.parent.auth_str, &ctx.parent.object, false, TPM2_HANDLE_ALL_W_NV); if (rc != tool_rc_success) { return rc; } tpm2_session *tmp; rc = tpm2_auth_util_from_optarg(NULL, ctx.object.auth_str, &tmp, true); if (rc != 
tool_rc_success) { LOG_ERR("Invalid key authorization"); return rc; } TPM2B_AUTH const *auth = tpm2_session_get_auth_value(tmp); ctx.object.sensitive.sensitive.userAuth = *auth; tpm2_session_close(&tmp); return create(ectx); } tool_rc tpm2_tool_onstop(ESYS_CONTEXT *ectx) { UNUSED(ectx); return tpm2_session_close(&ctx.parent.object.session); }
26.59396
82
0.594826
[ "object" ]
0c420de7b18d558071d5ad7f55037627f87d7810
1,377
h
C
Class/WXMComponentHeader.h
XiaoMing-Wang/WXMComponentManager
5c436bd0831ee38b986956015d7c5c57cb07c33c
[ "MIT" ]
1
2019-04-28T08:34:57.000Z
2019-04-28T08:34:57.000Z
Class/WXMComponentHeader.h
XiaoMing-Wang/WQComponentManager
5c436bd0831ee38b986956015d7c5c57cb07c33c
[ "MIT" ]
null
null
null
Class/WXMComponentHeader.h
XiaoMing-Wang/WQComponentManager
5c436bd0831ee38b986956015d7c5c57cb07c33c
[ "MIT" ]
null
null
null
// // WQComponentHeader.h // ModulesProject // // Created by wq on 2019/4/20. // Copyright © 2019年 wq. All rights reserved. /** 注册协议 */ #define WCKitService(serviceInstance, procotol) \ class NSObject; \ char *k##procotol##_ser \ WXMKitDATA(WXMModuleClass) = "{ \""#procotol"\" : \""#serviceInstance"\" }"; /** 定义信号 */ #define WCSIGNAL_DEFINE(signal, describe) \ class NSObject; \ static WXM_SIGNAL const signal = (@#signal); /** 单例 */ #define WCRouterInstance [WXMComponentRouter sharedInstance] #define WCMangerInstance [WXMComponentManager sharedInstance] #define WCSeiviceInstance [WXMComponentServiceHelp sharedInstance] /** 信号 */ #define WCBridgeObserve(target, signal) WXMComponentBridge.observe(target, signal) #define WCBridgeSendSignal(signal, parameter) WXMComponentBridge.sendSignal(signal, parameter) /** Service */ #define WCService(aString) [WCSeiviceInstance serviceProvide:@protocol(aString) depend:self]; /** Error */ #define WCError(code, msg, obj) [WXMComponentError error:code message:msg object:obj]; #import "WXMComponentBridge.h" #import "WXMComponentRouter.h" #import "WXMComponentManager.h" #import "WXMComponentContext.h" #import "WXMComponentData.h" #import "WXMComponentAnnotation.h" #import "WXMComponentConfiguration.h" #import "WXMComponentBaseService.h" #import "WXMComponentServiceHelp.h" //#import "WXMAllComponentProtocol.h"
29.297872
94
0.763253
[ "object" ]
0c43ec36385efaa3ec67420939802b6eba8e5197
6,940
h
C
include/Common.h
RazvanN7/muir
e61c97a46ee22f7c0ab88325197416d1b04c693a
[ "MIT" ]
null
null
null
include/Common.h
RazvanN7/muir
e61c97a46ee22f7c0ab88325197416d1b04c693a
[ "MIT" ]
null
null
null
include/Common.h
RazvanN7/muir
e61c97a46ee22f7c0ab88325197416d1b04c693a
[ "MIT" ]
null
null
null
#ifndef COMMON_H #define COMMON_H #include "llvm/ADT/DenseSet.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/DebugInfo.h" #include "llvm/IR/InstVisitor.h" #include "llvm/IR/LegacyPassManager.h" #include "llvm/IR/Module.h" #include "llvm/Support/CommandLine.h" #include "NodeType.h" #include <map> #include <sstream> #include <string> #define WARNING(x) \ do { \ std::cout << "\033[1;31m[WARNING] \033[0m" \ << "\033[1;33m" << x << " \033[0m" << std::endl; \ } while (0) #define ASSERTION(x) \ do { \ std::cout << "\033[1;35m" << x << "\033[0m"; \ << "\033[1;33m" << x << " \033[0m" << std::endl; \ } while (0) #define PURPLE(x) "\033[1;35m" << x << "\033[0m"; using namespace std; using namespace llvm; namespace common { /** * Implimentaiton of FloatingPointIEEE754 */ union FloatingPointIEEE754 { struct ieee754 { ieee754() : mantissa(0), exponent(0), sign(0) {} unsigned int mantissa : 23; unsigned int exponent : 8; unsigned int sign : 1; }; ieee754 raw; unsigned int bits; float f; FloatingPointIEEE754() : f(0) {} }; // Structures struct GepInfo { uint32_t overall_size; std::vector<uint32_t> element_size; GepInfo() : overall_size(0) { element_size.clear(); } GepInfo(std::vector<uint32_t> _input_elements) : element_size(_input_elements) { overall_size = _input_elements.back(); } }; // Functions void optimizeModule(llvm::Module *); void PrintFunctionDFG(llvm::Module &); InstructionType getLLVMOpcodeName(uint32_t OpCode); } namespace helpers { /** * Print helper function */ bool helperReplace(std::string &, const std::string &, const std::string &); bool helperReplace(std::string &, const std::string &, std::vector<const std::string> &, const std::string &); bool helperReplace(std::string &, const std::string &, std::vector<uint32_t>, const std::string &); bool helperReplace(std::string &, const std::string &, const uint32_t); bool 
helperReplace(std::string &, const std::string &, const int); bool helperReplace(std::string &, const std::string &, std::vector<const uint32_t> &); bool helperReplace(std::string &, const std::string &, std::list<std::pair<uint32_t, uint32_t>> &, const std::string &); /** * FUNCTIONS */ void printAlloca(llvm::Function &); void printStruct(llvm::Module &); void printDFG(llvm::Function &); void printDFG(llvm::Module &); void PDGPrinter(llvm::Function &); void UIDLabel(Function &); void FunctionUIDLabel(llvm::Function &); /** * CLSSES */ /** * pdgDump class dumps PDG of the given funciton */ struct pdgDump : public llvm::FunctionPass { static char ID; pdgDump() : FunctionPass(ID) {} virtual bool runOnFunction(llvm::Function &F); }; class DFGPrinter : public llvm::FunctionPass, public llvm::InstVisitor<DFGPrinter> { friend class InstVisitor<DFGPrinter>; void visitFunction(llvm::Function &F); void visitBasicBlock(llvm::BasicBlock &BB); void visitInstruction(llvm::Instruction &I); stringstream dot; std::map<llvm::Value *, uint64_t> nodes; uint64_t counter_ins; uint64_t counter_bb; uint64_t counter_cnst; uint64_t counter_arg; public: static char ID; DFGPrinter() : FunctionPass(ID), counter_ins(0), counter_bb(0), counter_cnst(0), counter_arg(0) {} bool doInitialization(llvm::Module &) override; bool doFinalization(llvm::Module &) override; bool runOnFunction(llvm::Function &) override; void getAnalysisUsage(llvm::AnalysisUsage &AU) const override { AU.setPreservesAll(); } }; class LabelUID : public FunctionPass, public InstVisitor<LabelUID> { friend class InstVisitor<LabelUID>; uint64_t counter; void visitFunction(Function &F); void visitBasicBlock(BasicBlock &BB); void visitInstruction(Instruction &I); template <typename T> void visitGeneric(string, T &); map<Value *, uint64_t> values; public: static char ID; LabelUID() : FunctionPass(ID), counter(0) {} bool doInitialization(Module &) override { counter = 0; values.clear(); return false; }; bool doFinalization(Module &) 
override { return true; }; bool runOnFunction(Function &) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } }; class GepInformation : public ModulePass, public InstVisitor<GepInformation> { friend class InstVisitor<GepInformation>; void visitGetElementPtrInst(llvm::GetElementPtrInst &I); public: static char ID; // Gep containers std::map<llvm::Instruction *, common::GepInfo> GepAddress; // Function name llvm::StringRef function_name; GepInformation(llvm::StringRef FN) : ModulePass(ID), function_name(FN) {} bool doInitialization(Module &) override { return false; }; bool doFinalization(Module &) override { return true; }; bool runOnModule(Module &) override; void getAnalysisUsage(AnalysisUsage &AU) const override { AU.setPreservesAll(); } }; class InstCounter : public llvm::ModulePass { public: static char ID; // Function name llvm::StringRef function_name; std::map<llvm::BasicBlock *, uint64_t> BasicBlockCnt; InstCounter(llvm::StringRef fn) : llvm::ModulePass(ID), function_name(fn) {} bool doInitialization(llvm::Module &) override; bool doFinalization(llvm::Module &) override; bool runOnModule(llvm::Module &) override; void getAnalysisUsage(llvm::AnalysisUsage &AU) const override { AU.setPreservesAll(); } }; class CallInstSpliter : public ModulePass, public InstVisitor<CallInstSpliter> { friend class InstVisitor<CallInstSpliter>; private: llvm::SmallVector<llvm::CallInst *, 10> call_container; public: static char ID; // Function name llvm::StringRef function_name; CallInstSpliter() : llvm::ModulePass(ID), function_name("") {} CallInstSpliter(llvm::StringRef fn) : llvm::ModulePass(ID), function_name(fn) {} bool doInitialization(llvm::Module &) override; bool doFinalization(llvm::Module &) override; bool runOnModule(llvm::Module &) override; void getAnalysisUsage(llvm::AnalysisUsage &AU) const override { AU.setPreservesAll(); } void visitCallInst(llvm::CallInst &Inst); }; } #endif
24.785714
80
0.636167
[ "vector" ]
bd07a3034b6186b04ab049e3df120bbd5f9c45ef
10,830
h
C
Source/WebKit/efl/WebCoreSupport/FrameLoaderClientEfl.h
VincentWei/mdolphin-core
48ffdcf587a48a7bb4345ae469a45c5b64ffad0e
[ "Apache-2.0" ]
6
2017-05-31T01:46:45.000Z
2018-06-12T10:53:30.000Z
Source/WebKit/efl/WebCoreSupport/FrameLoaderClientEfl.h
FMSoftCN/mdolphin-core
48ffdcf587a48a7bb4345ae469a45c5b64ffad0e
[ "Apache-2.0" ]
null
null
null
Source/WebKit/efl/WebCoreSupport/FrameLoaderClientEfl.h
FMSoftCN/mdolphin-core
48ffdcf587a48a7bb4345ae469a45c5b64ffad0e
[ "Apache-2.0" ]
2
2017-07-17T06:02:42.000Z
2018-09-19T10:08:38.000Z
/* * Copyright (C) 2006 Zack Rusin <zack@kde.org> * Copyright (C) 2006, 2011 Apple Inc. All rights reserved. * Copyright (C) 2008 Collabora Ltd. All rights reserved. * Copyright (C) 2008 INdT - Instituto Nokia de Tecnologia * Copyright (C) 2009-2010 ProFUSION embedded systems * Copyright (C) 2009-2010 Samsung Electronics * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef FrameLoaderClientEfl_h #define FrameLoaderClientEfl_h #include "EWebKit.h" #include "FrameLoaderClient.h" #include "PluginView.h" #include "ResourceError.h" #include "ResourceResponse.h" namespace WebCore { class FormState; class FrameLoaderClientEfl : public FrameLoaderClient { public: explicit FrameLoaderClientEfl(Evas_Object *view); virtual ~FrameLoaderClientEfl() { } virtual void frameLoaderDestroyed(); void setWebFrame(Evas_Object *frame) { m_frame = frame; } Evas_Object* webFrame() const { return m_frame; } Evas_Object* webView() const { return m_view; } void setCustomUserAgent(const String &agent); const String& customUserAgent() const; virtual bool hasWebView() const; virtual bool hasFrameView() const; void callPolicyFunction(FramePolicyFunction function, PolicyAction action); virtual void makeRepresentation(DocumentLoader*); virtual void forceLayout(); virtual void forceLayoutForNonHTML(); virtual void setCopiesOnScroll(); virtual void detachedFromParent2(); virtual void detachedFromParent3(); virtual void loadedFromCachedPage(); virtual void assignIdentifierToInitialRequest(unsigned long identifier, DocumentLoader*, const ResourceRequest&); virtual void dispatchWillSendRequest(DocumentLoader*, unsigned long identifier, ResourceRequest&, const ResourceResponse& redirectResponse); virtual bool shouldUseCredentialStorage(DocumentLoader*, unsigned long identifier); virtual void dispatchDidReceiveAuthenticationChallenge(DocumentLoader*, unsigned long identifier, const AuthenticationChallenge&); virtual void dispatchDidPushStateWithinPage(); virtual void dispatchDidPopStateWithinPage(); virtual void dispatchDidReplaceStateWithinPage(); virtual void dispatchDidAddBackForwardItem(WebCore::HistoryItem*) const; virtual void dispatchDidRemoveBackForwardItem(WebCore::HistoryItem*) const; virtual void dispatchDidChangeBackForwardIndex() const; virtual void dispatchDidClearWindowObjectInWorld(WebCore::DOMWrapperWorld*); virtual void 
dispatchDidCancelAuthenticationChallenge(DocumentLoader*, unsigned long identifier, const AuthenticationChallenge&); virtual void dispatchDidReceiveResponse(DocumentLoader*, unsigned long identifier, const ResourceResponse&); virtual void dispatchDidReceiveContentLength(DocumentLoader*, unsigned long identifier, int lengthReceived); virtual void dispatchDidFinishLoading(DocumentLoader*, unsigned long identifier); virtual void dispatchDidFailLoading(DocumentLoader*, unsigned long identifier, const ResourceError&); virtual bool dispatchDidLoadResourceFromMemoryCache(DocumentLoader*, const ResourceRequest&, const ResourceResponse&, int length); virtual void dispatchDidLoadResourceByXMLHttpRequest(unsigned long identifier, const String& sourceString); virtual void dispatchDidHandleOnloadEvents(); virtual void dispatchDidReceiveServerRedirectForProvisionalLoad(); virtual void dispatchDidCancelClientRedirect(); virtual void dispatchWillPerformClientRedirect(const KURL&, double, double); virtual void dispatchDidChangeLocationWithinPage(); virtual void dispatchWillClose(); virtual void dispatchDidReceiveIcon(); virtual void dispatchDidStartProvisionalLoad(); virtual void dispatchDidReceiveTitle(const StringWithDirection&); virtual void dispatchDidChangeIcons(); virtual void dispatchDidCommitLoad(); virtual void dispatchDidFailProvisionalLoad(const ResourceError&); virtual void dispatchDidFailLoad(const ResourceError&); virtual void dispatchDidFinishDocumentLoad(); virtual void dispatchDidFinishLoad(); virtual void dispatchDidFirstLayout(); virtual void dispatchDidFirstVisuallyNonEmptyLayout(); virtual Frame* dispatchCreatePage(const WebCore::NavigationAction&); virtual void dispatchShow(); virtual void dispatchDecidePolicyForResponse(FramePolicyFunction, const ResourceResponse&, const ResourceRequest&); virtual void dispatchDecidePolicyForNewWindowAction(FramePolicyFunction, const NavigationAction&, const ResourceRequest&, WTF::PassRefPtr<FormState>, const String& 
frameName); virtual void dispatchDecidePolicyForNavigationAction(FramePolicyFunction, const NavigationAction&, const ResourceRequest&, WTF::PassRefPtr<FormState>); virtual void cancelPolicyCheck(); virtual void dispatchUnableToImplementPolicy(const ResourceError&); virtual void dispatchWillSendSubmitEvent(HTMLFormElement*) { } virtual void dispatchWillSubmitForm(FramePolicyFunction, WTF::PassRefPtr<FormState>); virtual void dispatchDidLoadMainResource(DocumentLoader*); virtual void revertToProvisionalState(DocumentLoader*); virtual void setMainDocumentError(DocumentLoader*, const ResourceError&); virtual void postProgressStartedNotification(); virtual void postProgressEstimateChangedNotification(); virtual void postProgressFinishedNotification(); virtual PassRefPtr<Frame> createFrame(const KURL& url, const String& name, HTMLFrameOwnerElement* ownerElement, const String& referrer, bool allowsScrolling, int marginWidth, int marginHeight); virtual void didTransferChildFrameToNewDocument(Page*); virtual void transferLoadingResourceFromPage(unsigned long, WebCore::DocumentLoader*, const ResourceRequest&, WebCore::Page*); virtual PassRefPtr<Widget> createPlugin(const IntSize&, HTMLPlugInElement*, const KURL&, const WTF::Vector<String>&, const WTF::Vector<String>&, const String&, bool); virtual void redirectDataToPlugin(Widget* pluginWidget); virtual PassRefPtr<Widget> createJavaAppletWidget(const IntSize&, HTMLAppletElement*, const KURL& baseURL, const WTF::Vector<String>& paramNames, const WTF::Vector<String>& paramValues); virtual String overrideMediaType() const; virtual void windowObjectCleared(); virtual void documentElementAvailable(); virtual void didPerformFirstNavigation() const; virtual void registerForIconNotification(bool); virtual ObjectContentType objectContentType(const KURL&, const String& mimeType, bool shouldPreferPlugInsForImages); virtual void setMainFrameDocumentReady(bool); virtual void startDownload(const ResourceRequest&); virtual void 
willChangeTitle(DocumentLoader*); virtual void didChangeTitle(DocumentLoader*); virtual void committedLoad(DocumentLoader*, const char*, int); virtual void finishedLoading(DocumentLoader*); virtual void updateGlobalHistory(); virtual void updateGlobalHistoryRedirectLinks(); virtual bool shouldGoToHistoryItem(HistoryItem*) const; virtual bool shouldStopLoadingForHistoryItem(HistoryItem*) const; virtual void didDisplayInsecureContent(); virtual void didRunInsecureContent(SecurityOrigin*, const KURL&); virtual ResourceError cancelledError(const ResourceRequest&); virtual ResourceError blockedError(const ResourceRequest&); virtual ResourceError cannotShowURLError(const ResourceRequest&); virtual ResourceError interruptForPolicyChangeError(const ResourceRequest&); virtual ResourceError cannotShowMIMETypeError(const ResourceResponse&); virtual ResourceError fileDoesNotExistError(const ResourceResponse&); virtual ResourceError pluginWillHandleLoadError(const ResourceResponse&); virtual bool shouldFallBack(const ResourceError&); virtual bool canHandleRequest(const ResourceRequest&) const; virtual bool canShowMIMEType(const String&) const; virtual bool canShowMIMETypeAsHTML(const String& MIMEType) const; virtual bool representationExistsForURLScheme(const String&) const; virtual String generatedMIMETypeForURLScheme(const String&) const; virtual void frameLoadCompleted(); virtual void saveViewStateToItem(HistoryItem*); virtual void restoreViewState(); virtual void provisionalLoadStarted(); virtual void didFinishLoad(); virtual void prepareForDataSourceReplacement(); virtual WTF::PassRefPtr<DocumentLoader> createDocumentLoader(const ResourceRequest&, const SubstituteData&); virtual void setTitle(const StringWithDirection& title, const KURL&); virtual String userAgent(const KURL&); virtual void savePlatformDataToCachedFrame(CachedFrame*); virtual void transitionToCommittedFromCachedFrame(CachedFrame*); virtual void transitionToCommittedForNewPage(); virtual void 
didSaveToPageCache(); virtual void didRestoreFromPageCache(); virtual void dispatchDidBecomeFrameset(bool); virtual bool canCachePage() const; virtual void download(ResourceHandle*, const ResourceRequest&, const ResourceRequest&, const ResourceResponse&); virtual PassRefPtr<WebCore::FrameNetworkingContext> createNetworkingContext(); private: Evas_Object *m_view; Evas_Object *m_frame; ResourceResponse m_response; String m_userAgent; String m_customUserAgent; ResourceError m_loadError; // Plugin view to redirect data to PluginView* m_pluginView; bool m_hasSentResponseToPlugin; }; } #endif // FrameLoaderClientEfl_h
46.883117
190
0.787627
[ "vector" ]
bd088a46ae760e66a4489ce00901fd624ede3413
8,379
c
C
examples/dstar.c
andydansby/z88dk-mk2
51c15f1387293809c496f5eaf7b196f8a0e9b66b
[ "ClArtistic" ]
1
2020-09-15T08:35:49.000Z
2020-09-15T08:35:49.000Z
examples/dstar.c
andydansby/z88dk-MK2
51c15f1387293809c496f5eaf7b196f8a0e9b66b
[ "ClArtistic" ]
null
null
null
examples/dstar.c
andydansby/z88dk-MK2
51c15f1387293809c496f5eaf7b196f8a0e9b66b
[ "ClArtistic" ]
null
null
null
/* * Ported to the Ti82/83/83+ (rest will follow) by Henk Poley * Extended with different sprite sizes and sound by Stefano Bodrato * * * * * * * * * * dstar.c * * DStar Z88 - C Demo * Original TI game By A Von Dollen * Converted to Z88 By D Morris * Keys: Q,A,O,P,SPACE,H,G * * * * * * * * * * dstarz88 is a conversion of a TI86 game I found with * source on www.ticalc.org. * * The original program was written by Andrew Von Dollen who * in turn based it on a HP game by Joe W. * * The aim of the game is to collect all the clear bubbles by * running over them. You control either the dark bubble or * the solid box. The dark bubble is used to collect the clear * bubbles, and the solid box is used as a sort of movable wall. * * Both objects carry on moving until they hit something else * (except for the dark bubble in the case of clear bubbles). * * * * * * * * * * The keys are defined in #define statements, and default thus: * * Up: Q * Down: A * Left: O * Right: P * Quit: G * Retry: H * Switch: [SPACE] * * Switch changes between the dark bubble and the solid box. * * * On the TI Calculators the keyboard mapping is: * * up,down,left,right - move ball/box * [Enter] - toggle ball/box * 7 - Quit * 9 - Restart level * +,- - CHEAT.... * * * * * * * * * * This is the first game ever produced with the Small C compiler - * it was written as a statement saying that it is possible to * write something easily, quickly and efficiently using the * compiler. Hopefully it will be an encouragement for others to * do likewise! 
* * * * * * * * * * Compile examples : * * To get a TI82 version of the game (optionally you could add sound): * zcc +ti82 -create-app dstar.c * * To get a TI85 version of the game (optionally you could add sound): * zcc +ti85 -Dspritesize=7 -create-app dstar.c * * To get a Spectrum 16K version of the game: * zcc +zx -Dspritesize=16 -DSOUND -create-app -zorg=24300 dstar.c * * To get a TS2068 HRG version of the game: * zcc +ts2068 -startup=2 -Dspritesize=21 -DSOUND -create-app dstar.c * * To get a VZ200 version: * zcc +vz -Dspritesize=7 -DSOUND -odztar.vz dstar.c * * MSXDOS: * zcc +msx -Dspritesize=16 -DSOUND -startup=2 dstar.c * * MSX: * zcc +msx -Dspritesize=16 -DSOUND -create-app dstar.c * * To get an 80 pixel graphics version of the game (Mattel Aquarius, TRS80, etc): * zcc +aquarius -Dspritesize=5 -create-app dstar.c * * Even smaller version of the game: * zcc +gal -Dspritesize=4 -create-app dstar.c * * (in the above examples the sprite size can be set to 4,5,6,7,8 or 16 * and sound can optionally be added with some target) * * * * * * * * * * Enough twaddle, enjoy the game and study the source! * * d. 
<djm@jb.man.ac.uk> 1/12/98 * * * * * * * * */ #include <stdio.h> #include <games.h> #include <stdlib.h> #include <graphics.h> #ifdef SOUND #include <sound.h> #endif /* #define spritesize 4 --> minimalistic, 64x36 pixels */ /* #define spritesize 5 --> very low resolutions, 80x45 pixels */ /* #define spritesize 6 --> TI mode, 96x54 */ /* #define spritesize 7 --> TI85/86, VZ200 */ /* #define spritesize 8 --> 128x72 pixels */ /* #define spritesize 16 --> Big screen mode 256x144 */ /* #define spritesize 21 --> Wide screen mode 512x192 */ #ifndef spritesize #define spritesize 6 #endif /* Single sprite memory usage, including bytes for its size */ #if (spritesize == 16) #define spritemem 34 #endif #if (spritesize == 21) #define spritemem 90 #endif #ifndef spritemem #define spritemem (spritesize+2) #endif #include "dstar.h" void main() { Level = (STARTLEV-1); SetupLevel(); /* Display the first level */ /* Loop keyhandler till you finished the game */ while (CheckNotFinished()) Gamekeys(); } void Gamekeys(void) { char *charptr; /* Set up a pointer to the variable we want to change * (either the box or the ball) */ charptr = PieceIsBall ? &BoxOffset : &BallOffset; switch(getk()) { case K_DOWN: MovePiece(charptr,0,+1); break; case K_UP: MovePiece(charptr,0,-1); break; case K_RIGHT: MovePiece(charptr,+1,0); break; case K_LEFT: MovePiece(charptr,-1,0); break; case K_SWITCH: PieceIsBall^=1; /* Toggle ball/box */ #ifdef SOUND bit_fx4 (5); #endif while (getk() == K_SWITCH) {} break; case K_EXIT: exit(0); case K_NEXTLEV: /* Okay this IS cheating... */ if(++Level==MAXLEVEL) { --Level; break; } SetupLevel(); break; case K_PREVLEV: if(--Level==-1) { ++Level; break; } /* fall thrue */ case K_CLEAR: #ifdef SOUND bit_fx4 (3); #endif SetupLevel(); } } /* The level is stored 'compressed', taking up 38 bytes a time. 
* byte 0 - position of ball * byte 1 - position of box * 2-37 - Level data * * Level data is stored as two bits per block, so we have to shift our * picked up byte round to get it. */ void SetupLevel(void) { int x; char *ptr,*ptr2; /* Fresh level, so start with the ball */ PieceIsBall = FALSE; ptr2 = Board; /* We copy to the Board */ ptr = levels + (Level * 38); /* from the Level data */ /* First two bytes are the ball and the box position */ BallOffset = *ptr++; BoxOffset = *ptr++; /* Decompress Level into the Board */ for (x=0; x!=36; x++) { *ptr2++=((*ptr)>>6)&3; *ptr2++=((*ptr)>>4)&3; *ptr2++=((*ptr)>>2)&3; *ptr2++=( *ptr) &3; ptr++; } /* Put the ball and box into their Board position */ *(Board+BallOffset) = BALL; *(Board+BoxOffset) = BOX; DrawBoard(); /* Display the clean Board */ #ifdef SOUND bit_fx4 (1); #endif } void DrawBoard(void) { int x,y; char *ptr; ptr = Board; clg(); /* clear the screen */ for (y=0 ; y!=9 ; y++) { for (x=0 ; x!=16 ; x++) { #if (spritesize == 21) putsprite(spr_or,(x*32),(y*spritesize),sprites + (spritemem * (*ptr++))); #else putsprite(spr_or,(x*spritesize),(y*spritesize),sprites + (spritemem * (*ptr++))); #endif } } } /* Check if a Level is (not) finished: * There are 144 squares in each Level * * Note the use of != instead of < or <= * - this is faster to execute on the Z80! */ char CheckNotFinished(void) { char *ptr; int i; ptr = Board; for(i=1 ; i!=144 ; i++) { if(*ptr++ == BUBB) return(TRUE); /* Are there any bubbles? */ } if(++Level == MAXLEVEL) return(FALSE); /* All levels done? */ SetupLevel(); /* If not => Next Level! 
*/ return(TRUE); /* And keep scanning keys */ } /* Check to see if we're running into anything: * - The box stops for everything (exept empty space [= 0]) * - The ball stops for everything exept a bubble */ char TestNextPosIsStop(char nextpos) { if(!PieceIsBall) if (nextpos==BUBB) return(FALSE); return(nextpos); } void MovePiece(char *ptr, char plusx, char plusy) { char *locn; char temp,temp2; int x,y; temp = PieceIsBall + 3; temp2 = (plusx + (plusy * 16)); while(1) /* loop */ { locn = *(ptr) + Board; if(TestNextPosIsStop(*(locn+temp2))) return; /* till edge */ y = (*(ptr) / 16); #if (spritesize == 21) x = (*(ptr) - (y * 16)) * 32; #else x = (*(ptr) - (y * 16)) * spritesize; #endif y *= spritesize; if(*(locn+temp2)==BUBB) { #if (spritesize == 21) putsprite(spr_xor,x+(plusx*32),y+(plusy*spritesize),sprites + (spritemem * BUBB)); #else putsprite(spr_xor,x+(plusx*spritesize),y+(plusy*spritesize),sprites + (spritemem * BUBB)); #endif #ifdef SOUND bit_fx2 (5); #endif } *(locn+temp2) = *locn; *locn = 0; /* remove old */ putsprite(spr_xor,x,y,sprites + (spritemem * temp)); /* put new */ #if (spritesize == 21) putsprite(spr_xor,x+(plusx*32),y+(plusy*spritesize),sprites + (spritemem * temp)); #else putsprite(spr_xor,x+(plusx*spritesize),y+(plusy*spritesize),sprites + (spritemem * temp)); #endif #ifdef SOUND bit_fx2 (2); #endif (*ptr) += temp2; } }
23.339833
93
0.590285
[ "solid" ]
bd0da433e6d72471b259616980cfeac8748bd5cf
3,101
c
C
linux-3.16/arch/x86/kvm/irq.c
jj1232727/system_call
145315cdf532c45b6aa753d98260d2b1c0b63abc
[ "Unlicense" ]
null
null
null
linux-3.16/arch/x86/kvm/irq.c
jj1232727/system_call
145315cdf532c45b6aa753d98260d2b1c0b63abc
[ "Unlicense" ]
null
null
null
linux-3.16/arch/x86/kvm/irq.c
jj1232727/system_call
145315cdf532c45b6aa753d98260d2b1c0b63abc
[ "Unlicense" ]
null
null
null
/* * irq.c: API for in kernel interrupt controller * Copyright (c) 2007, Intel Corporation. * Copyright 2009 Red Hat, Inc. and/or its affiliates. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * Authors: * Yaozu (Eddie) Dong <Eddie.dong@intel.com> * */ #include <linux/module.h> #include <linux/kvm_host.h> #include "irq.h" #include "i8254.h" #include "x86.h" /* * check if there are pending timer events * to be processed. */ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { return apic_has_pending_timer(vcpu); } EXPORT_SYMBOL(kvm_cpu_has_pending_timer); /* * check if there is pending interrupt from * non-APIC source without intack. */ static int kvm_cpu_has_extint(struct kvm_vcpu *v) { if (kvm_apic_accept_pic_intr(v)) return pic_irqchip(v->kvm)->output; /* PIC */ else return 0; } /* * check if there is injectable interrupt: * when virtual interrupt delivery enabled, * interrupt from apic will handled by hardware, * we don't need to check it here. */ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v) { if (!irqchip_in_kernel(v->kvm)) return v->arch.interrupt.pending; if (kvm_cpu_has_extint(v)) return 1; if (kvm_apic_vid_enabled(v->kvm)) return 0; return kvm_apic_has_interrupt(v) != -1; /* LAPIC */ } /* * check if there is pending interrupt without * intack. 
*/ int kvm_cpu_has_interrupt(struct kvm_vcpu *v) { if (!irqchip_in_kernel(v->kvm)) return v->arch.interrupt.pending; if (kvm_cpu_has_extint(v)) return 1; return kvm_apic_has_interrupt(v) != -1; /* LAPIC */ } EXPORT_SYMBOL_GPL(kvm_cpu_has_interrupt); /* * Read pending interrupt(from non-APIC source) * vector and intack. */ static int kvm_cpu_get_extint(struct kvm_vcpu *v) { if (kvm_cpu_has_extint(v)) return kvm_pic_read_irq(v->kvm); /* PIC */ return -1; } /* * Read pending interrupt vector and intack. */ int kvm_cpu_get_interrupt(struct kvm_vcpu *v) { int vector; if (!irqchip_in_kernel(v->kvm)) return v->arch.interrupt.nr; vector = kvm_cpu_get_extint(v); if (kvm_apic_vid_enabled(v->kvm) || vector != -1) return vector; /* PIC */ return kvm_get_apic_interrupt(v); /* APIC */ } EXPORT_SYMBOL_GPL(kvm_cpu_get_interrupt); void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu) { kvm_inject_apic_timer_irqs(vcpu); /* TODO: PIT, RTC etc. */ } EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs); void __kvm_migrate_timers(struct kvm_vcpu *vcpu) { __kvm_migrate_apic_timer(vcpu); __kvm_migrate_pit_timer(vcpu); }
23.853846
79
0.73041
[ "vector" ]
bd15540dc12524fe2ac9bbcd9050067ae54224bb
4,872
h
C
libcef/common/crash_reporter_client.h
amirbn7/cef
732a307c751c2670a35fd9e0e4a491cdfc6bcc6b
[ "BSD-3-Clause" ]
4
2019-10-30T10:11:34.000Z
2021-08-24T23:04:30.000Z
libcef/common/crash_reporter_client.h
amirbn7/cef
732a307c751c2670a35fd9e0e4a491cdfc6bcc6b
[ "BSD-3-Clause" ]
1
2020-01-19T15:54:10.000Z
2020-01-19T15:54:10.000Z
libcef/common/crash_reporter_client.h
amirbn7/cef
732a307c751c2670a35fd9e0e4a491cdfc6bcc6b
[ "BSD-3-Clause" ]
4
2018-12-25T05:17:55.000Z
2020-02-17T04:55:59.000Z
// Copyright 2016 The Chromium Embedded Framework Authors. Portions copyright // 2016 The Chromium Authors. All rights reserved. Use of this source code is // governed by a BSD-style license that can be found in the LICENSE file. #ifndef CEF_LIBCEF_COMMON_CRASH_REPORTER_CLIENT_H_ #define CEF_LIBCEF_COMMON_CRASH_REPORTER_CLIENT_H_ #include <string> #include <vector> // Include this first to avoid compiler errors. #include "base/compiler_specific.h" #include "include/cef_version.h" #include "base/macros.h" #include "base/synchronization/lock.h" #include "build/build_config.h" #include "components/crash/content/app/crash_reporter_client.h" // Global object that is instantiated in each process and configures crash // reporting. On Windows this is created by the // InitializeCrashReportingForProcess() method called from chrome_elf. On // Linux and macOS this is created by crash_reporting::BasicStartupComplete(). class CefCrashReporterClient : public crash_reporter::CrashReporterClient { public: CefCrashReporterClient(); ~CefCrashReporterClient() override; // Reads the crash config file and returns true on success. Failure to read // the crash config file will disable crash reporting. This method should be // called immediately after the CefCrashReporterClient instance is created. bool ReadCrashConfigFile(); bool HasCrashConfigFile() const; #if defined(OS_WIN) // Called from chrome_elf (chrome_elf/crash/crash_helper.cc) to instantiate // a process wide instance of CefCrashReporterClient and initialize crash // reporting for the process. The instance is leaked. // crash_reporting_win::InitializeCrashReportingForModule() will be called // later from crash_reporting::PreSandboxStartup() to read global state into // the module address space. 
static void InitializeCrashReportingForProcess(); bool GetAlternativeCrashDumpLocation(base::string16* crash_dir) override; void GetProductNameAndVersion(const base::string16& exe_path, base::string16* product_name, base::string16* version, base::string16* special_build, base::string16* channel_name) override; bool GetCrashDumpLocation(base::string16* crash_dir) override; bool GetCrashMetricsLocation(base::string16* metrics_dir) override; #elif defined(OS_POSIX) void GetProductNameAndVersion(const char** product_name, const char** version) override; void GetProductNameAndVersion(std::string* product_name, std::string* version, std::string* channel) override; #if !defined(OS_MACOSX) base::FilePath GetReporterLogFilename() override; bool EnableBreakpadForProcess(const std::string& process_type) override; #endif bool GetCrashDumpLocation(base::FilePath* crash_dir) override; #endif // defined(OS_POSIX) // All of these methods must return true to enable crash report upload. bool GetCollectStatsConsent() override; bool GetCollectStatsInSample() override; #if defined(OS_WIN) || defined(OS_MACOSX) bool ReportingIsEnforcedByPolicy(bool* crashpad_enabled) override; #endif #if defined(OS_POSIX) && !defined(OS_MACOSX) bool IsRunningUnattended() override; #endif std::string GetCrashServerURL() override; void GetCrashOptionalArguments(std::vector<std::string>* arguments) override; #if defined(OS_WIN) base::string16 GetCrashExternalHandler( const base::string16& exe_dir) override; bool HasCrashExternalHandler() const; #endif #if defined(OS_MACOSX) bool EnableBrowserCrashForwarding() override; #endif #if defined(OS_POSIX) && !defined(OS_MACOSX) ParameterMap FilterParameters(const ParameterMap& parameters) override; #endif // Set or clear a crash key value. 
bool SetCrashKeyValue(const base::StringPiece& key, const base::StringPiece& value); private: bool has_crash_config_file_ = false; enum KeySize { SMALL_SIZE, MEDIUM_SIZE, LARGE_SIZE }; // Map of crash key name to (KeySize, index). // Const access to |crash_keys_| is thread-safe after initialization. using KeyMap = std::map<std::string, std::pair<KeySize, size_t>>; KeyMap crash_keys_; // Modification of CrashKeyString values must be synchronized. base::Lock crash_key_lock_; std::string server_url_; bool rate_limit_ = true; int max_uploads_ = 5; int max_db_size_ = 20; int max_db_age_ = 5; std::string product_name_ = "cef"; std::string product_version_ = CEF_VERSION; #if defined(OS_WIN) std::string app_name_ = "CEF"; std::string external_handler_; #endif #if defined(OS_MACOSX) bool enable_browser_crash_forwarding_ = false; #endif DISALLOW_COPY_AND_ASSIGN(CefCrashReporterClient); }; #endif // CEF_LIBCEF_COMMON_CRASH_REPORTER_CLIENT_H_
36.631579
79
0.743227
[ "object", "vector" ]
bd1749709f3bcdc29cc72749f8333793de5867d7
241,936
c
C
osprey/kgccfe/gnu/config/ia64/ia64.c
sharugupta/OpenUH
daddd76858a53035f5d713f648d13373c22506e8
[ "BSD-2-Clause" ]
null
null
null
osprey/kgccfe/gnu/config/ia64/ia64.c
sharugupta/OpenUH
daddd76858a53035f5d713f648d13373c22506e8
[ "BSD-2-Clause" ]
null
null
null
osprey/kgccfe/gnu/config/ia64/ia64.c
sharugupta/OpenUH
daddd76858a53035f5d713f648d13373c22506e8
[ "BSD-2-Clause" ]
null
null
null
/* Definitions of target machine for GNU compiler. Copyright (C) 1999, 2000, 2001, 2002, 2003 Free Software Foundation, Inc. Contributed by James E. Wilson <wilson@cygnus.com> and David Mosberger <davidm@hpl.hp.com>. This file is part of GNU CC. GNU CC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. GNU CC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include "config.h" #include "system.h" #include "rtl.h" #include "tree.h" #include "regs.h" #include "hard-reg-set.h" #include "real.h" #include "insn-config.h" #include "conditions.h" #include "output.h" #include "insn-attr.h" #include "flags.h" #include "recog.h" #include "expr.h" #include "optabs.h" #include "except.h" #include "function.h" #include "ggc.h" #include "basic-block.h" #include "toplev.h" #include "sched-int.h" #include "timevar.h" #include "target.h" #include "target-def.h" #include "tm_p.h" #include "langhooks.h" /* This is used for communication between ASM_OUTPUT_LABEL and ASM_OUTPUT_LABELREF. */ int ia64_asm_output_label = 0; /* Define the information needed to generate branch and scc insns. This is stored from the compare operation. */ struct rtx_def * ia64_compare_op0; struct rtx_def * ia64_compare_op1; /* Register names for ia64_expand_prologue. 
*/ static const char * const ia64_reg_numbers[96] = { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39", "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47", "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55", "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63", "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71", "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79", "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87", "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95", "r96", "r97", "r98", "r99", "r100","r101","r102","r103", "r104","r105","r106","r107","r108","r109","r110","r111", "r112","r113","r114","r115","r116","r117","r118","r119", "r120","r121","r122","r123","r124","r125","r126","r127"}; /* ??? These strings could be shared with REGISTER_NAMES. */ static const char * const ia64_input_reg_names[8] = { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" }; /* ??? These strings could be shared with REGISTER_NAMES. */ static const char * const ia64_local_reg_names[80] = { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7", "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15", "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23", "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31", "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39", "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47", "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55", "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63", "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71", "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" }; /* ??? These strings could be shared with REGISTER_NAMES. */ static const char * const ia64_output_reg_names[8] = { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" }; /* String used with the -mfixed-range= option. 
*/ const char *ia64_fixed_range_string; /* Determines whether we use adds, addl, or movl to generate our TLS immediate offsets. */ int ia64_tls_size = 22; /* String used with the -mtls-size= option. */ const char *ia64_tls_size_string; /* Determines whether we run our final scheduling pass or not. We always avoid the normal second scheduling pass. */ static int ia64_flag_schedule_insns2; /* Variables which are this size or smaller are put in the sdata/sbss sections. */ unsigned int ia64_section_threshold; /* Structure to be filled in by ia64_compute_frame_size with register save masks and offsets for the current function. */ struct ia64_frame_info { HOST_WIDE_INT total_size; /* size of the stack frame, not including the caller's scratch area. */ HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */ HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */ HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */ HARD_REG_SET mask; /* mask of saved registers. */ unsigned int gr_used_mask; /* mask of registers in use as gr spill registers or long-term scratches. */ int n_spilled; /* number of spilled registers. */ int reg_fp; /* register for fp. */ int reg_save_b0; /* save register for b0. */ int reg_save_pr; /* save register for prs. */ int reg_save_ar_pfs; /* save register for ar.pfs. */ int reg_save_ar_unat; /* save register for ar.unat. */ int reg_save_ar_lc; /* save register for ar.lc. */ int reg_save_gp; /* save register for gp. */ int n_input_regs; /* number of input registers used. */ int n_local_regs; /* number of local registers used. */ int n_output_regs; /* number of output registers used. */ int n_rotate_regs; /* number of rotating registers used. */ char need_regstk; /* true if a .regstk directive needed. */ char initialized; /* true if the data is finalized. */ }; /* Current frame information calculated by ia64_compute_frame_size. 
*/ static struct ia64_frame_info current_frame_info; static rtx gen_tls_get_addr PARAMS ((void)); static rtx gen_thread_pointer PARAMS ((void)); static int find_gr_spill PARAMS ((int)); static int next_scratch_gr_reg PARAMS ((void)); static void mark_reg_gr_used_mask PARAMS ((rtx, void *)); static void ia64_compute_frame_size PARAMS ((HOST_WIDE_INT)); static void setup_spill_pointers PARAMS ((int, rtx, HOST_WIDE_INT)); static void finish_spill_pointers PARAMS ((void)); static rtx spill_restore_mem PARAMS ((rtx, HOST_WIDE_INT)); static void do_spill PARAMS ((rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx)); static void do_restore PARAMS ((rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT)); static rtx gen_movdi_x PARAMS ((rtx, rtx, rtx)); static rtx gen_fr_spill_x PARAMS ((rtx, rtx, rtx)); static rtx gen_fr_restore_x PARAMS ((rtx, rtx, rtx)); static enum machine_mode hfa_element_mode PARAMS ((tree, int)); static void fix_range PARAMS ((const char *)); static struct machine_function * ia64_init_machine_status PARAMS ((void)); static void emit_insn_group_barriers PARAMS ((FILE *, rtx)); static void emit_all_insn_group_barriers PARAMS ((FILE *, rtx)); static void emit_predicate_relation_info PARAMS ((void)); static bool ia64_in_small_data_p PARAMS ((tree)); static void ia64_encode_section_info PARAMS ((tree, int)); static const char *ia64_strip_name_encoding PARAMS ((const char *)); static void process_epilogue PARAMS ((void)); static int process_set PARAMS ((FILE *, rtx)); static rtx ia64_expand_fetch_and_op PARAMS ((optab, enum machine_mode, tree, rtx)); static rtx ia64_expand_op_and_fetch PARAMS ((optab, enum machine_mode, tree, rtx)); static rtx ia64_expand_compare_and_swap PARAMS ((enum machine_mode, int, tree, rtx)); static rtx ia64_expand_lock_test_and_set PARAMS ((enum machine_mode, tree, rtx)); static rtx ia64_expand_lock_release PARAMS ((enum machine_mode, tree, rtx)); static bool ia64_assemble_integer PARAMS ((rtx, unsigned int, int)); static void 
ia64_output_function_prologue PARAMS ((FILE *, HOST_WIDE_INT)); static void ia64_output_function_epilogue PARAMS ((FILE *, HOST_WIDE_INT)); static void ia64_output_function_end_prologue PARAMS ((FILE *)); static int ia64_issue_rate PARAMS ((void)); static int ia64_adjust_cost PARAMS ((rtx, rtx, rtx, int)); static void ia64_sched_init PARAMS ((FILE *, int, int)); static void ia64_sched_finish PARAMS ((FILE *, int)); static int ia64_internal_sched_reorder PARAMS ((FILE *, int, rtx *, int *, int, int)); static int ia64_sched_reorder PARAMS ((FILE *, int, rtx *, int *, int)); static int ia64_sched_reorder2 PARAMS ((FILE *, int, rtx *, int *, int)); static int ia64_variable_issue PARAMS ((FILE *, int, rtx, int)); static void ia64_output_mi_thunk PARAMS ((FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree)); static void ia64_select_rtx_section PARAMS ((enum machine_mode, rtx, unsigned HOST_WIDE_INT)); static void ia64_rwreloc_select_section PARAMS ((tree, int, unsigned HOST_WIDE_INT)) ATTRIBUTE_UNUSED; static void ia64_rwreloc_unique_section PARAMS ((tree, int)) ATTRIBUTE_UNUSED; static void ia64_rwreloc_select_rtx_section PARAMS ((enum machine_mode, rtx, unsigned HOST_WIDE_INT)) ATTRIBUTE_UNUSED; static unsigned int ia64_rwreloc_section_type_flags PARAMS ((tree, const char *, int)) ATTRIBUTE_UNUSED; static void ia64_hpux_add_extern_decl PARAMS ((const char *name)) ATTRIBUTE_UNUSED; /* Table of valid machine attributes. */ static const struct attribute_spec ia64_attribute_table[] = { /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */ { "syscall_linkage", 0, 0, false, true, true, NULL }, { NULL, 0, 0, false, false, false, NULL } }; /* Initialize the GCC target structure. 
*/ #undef TARGET_ATTRIBUTE_TABLE #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table #undef TARGET_INIT_BUILTINS #define TARGET_INIT_BUILTINS ia64_init_builtins #undef TARGET_EXPAND_BUILTIN #define TARGET_EXPAND_BUILTIN ia64_expand_builtin #undef TARGET_ASM_BYTE_OP #define TARGET_ASM_BYTE_OP "\tdata1\t" #undef TARGET_ASM_ALIGNED_HI_OP #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t" #undef TARGET_ASM_ALIGNED_SI_OP #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t" #undef TARGET_ASM_ALIGNED_DI_OP #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t" #undef TARGET_ASM_UNALIGNED_HI_OP #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t" #undef TARGET_ASM_UNALIGNED_SI_OP #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t" #undef TARGET_ASM_UNALIGNED_DI_OP #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t" #undef TARGET_ASM_INTEGER #define TARGET_ASM_INTEGER ia64_assemble_integer #undef TARGET_ASM_FUNCTION_PROLOGUE #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue #undef TARGET_ASM_FUNCTION_END_PROLOGUE #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue #undef TARGET_ASM_FUNCTION_EPILOGUE #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue #undef TARGET_IN_SMALL_DATA_P #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p #undef TARGET_ENCODE_SECTION_INFO #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info #undef TARGET_STRIP_NAME_ENCODING #define TARGET_STRIP_NAME_ENCODING ia64_strip_name_encoding #undef TARGET_SCHED_ADJUST_COST #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost #undef TARGET_SCHED_ISSUE_RATE #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate #undef TARGET_SCHED_VARIABLE_ISSUE #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue #undef TARGET_SCHED_INIT #define TARGET_SCHED_INIT ia64_sched_init #undef TARGET_SCHED_FINISH #define TARGET_SCHED_FINISH ia64_sched_finish #undef TARGET_SCHED_REORDER #define TARGET_SCHED_REORDER ia64_sched_reorder #undef TARGET_SCHED_REORDER2 #define TARGET_SCHED_REORDER2 
ia64_sched_reorder2 #ifdef HAVE_AS_TLS #undef TARGET_HAVE_TLS #define TARGET_HAVE_TLS true #endif #undef TARGET_ASM_OUTPUT_MI_THUNK #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true struct gcc_target targetm = TARGET_INITIALIZER; /* Return 1 if OP is a valid operand for the MEM of a CALL insn. */ int call_operand (op, mode) rtx op; enum machine_mode mode; { if (mode != GET_MODE (op) && mode != VOIDmode) return 0; return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == REG || (GET_CODE (op) == SUBREG && GET_CODE (XEXP (op, 0)) == REG)); } /* Return 1 if OP refers to a symbol in the sdata section. */ int sdata_symbolic_operand (op, mode) rtx op; enum machine_mode mode ATTRIBUTE_UNUSED; { switch (GET_CODE (op)) { case CONST: if (GET_CODE (XEXP (op, 0)) != PLUS || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF) break; op = XEXP (XEXP (op, 0), 0); /* FALLTHRU */ case SYMBOL_REF: if (CONSTANT_POOL_ADDRESS_P (op)) return GET_MODE_SIZE (get_pool_mode (op)) <= ia64_section_threshold; else { const char *str = XSTR (op, 0); return (str[0] == ENCODE_SECTION_INFO_CHAR && str[1] == 's'); } default: break; } return 0; } /* Return 1 if OP refers to a symbol, and is appropriate for a GOT load. */ int got_symbolic_operand (op, mode) rtx op; enum machine_mode mode ATTRIBUTE_UNUSED; { switch (GET_CODE (op)) { case CONST: op = XEXP (op, 0); if (GET_CODE (op) != PLUS) return 0; if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF) return 0; op = XEXP (op, 1); if (GET_CODE (op) != CONST_INT) return 0; return 1; /* Ok if we're not using GOT entries at all. */ if (TARGET_NO_PIC || TARGET_AUTO_PIC) return 1; /* "Ok" while emitting rtl, since otherwise we won't be provided with the entire offset during emission, which makes it very hard to split the offset into high and low parts. 
*/ if (rtx_equal_function_value_matters) return 1; /* Force the low 14 bits of the constant to zero so that we do not use up so many GOT entries. */ return (INTVAL (op) & 0x3fff) == 0; case SYMBOL_REF: case LABEL_REF: return 1; default: break; } return 0; } /* Return 1 if OP refers to a symbol. */ int symbolic_operand (op, mode) rtx op; enum machine_mode mode ATTRIBUTE_UNUSED; { switch (GET_CODE (op)) { case CONST: case SYMBOL_REF: case LABEL_REF: return 1; default: break; } return 0; } /* Return tls_model if OP refers to a TLS symbol. */ int tls_symbolic_operand (op, mode) rtx op; enum machine_mode mode ATTRIBUTE_UNUSED; { const char *str; if (GET_CODE (op) != SYMBOL_REF) return 0; str = XSTR (op, 0); if (str[0] != ENCODE_SECTION_INFO_CHAR) return 0; switch (str[1]) { case 'G': return TLS_MODEL_GLOBAL_DYNAMIC; case 'L': return TLS_MODEL_LOCAL_DYNAMIC; case 'i': return TLS_MODEL_INITIAL_EXEC; case 'l': return TLS_MODEL_LOCAL_EXEC; } return 0; } /* Return 1 if OP refers to a function. */ int function_operand (op, mode) rtx op; enum machine_mode mode ATTRIBUTE_UNUSED; { if (GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_FLAG (op)) return 1; else return 0; } /* Return 1 if OP is setjmp or a similar function. */ /* ??? This is an unsatisfying solution. Should rethink. */ int setjmp_operand (op, mode) rtx op; enum machine_mode mode ATTRIBUTE_UNUSED; { const char *name; int retval = 0; if (GET_CODE (op) != SYMBOL_REF) return 0; name = XSTR (op, 0); /* The following code is borrowed from special_function_p in calls.c. */ /* Disregard prefix _, __ or __x. */ if (name[0] == '_') { if (name[1] == '_' && name[2] == 'x') name += 3; else if (name[1] == '_') name += 2; else name += 1; } if (name[0] == 's') { retval = ((name[1] == 'e' && (! strcmp (name, "setjmp") || ! strcmp (name, "setjmp_syscall"))) || (name[1] == 'i' && ! strcmp (name, "sigsetjmp")) || (name[1] == 'a' && ! strcmp (name, "savectx"))); } else if ((name[0] == 'q' && name[1] == 's' && ! 
strcmp (name, "qsetjmp"))
	   || (name[0] == 'v' && name[1] == 'f'
	       && ! strcmp (name, "vfork")))
    retval = 1;

  return retval;
}

/* Return 1 if OP is a general operand, but when pic exclude symbolic
   operands.  */

/* ??? If we drop no-pic support, can delete SYMBOL_REF, CONST, and
   LABEL_REF from PREDICATE_CODES.  */

int
move_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* Under PIC, symbolic operands must go through the GOT machinery and
     cannot be handled as plain moves.  */
  if (! TARGET_NO_PIC && symbolic_operand (op, mode))
    return 0;

  return general_operand (op, mode);
}

/* Return 1 if OP is a register operand that is (or could be) a GR reg.  */

int
gr_register_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! register_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return GENERAL_REGNO_P (regno);
    }
  /* A pseudo register might still be allocated to a GR.  */
  return 1;
}

/* Return 1 if OP is a register operand that is (or could be) an FR reg.  */

int
fr_register_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! register_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return FR_REGNO_P (regno);
    }
  /* A pseudo register might still be allocated to an FR.  */
  return 1;
}

/* Return 1 if OP is a register operand that is (or could be) a GR/FR reg.  */

int
grfr_register_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! register_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a nonimmediate operand that is (or could be) a GR reg.  */

int
gr_nonimmediate_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! nonimmediate_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return GENERAL_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a nonimmediate operand that is (or could be) a FR reg.  */

int
fr_nonimmediate_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! nonimmediate_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return FR_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a nonimmediate operand that is a GR/FR reg.  */

int
grfr_nonimmediate_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! nonimmediate_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a GR register operand, or zero.  */

int
gr_reg_or_0_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return (op == const0_rtx || gr_register_operand (op, mode));
}

/* Return 1 if OP is a GR register operand, or a 5 bit immediate operand.  */

int
gr_reg_or_5bit_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return ((GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 32)
	  || GET_CODE (op) == CONSTANT_P_RTX
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a GR register operand, or a 6 bit immediate operand.  */

int
gr_reg_or_6bit_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
	  || GET_CODE (op) == CONSTANT_P_RTX
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a GR register operand, or an 8 bit immediate operand.
   */

int
gr_reg_or_8bit_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
	  || GET_CODE (op) == CONSTANT_P_RTX
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a GR/FR register operand, or an 8 bit immediate.  */

int
grfr_reg_or_8bit_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
	  || GET_CODE (op) == CONSTANT_P_RTX
	  || grfr_register_operand (op, mode));
}

/* Return 1 if OP is a register operand, or an 8 bit adjusted immediate
   operand.  */

int
gr_reg_or_8bit_adjusted_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_L (INTVAL (op)))
	  || GET_CODE (op) == CONSTANT_P_RTX
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a register operand, or is valid for both an 8 bit
   immediate and an 8 bit adjusted immediate operand.  This is necessary
   because when we emit a compare, we don't know what the condition will be,
   so we need the union of the immediates accepted by GT and LT.  */

int
gr_reg_or_8bit_and_adjusted_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op))
	   && CONST_OK_FOR_L (INTVAL (op)))
	  || GET_CODE (op) == CONSTANT_P_RTX
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a register operand, or a 14 bit immediate operand.  */

int
gr_reg_or_14bit_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_I (INTVAL (op)))
	  || GET_CODE (op) == CONSTANT_P_RTX
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a register operand, or a 22 bit immediate operand.  */

int
gr_reg_or_22bit_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
	  || GET_CODE (op) == CONSTANT_P_RTX
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a 6 bit immediate operand.
*/

int
shift_count_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
	  || GET_CODE (op) == CONSTANT_P_RTX);
}

/* Return 1 if OP is a 5 bit immediate operand.  */

int
shift_32bit_count_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return ((GET_CODE (op) == CONST_INT
	   && (INTVAL (op) >= 0 && INTVAL (op) < 32))
	  || GET_CODE (op) == CONSTANT_P_RTX);
}

/* Return 1 if OP is a 2, 4, 8, or 16 immediate operand.  */

int
shladd_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (GET_CODE (op) == CONST_INT
	  && (INTVAL (op) == 2 || INTVAL (op) == 4
	      || INTVAL (op) == 8 || INTVAL (op) == 16));
}

/* Return 1 if OP is a -16, -8, -4, -1, 1, 4, 8, or 16 immediate operand.  */

int
fetchadd_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  return (GET_CODE (op) == CONST_INT
	  && (INTVAL (op) == -16 || INTVAL (op) == -8
	      || INTVAL (op) == -4 || INTVAL (op) == -1
	      || INTVAL (op) == 1 || INTVAL (op) == 4
	      || INTVAL (op) == 8 || INTVAL (op) == 16));
}

/* Return 1 if OP is a floating-point constant zero, one, or a register.  */

int
fr_reg_or_fp01_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  return ((GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (op))
	  || fr_register_operand (op, mode));
}

/* Like nonimmediate_operand, but don't allow MEMs that try to use a
   POST_MODIFY with a REG as displacement.  */

int
destination_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! nonimmediate_operand (op, mode))
    return 0;
  /* Reject (mem (post_modify base (plus base (reg)))) — i.e. a
     register displacement in the POST_MODIFY address.  */
  if (GET_CODE (op) == MEM
      && GET_CODE (XEXP (op, 0)) == POST_MODIFY
      && GET_CODE (XEXP (XEXP (XEXP (op, 0), 1), 1)) == REG)
    return 0;
  return 1;
}

/* Like memory_operand, but don't allow post-increments.
   */

int
not_postinc_memory_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* RTX class 'a' is the autoincrement addressing class.  */
  return (memory_operand (op, mode)
	  && GET_RTX_CLASS (GET_CODE (XEXP (op, 0))) != 'a');
}

/* Return 1 if this is a comparison operator, which accepts a normal 8-bit
   signed immediate operand.  */

int
normal_comparison_operator (op, mode)
    register rtx op;
    enum machine_mode mode;
{
  enum rtx_code code = GET_CODE (op);
  return ((mode == VOIDmode || GET_MODE (op) == mode)
	  && (code == EQ || code == NE
	      || code == GT || code == LE || code == GTU || code == LEU));
}

/* Return 1 if this is a comparison operator, which accepts an adjusted 8-bit
   signed immediate operand.  */

int
adjusted_comparison_operator (op, mode)
    register rtx op;
    enum machine_mode mode;
{
  enum rtx_code code = GET_CODE (op);
  return ((mode == VOIDmode || GET_MODE (op) == mode)
	  && (code == LT || code == GE || code == LTU || code == GEU));
}

/* Return 1 if this is a signed inequality operator.  */

int
signed_inequality_operator (op, mode)
    register rtx op;
    enum machine_mode mode;
{
  enum rtx_code code = GET_CODE (op);
  return ((mode == VOIDmode || GET_MODE (op) == mode)
	  && (code == GE || code == GT
	      || code == LE || code == LT));
}

/* Return 1 if this operator is valid for predication.  */

int
predicate_operator (op, mode)
    register rtx op;
    enum machine_mode mode;
{
  enum rtx_code code = GET_CODE (op);
  return ((GET_MODE (op) == mode || mode == VOIDmode)
	  && (code == EQ || code == NE));
}

/* Return 1 if this operator can be used in a conditional operation.  */

int
condop_operator (op, mode)
    register rtx op;
    enum machine_mode mode;
{
  enum rtx_code code = GET_CODE (op);
  return ((GET_MODE (op) == mode || mode == VOIDmode)
	  && (code == PLUS || code == MINUS || code == AND
	      || code == IOR || code == XOR));
}

/* Return 1 if this is the ar.lc register.
*/

int
ar_lc_reg_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  return (GET_MODE (op) == DImode
	  && (mode == DImode || mode == VOIDmode)
	  && GET_CODE (op) == REG
	  && REGNO (op) == AR_LC_REGNUM);
}

/* Return 1 if this is the ar.ccv register.  */

int
ar_ccv_reg_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  return ((GET_MODE (op) == mode || mode == VOIDmode)
	  && GET_CODE (op) == REG
	  && REGNO (op) == AR_CCV_REGNUM);
}

/* Return 1 if this is the ar.pfs register.  */

int
ar_pfs_reg_operand (op, mode)
     register rtx op;
     enum machine_mode mode;
{
  return ((GET_MODE (op) == mode || mode == VOIDmode)
	  && GET_CODE (op) == REG
	  && REGNO (op) == AR_PFS_REGNUM);
}

/* Like general_operand, but don't allow (mem (addressof)).  */

int
general_tfmode_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! general_operand (op, mode))
    return 0;
  if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
    return 0;
  return 1;
}

/* Similarly.  */

int
destination_tfmode_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (! destination_operand (op, mode))
    return 0;
  if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
    return 0;
  return 1;
}

/* Similarly.  */

int
tfreg_or_fp01_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (GET_CODE (op) == SUBREG)
    return 0;
  return fr_reg_or_fp01_operand (op, mode);
}

/* Return 1 if OP is valid as a base register in a reg + offset address.  */

int
basereg_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  /* ??? Should I copy the flag_omit_frame_pointer and cse_not_expected
     checks from pa.c basereg_operand as well?  Seems to be OK without them
     in test runs.  */

  return (register_operand (op, mode)
	  && REG_POINTER ((GET_CODE (op) == SUBREG) ? SUBREG_REG (op) : op));
}

/* Return 1 if the operands of a move are ok.  */

int
ia64_move_ok (dst, src)
     rtx dst, src;
{
  /* If we're under init_recog_no_volatile, we'll not be able to use
     memory_operand.
So check the code directly and don't worry about the validity of the underlying address, which should have been checked elsewhere anyway. */ if (GET_CODE (dst) != MEM) return 1; if (GET_CODE (src) == MEM) return 0; if (register_operand (src, VOIDmode)) return 1; /* Otherwise, this must be a constant, and that either 0 or 0.0 or 1.0. */ if (INTEGRAL_MODE_P (GET_MODE (dst))) return src == const0_rtx; else return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src); } /* Return 0 if we are doing C++ code. This optimization fails with C++ because of GNAT c++/6685. */ int addp4_optimize_ok (op1, op2) rtx op1, op2; { if (!strcmp (lang_hooks.name, "GNU C++")) return 0; return (basereg_operand (op1, GET_MODE(op1)) != basereg_operand (op2, GET_MODE(op2))); } /* Check if OP is a mask suitible for use with SHIFT in a dep.z instruction. Return the length of the field, or <= 0 on failure. */ int ia64_depz_field_mask (rop, rshift) rtx rop, rshift; { unsigned HOST_WIDE_INT op = INTVAL (rop); unsigned HOST_WIDE_INT shift = INTVAL (rshift); /* Get rid of the zero bits we're shifting in. */ op >>= shift; /* We must now have a solid block of 1's at bit 0. */ return exact_log2 (op + 1); } /* Expand a symbolic constant load. */ /* ??? Should generalize this, so that we can also support 32 bit pointers. */ void ia64_expand_load_address (dest, src, scratch) rtx dest, src, scratch; { rtx temp; /* The destination could be a MEM during initial rtl generation, which isn't a valid destination for the PIC load address patterns. */ if (! register_operand (dest, DImode)) if (! scratch || ! 
register_operand (scratch, DImode)) temp = gen_reg_rtx (DImode); else temp = scratch; else temp = dest; if (tls_symbolic_operand (src, Pmode)) abort (); if (TARGET_AUTO_PIC) emit_insn (gen_load_gprel64 (temp, src)); else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FLAG (src)) emit_insn (gen_load_fptr (temp, src)); else if ((GET_MODE (src) == Pmode || GET_MODE (src) == ptr_mode) && sdata_symbolic_operand (src, VOIDmode)) emit_insn (gen_load_gprel (temp, src)); else if (GET_CODE (src) == CONST && GET_CODE (XEXP (src, 0)) == PLUS && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x1fff) != 0) { rtx subtarget = no_new_pseudos ? temp : gen_reg_rtx (DImode); rtx sym = XEXP (XEXP (src, 0), 0); HOST_WIDE_INT ofs, hi, lo; /* Split the offset into a sign extended 14-bit low part and a complementary high part. */ ofs = INTVAL (XEXP (XEXP (src, 0), 1)); lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000; hi = ofs - lo; if (! scratch) scratch = no_new_pseudos ? subtarget : gen_reg_rtx (DImode); emit_insn (gen_load_symptr (subtarget, plus_constant (sym, hi), scratch)); emit_insn (gen_adddi3 (temp, subtarget, GEN_INT (lo))); } else { rtx insn; if (! scratch) scratch = no_new_pseudos ? 
temp : gen_reg_rtx (DImode); insn = emit_insn (gen_load_symptr (temp, src, scratch)); #ifdef POINTERS_EXTEND_UNSIGNED if (GET_MODE (temp) != GET_MODE (src)) src = convert_memory_address (GET_MODE (temp), src); #endif REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, src, REG_NOTES (insn)); } if (temp != dest) { if (GET_MODE (dest) != GET_MODE (temp)) temp = convert_to_mode (GET_MODE (dest), temp, 0); emit_move_insn (dest, temp); } } static GTY(()) rtx gen_tls_tga; static rtx gen_tls_get_addr () { if (!gen_tls_tga) { gen_tls_tga = init_one_libfunc ("__tls_get_addr"); } return gen_tls_tga; } static GTY(()) rtx thread_pointer_rtx; static rtx gen_thread_pointer () { if (!thread_pointer_rtx) { thread_pointer_rtx = gen_rtx_REG (Pmode, 13); RTX_UNCHANGING_P (thread_pointer_rtx) = 1; } return thread_pointer_rtx; } rtx ia64_expand_move (op0, op1) rtx op0, op1; { enum machine_mode mode = GET_MODE (op0); if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1)) op1 = force_reg (mode, op1); if (mode == Pmode || mode == ptr_mode) { enum tls_model tls_kind; if ((tls_kind = tls_symbolic_operand (op1, Pmode))) { rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns; switch (tls_kind) { case TLS_MODEL_GLOBAL_DYNAMIC: start_sequence (); tga_op1 = gen_reg_rtx (Pmode); emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1)); tga_op1 = gen_rtx_MEM (Pmode, tga_op1); RTX_UNCHANGING_P (tga_op1) = 1; tga_op2 = gen_reg_rtx (Pmode); emit_insn (gen_load_ltoff_dtprel (tga_op2, op1)); tga_op2 = gen_rtx_MEM (Pmode, tga_op2); RTX_UNCHANGING_P (tga_op2) = 1; tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX, LCT_CONST, Pmode, 2, tga_op1, Pmode, tga_op2, Pmode); insns = get_insns (); end_sequence (); emit_libcall_block (insns, op0, tga_ret, op1); return NULL_RTX; case TLS_MODEL_LOCAL_DYNAMIC: /* ??? 
This isn't the completely proper way to do local-dynamic If the call to __tls_get_addr is used only by a single symbol, then we should (somehow) move the dtprel to the second arg to avoid the extra add. */ start_sequence (); tga_op1 = gen_reg_rtx (Pmode); emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1)); tga_op1 = gen_rtx_MEM (Pmode, tga_op1); RTX_UNCHANGING_P (tga_op1) = 1; tga_op2 = const0_rtx; tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX, LCT_CONST, Pmode, 2, tga_op1, Pmode, tga_op2, Pmode); insns = get_insns (); end_sequence (); tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_LD_BASE); tmp = gen_reg_rtx (Pmode); emit_libcall_block (insns, tmp, tga_ret, tga_eqv); if (register_operand (op0, Pmode)) tga_ret = op0; else tga_ret = gen_reg_rtx (Pmode); if (TARGET_TLS64) { emit_insn (gen_load_dtprel (tga_ret, op1)); emit_insn (gen_adddi3 (tga_ret, tmp, tga_ret)); } else emit_insn (gen_add_dtprel (tga_ret, tmp, op1)); if (tga_ret == op0) return NULL_RTX; op1 = tga_ret; break; case TLS_MODEL_INITIAL_EXEC: tmp = gen_reg_rtx (Pmode); emit_insn (gen_load_ltoff_tprel (tmp, op1)); tmp = gen_rtx_MEM (Pmode, tmp); RTX_UNCHANGING_P (tmp) = 1; tmp = force_reg (Pmode, tmp); if (register_operand (op0, Pmode)) op1 = op0; else op1 = gen_reg_rtx (Pmode); emit_insn (gen_adddi3 (op1, tmp, gen_thread_pointer ())); if (op1 == op0) return NULL_RTX; break; case TLS_MODEL_LOCAL_EXEC: if (register_operand (op0, Pmode)) tmp = op0; else tmp = gen_reg_rtx (Pmode); if (TARGET_TLS64) { emit_insn (gen_load_tprel (tmp, op1)); emit_insn (gen_adddi3 (tmp, gen_thread_pointer (), tmp)); } else emit_insn (gen_add_tprel (tmp, gen_thread_pointer (), op1)); if (tmp == op0) return NULL_RTX; op1 = tmp; break; default: abort (); } } else if (!TARGET_NO_PIC && (symbolic_operand (op1, Pmode) || symbolic_operand (op1, ptr_mode))) { /* Before optimization starts, delay committing to any particular type of PIC address load. 
If this function gets deferred, we may acquire information that changes the value of the sdata_symbolic_operand predicate. But don't delay for function pointers. Loading a function address actually loads the address of the descriptor not the function. If we represent these as SYMBOL_REFs, then they get cse'd with calls, and we end up with calls to the descriptor address instead of calls to the function address. Functions are not candidates for sdata anyways. Don't delay for LABEL_REF because the splitter loses REG_LABEL notes. Don't delay for pool addresses on general principals; they'll never become non-local behind our back. */ if (rtx_equal_function_value_matters && GET_CODE (op1) != LABEL_REF && ! (GET_CODE (op1) == SYMBOL_REF && (SYMBOL_REF_FLAG (op1) || CONSTANT_POOL_ADDRESS_P (op1) || STRING_POOL_ADDRESS_P (op1)))) if (GET_MODE (op1) == DImode) emit_insn (gen_movdi_symbolic (op0, op1)); else emit_insn (gen_movsi_symbolic (op0, op1)); else ia64_expand_load_address (op0, op1, NULL_RTX); return NULL_RTX; } } return op1; } /* Split a post-reload TImode reference into two DImode components. */ rtx ia64_split_timode (out, in, scratch) rtx out[2]; rtx in, scratch; { switch (GET_CODE (in)) { case REG: out[0] = gen_rtx_REG (DImode, REGNO (in)); out[1] = gen_rtx_REG (DImode, REGNO (in) + 1); return NULL_RTX; case MEM: { rtx base = XEXP (in, 0); switch (GET_CODE (base)) { case REG: out[0] = adjust_address (in, DImode, 0); break; case POST_MODIFY: base = XEXP (base, 0); out[0] = adjust_address (in, DImode, 0); break; /* Since we're changing the mode, we need to change to POST_MODIFY as well to preserve the size of the increment. Either that or do the update in two steps, but we've already got this scratch register handy so let's use it. 
*/ case POST_INC: base = XEXP (base, 0); out[0] = change_address (in, DImode, gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, 16))); break; case POST_DEC: base = XEXP (base, 0); out[0] = change_address (in, DImode, gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -16))); break; default: abort (); } if (scratch == NULL_RTX) abort (); out[1] = change_address (in, DImode, scratch); return gen_adddi3 (scratch, base, GEN_INT (8)); } case CONST_INT: case CONST_DOUBLE: split_double (in, &out[0], &out[1]); return NULL_RTX; default: abort (); } } /* ??? Fixing GR->FR TFmode moves during reload is hard. You need to go through memory plus an extra GR scratch register. Except that you can either get the first from SECONDARY_MEMORY_NEEDED or the second from SECONDARY_RELOAD_CLASS, but not both. We got into problems in the first place by allowing a construct like (subreg:TF (reg:TI)), which we got from a union containing a long double. This solution attempts to prevent this situation from occurring. When we see something like the above, we spill the inner register to memory. */ rtx spill_tfmode_operand (in, force) rtx in; int force; { if (GET_CODE (in) == SUBREG && GET_MODE (SUBREG_REG (in)) == TImode && GET_CODE (SUBREG_REG (in)) == REG) { rtx mem = gen_mem_addressof (SUBREG_REG (in), NULL_TREE, true); return gen_rtx_MEM (TFmode, copy_to_reg (XEXP (mem, 0))); } else if (force && GET_CODE (in) == REG) { rtx mem = gen_mem_addressof (in, NULL_TREE, true); return gen_rtx_MEM (TFmode, copy_to_reg (XEXP (mem, 0))); } else if (GET_CODE (in) == MEM && GET_CODE (XEXP (in, 0)) == ADDRESSOF) return change_address (in, TFmode, copy_to_reg (XEXP (in, 0))); else return in; } /* Emit comparison instruction if necessary, returning the expression that holds the compare result in the proper mode. 
*/ rtx ia64_expand_compare (code, mode) enum rtx_code code; enum machine_mode mode; { rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1; rtx cmp; /* If we have a BImode input, then we already have a compare result, and do not need to emit another comparison. */ if (GET_MODE (op0) == BImode) { if ((code == NE || code == EQ) && op1 == const0_rtx) cmp = op0; else abort (); } else { cmp = gen_reg_rtx (BImode); emit_insn (gen_rtx_SET (VOIDmode, cmp, gen_rtx_fmt_ee (code, BImode, op0, op1))); code = NE; } return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx); } /* Emit the appropriate sequence for a call. */ void ia64_expand_call (retval, addr, nextarg, sibcall_p) rtx retval; rtx addr; rtx nextarg ATTRIBUTE_UNUSED; int sibcall_p; { rtx insn, b0; addr = XEXP (addr, 0); b0 = gen_rtx_REG (DImode, R_BR (0)); /* ??? Should do this for functions known to bind local too. */ if (TARGET_NO_PIC || TARGET_AUTO_PIC) { if (sibcall_p) insn = gen_sibcall_nogp (addr); else if (! retval) insn = gen_call_nogp (addr, b0); else insn = gen_call_value_nogp (retval, addr, b0); insn = emit_call_insn (insn); } else { if (sibcall_p) insn = gen_sibcall_gp (addr); else if (! 
retval) insn = gen_call_gp (addr, b0); else insn = gen_call_value_gp (retval, addr, b0); insn = emit_call_insn (insn); use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx); } if (sibcall_p) { use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0); use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (DImode, AR_PFS_REGNUM)); } } void ia64_reload_gp () { rtx tmp; if (current_frame_info.reg_save_gp) tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp); else { HOST_WIDE_INT offset; offset = (current_frame_info.spill_cfa_off + current_frame_info.spill_size); if (frame_pointer_needed) { tmp = hard_frame_pointer_rtx; offset = -offset; } else { tmp = stack_pointer_rtx; offset = current_frame_info.total_size - offset; } if (CONST_OK_FOR_I (offset)) emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, GEN_INT (offset))); else { emit_move_insn (pic_offset_table_rtx, GEN_INT (offset)); emit_insn (gen_adddi3 (pic_offset_table_rtx, pic_offset_table_rtx, tmp)); } tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx); } emit_move_insn (pic_offset_table_rtx, tmp); } void ia64_split_call (retval, addr, retaddr, scratch_r, scratch_b, noreturn_p, sibcall_p) rtx retval, addr, retaddr, scratch_r, scratch_b; int noreturn_p, sibcall_p; { rtx insn; bool is_desc = false; /* If we find we're calling through a register, then we're actually calling through a descriptor, so load up the values. */ if (REG_P (addr)) { rtx tmp; bool addr_dead_p; /* ??? We are currently constrained to *not* use peep2, because we can legitimiately change the global lifetime of the GP (in the form of killing where previously live). This is because a call through a descriptor doesn't use the previous value of the GP, while a direct call does, and we do not commit to either form until the split here. That said, this means that we lack precise life info for whether ADDR is dead after this call. 
This is not terribly important, since we can fix things up essentially for free with the POST_DEC below, but it's nice to not use it when we can immediately tell it's not necessary. */ addr_dead_p = ((noreturn_p || sibcall_p || TEST_HARD_REG_BIT (regs_invalidated_by_call, REGNO (addr))) && !FUNCTION_ARG_REGNO_P (REGNO (addr))); /* Load the code address into scratch_b. */ tmp = gen_rtx_POST_INC (Pmode, addr); tmp = gen_rtx_MEM (Pmode, tmp); emit_move_insn (scratch_r, tmp); emit_move_insn (scratch_b, scratch_r); /* Load the GP address. If ADDR is not dead here, then we must revert the change made above via the POST_INCREMENT. */ if (!addr_dead_p) tmp = gen_rtx_POST_DEC (Pmode, addr); else tmp = addr; tmp = gen_rtx_MEM (Pmode, tmp); emit_move_insn (pic_offset_table_rtx, tmp); is_desc = true; addr = scratch_b; } if (sibcall_p) insn = gen_sibcall_nogp (addr); else if (retval) insn = gen_call_value_nogp (retval, addr, retaddr); else insn = gen_call_nogp (addr, retaddr); emit_call_insn (insn); if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p) ia64_reload_gp (); } /* Begin the assembly file. */ void emit_safe_across_calls (f) FILE *f; { unsigned int rs, re; int out_state; rs = 1; out_state = 0; while (1) { while (rs < 64 && call_used_regs[PR_REG (rs)]) rs++; if (rs >= 64) break; for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++) continue; if (out_state == 0) { fputs ("\t.pred.safe_across_calls ", f); out_state = 1; } else fputc (',', f); if (re == rs + 1) fprintf (f, "p%u", rs); else fprintf (f, "p%u-p%u", rs, re - 1); rs = re + 1; } if (out_state) fputc ('\n', f); } /* Helper function for ia64_compute_frame_size: find an appropriate general register to spill some special register to. SPECIAL_SPILL_MASK contains bits in GR0 to GR31 that have already been allocated by this routine. TRY_LOCALS is true if we should attempt to locate a local regnum. 
 */

static int
find_gr_spill (try_locals)
     int try_locals;
{
  int regno;

  /* If this is a leaf function, first try an otherwise unused
     call-clobbered register.  */
  if (current_function_is_leaf)
    {
      for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
	if (! regs_ever_live[regno]
	    && call_used_regs[regno]
	    && ! fixed_regs[regno]
	    && ! global_regs[regno]
	    && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
	  {
	    current_frame_info.gr_used_mask |= 1 << regno;
	    return regno;
	  }
    }

  if (try_locals)
    {
      regno = current_frame_info.n_local_regs;
      /* If there is a frame pointer, then we can't use loc79, because
	 that is HARD_FRAME_POINTER_REGNUM.  In particular, see the
	 reg_name switching code in ia64_expand_prologue.  */
      if (regno < (80 - frame_pointer_needed))
	{
	  current_frame_info.n_local_regs = regno + 1;
	  return LOC_REG (0) + regno;
	}
    }

  /* Failed to find a general register to spill to.  Must use stack.  */
  return 0;
}

/* In order to make for nice schedules, we try to allocate every temporary
   to a different register.  We must of course stay away from call-saved,
   fixed, and global registers.  We must also stay away from registers
   allocated in current_frame_info.gr_used_mask, since those include regs
   used all through the prologue.

   Any register allocated here must be used immediately.  The idea is to
   aid scheduling, not to solve data flow problems.  */

/* Last register handed out, so successive calls rotate through GR0-GR31.  */
static int last_scratch_gr_reg;

static int
next_scratch_gr_reg ()
{
  int i, regno;

  for (i = 0; i < 32; ++i)
    {
      regno = (last_scratch_gr_reg + i + 1) & 31;
      if (call_used_regs[regno]
	  && ! fixed_regs[regno]
	  && ! global_regs[regno]
	  && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
	{
	  last_scratch_gr_reg = regno;
	  return regno;
	}
    }

  /* There must be _something_ available.  */
  abort ();
}

/* Helper function for ia64_compute_frame_size, called through
   diddle_return_value.  Mark REG in current_frame_info.gr_used_mask.
 */

static void
mark_reg_gr_used_mask (reg, data)
     rtx reg;
     void *data ATTRIBUTE_UNUSED;
{
  unsigned int regno = REGNO (reg);
  if (regno < 32)
    {
      /* Multi-word values occupy several consecutive hard regs;
	 mark every one of them.  */
      unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
      for (i = 0; i < n; ++i)
	current_frame_info.gr_used_mask |= 1 << (regno + i);
    }
}

/* Compute the layout of the stack frame for the current function and
   record it in current_frame_info (register stack partition, spill
   masks, spill sizes and total frame size).  SIZE is the number of
   bytes of space needed for local variables.  Idempotent once
   current_frame_info.initialized is set.  */

static void
ia64_compute_frame_size (size)
     HOST_WIDE_INT size;
{
  HOST_WIDE_INT total_size;
  HOST_WIDE_INT spill_size = 0;
  HOST_WIDE_INT extra_spill_size = 0;
  HOST_WIDE_INT pretend_args_size;
  HARD_REG_SET mask;
  int n_spilled = 0;
  int spilled_gr_p = 0;
  int spilled_fr_p = 0;
  unsigned int regno;
  int i;

  if (current_frame_info.initialized)
    return;

  memset (&current_frame_info, 0, sizeof current_frame_info);
  CLEAR_HARD_REG_SET (mask);

  /* Don't allocate scratches to the return register.  */
  diddle_return_value (mark_reg_gr_used_mask, NULL);

  /* Don't allocate scratches to the EH scratch registers.  */
  if (cfun->machine->ia64_eh_epilogue_sp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
  if (cfun->machine->ia64_eh_epilogue_bsp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);

  /* Find the size of the register stack frame.  We have only 80 local
     registers, because we reserve 8 for the inputs and 8 for the
     outputs.  */

  /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
     since we'll be adjusting that down later.  */
  regno = LOC_REG (78) + ! frame_pointer_needed;
  for (; regno >= LOC_REG (0); regno--)
    if (regs_ever_live[regno])
      break;
  current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;

  /* For functions marked with the syscall_linkage attribute, we must mark
     all eight input registers as in use, so that locals aren't visible to
     the caller.  */
  if (cfun->machine->n_varargs > 0
      || lookup_attribute ("syscall_linkage",
			   TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
    current_frame_info.n_input_regs = 8;
  else
    {
      for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
	if (regs_ever_live[regno])
	  break;
      current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
    }

  for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
    if (regs_ever_live[regno])
      break;
  i = regno - OUT_REG (0) + 1;

  /* When -p profiling, we need one output register for the mcount argument.
     Likewise for -a profiling for the bb_init_func argument.  For -ax
     profiling, we need two output registers for the two bb_init_trace_func
     arguments.  */
  if (current_function_profile)
    i = MAX (i, 1);
  current_frame_info.n_output_regs = i;

  /* ??? No rotating register support yet.  */
  current_frame_info.n_rotate_regs = 0;

  /* Discover which registers need spilling, and how much room that
     will take.  Begin with floating point and general registers,
     which will always wind up on the stack.  */

  for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 16;
	n_spilled += 1;
	spilled_fr_p = 1;
      }

  for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 8;
	n_spilled += 1;
	spilled_gr_p = 1;
      }

  for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 8;
	n_spilled += 1;
      }

  /* Now come all special registers that might get saved in other
     general registers.  */

  if (frame_pointer_needed)
    {
      current_frame_info.reg_fp = find_gr_spill (1);
      /* If we did not get a register, then we take LOC79.  This is
	 guaranteed to be free, even if regs_ever_live is already set,
	 because this is HARD_FRAME_POINTER_REGNUM.  This requires
	 incrementing n_local_regs, as we don't count loc79 above.  */
      if (current_frame_info.reg_fp == 0)
	{
	  current_frame_info.reg_fp = LOC_REG (79);
	  current_frame_info.n_local_regs++;
	}
    }

  if (! current_function_is_leaf)
    {
      /* Emit a save of BR0 if we call other functions.  Do this even
	 if this function doesn't return, as EH depends on this to be
	 able to unwind the stack.  */
      SET_HARD_REG_BIT (mask, BR_REG (0));

      current_frame_info.reg_save_b0 = find_gr_spill (1);
      if (current_frame_info.reg_save_b0 == 0)
	{
	  spill_size += 8;
	  n_spilled += 1;
	}

      /* Similarly for ar.pfs.  */
      SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
      current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
      if (current_frame_info.reg_save_ar_pfs == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      /* Similarly for gp.  Note that if we're calling setjmp, the stacked
	 registers are clobbered, so we fall back to the stack.  */
      current_frame_info.reg_save_gp
	= (current_function_calls_setjmp ? 0 : find_gr_spill (1));
      if (current_frame_info.reg_save_gp == 0)
	{
	  SET_HARD_REG_BIT (mask, GR_REG (1));
	  spill_size += 8;
	  n_spilled += 1;
	}
    }
  else
    {
      if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
	{
	  SET_HARD_REG_BIT (mask, BR_REG (0));
	  spill_size += 8;
	  n_spilled += 1;
	}

      if (regs_ever_live[AR_PFS_REGNUM])
	{
	  SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
	  current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
	  if (current_frame_info.reg_save_ar_pfs == 0)
	    {
	      extra_spill_size += 8;
	      n_spilled += 1;
	    }
	}
    }

  /* Unwind descriptor hackery: things are most efficient if we allocate
     consecutive GR save registers for RP, PFS, FP in that order.  However,
     it is absolutely critical that FP get the only hard register that's
     guaranteed to be free, so we allocated it first.  If all three did
     happen to be allocated hard regs, and are consecutive, rearrange them
     into the preferred order now.  */
  if (current_frame_info.reg_fp != 0
      && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
      && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
    {
      current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
      current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
      current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
    }

  /* See if we need to store the predicate register block.  */
  for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      break;
  if (regno <= PR_REG (63))
    {
      SET_HARD_REG_BIT (mask, PR_REG (0));
      current_frame_info.reg_save_pr = find_gr_spill (1);
      if (current_frame_info.reg_save_pr == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      /* ??? Mark them all as used so that register renaming and such
	 are free to use them.  */
      for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
	regs_ever_live[regno] = 1;
    }

  /* If we're forced to use st8.spill, we're forced to save and restore
     ar.unat as well.  The check for existing liveness allows inline asm
     to touch ar.unat.  */
  if (spilled_gr_p || cfun->machine->n_varargs
      || regs_ever_live[AR_UNAT_REGNUM])
    {
      regs_ever_live[AR_UNAT_REGNUM] = 1;
      SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
      current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
      if (current_frame_info.reg_save_ar_unat == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}
    }

  if (regs_ever_live[AR_LC_REGNUM])
    {
      SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
      current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
      if (current_frame_info.reg_save_ar_lc == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}
    }

  /* If we have an odd number of words of pretend arguments written to
     the stack, then the FR save area will be unaligned.  We round the
     size of this area up to keep things 16 byte aligned.  */
  if (spilled_fr_p)
    pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
  else
    pretend_args_size = current_function_pretend_args_size;

  total_size = (spill_size + extra_spill_size + size + pretend_args_size
		+ current_function_outgoing_args_size);
  total_size = IA64_STACK_ALIGN (total_size);

  /* We always use the 16-byte scratch area provided by the caller, but
     if we are a leaf function, there's no one to which we need to provide
     a scratch area.  */
  if (current_function_is_leaf)
    total_size = MAX (0, total_size - 16);

  current_frame_info.total_size = total_size;
  current_frame_info.spill_cfa_off = pretend_args_size - 16;
  current_frame_info.spill_size = spill_size;
  current_frame_info.extra_spill_size = extra_spill_size;
  COPY_HARD_REG_SET (current_frame_info.mask, mask);
  current_frame_info.n_spilled = n_spilled;
  current_frame_info.initialized = reload_completed;
}

/* Compute the initial difference between the specified pair of registers.  */

HOST_WIDE_INT
ia64_initial_elimination_offset (from, to)
     int from, to;
{
  HOST_WIDE_INT offset;

  ia64_compute_frame_size (get_frame_size ());
  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      if (to == HARD_FRAME_POINTER_REGNUM)
	{
	  if (current_function_is_leaf)
	    offset = -current_frame_info.total_size;
	  else
	    offset = -(current_frame_info.total_size
		       - current_function_outgoing_args_size - 16);
	}
      else if (to == STACK_POINTER_REGNUM)
	{
	  if (current_function_is_leaf)
	    offset = 0;
	  else
	    offset = 16 + current_function_outgoing_args_size;
	}
      else
	abort ();
      break;

    case ARG_POINTER_REGNUM:
      /* Arguments start above the 16 byte save area, unless stdarg
	 in which case we store through the 16 byte save area.
 */
      if (to == HARD_FRAME_POINTER_REGNUM)
	offset = 16 - current_function_pretend_args_size;
      else if (to == STACK_POINTER_REGNUM)
	offset = (current_frame_info.total_size
		  + 16 - current_function_pretend_args_size);
      else
	abort ();
      break;

    case RETURN_ADDRESS_POINTER_REGNUM:
      offset = 0;
      break;

    default:
      abort ();
    }

  return offset;
}

/* If there are more than a trivial number of register spills, we use
   two interleaved iterators so that we can get two memory references
   per insn group.

   In order to simplify things in the prologue and epilogue expanders,
   we use helper functions to fix up the memory references after the
   fact with the appropriate offsets to a POST_MODIFY memory mode.
   The following data structure tracks the state of the two iterators
   while insns are being emitted.  */

struct spill_fill_data
{
  rtx init_after;		/* point at which to emit initializations */
  rtx init_reg[2];		/* initial base register */
  rtx iter_reg[2];		/* the iterator registers */
  rtx *prev_addr[2];		/* address of last memory use */
  rtx prev_insn[2];		/* the insn corresponding to prev_addr */
  HOST_WIDE_INT prev_off[2];	/* last offset */
  int n_iter;			/* number of iterators in use */
  int next_iter;		/* next iterator to use */
  unsigned int save_gr_used_mask;
};

static struct spill_fill_data spill_fill_data;

/* Initialize the spill/fill iterator state for N_SPILLS saves based at
   INIT_REG, whose CFA offset is CFA_OFF.  Allocates one iterator
   register, or two when there are more than two spills.  */

static void
setup_spill_pointers (n_spills, init_reg, cfa_off)
     int n_spills;
     rtx init_reg;
     HOST_WIDE_INT cfa_off;
{
  int i;

  spill_fill_data.init_after = get_last_insn ();
  spill_fill_data.init_reg[0] = init_reg;
  spill_fill_data.init_reg[1] = init_reg;
  spill_fill_data.prev_addr[0] = NULL;
  spill_fill_data.prev_addr[1] = NULL;
  spill_fill_data.prev_insn[0] = NULL;
  spill_fill_data.prev_insn[1] = NULL;
  spill_fill_data.prev_off[0] = cfa_off;
  spill_fill_data.prev_off[1] = cfa_off;
  spill_fill_data.next_iter = 0;
  spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;

  /* Use a second iterator only when there are enough spills to win.  */
  spill_fill_data.n_iter = 1 + (n_spills > 2);
  for (i = 0; i < spill_fill_data.n_iter; ++i)
    {
      int regno = next_scratch_gr_reg ();
      spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
      current_frame_info.gr_used_mask |= 1 << regno;
    }
}

/* Release the iterator registers reserved by setup_spill_pointers.  */

static void
finish_spill_pointers ()
{
  current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
}

/* Return a MEM for the save/restore of REG at CFA offset CFA_OFF,
   advancing the current iterator there, and fixing up the previous
   memory reference to a POST_MODIFY when the displacement allows.  */

static rtx
spill_restore_mem (reg, cfa_off)
     rtx reg;
     HOST_WIDE_INT cfa_off;
{
  int iter = spill_fill_data.next_iter;
  HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
  rtx disp_rtx = GEN_INT (disp);
  rtx mem;

  if (spill_fill_data.prev_addr[iter])
    {
      if (CONST_OK_FOR_N (disp))
	{
	  /* Fold the step into the previous access as a post-modify.  */
	  *spill_fill_data.prev_addr[iter]
	    = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
				   gen_rtx_PLUS (DImode,
						 spill_fill_data.iter_reg[iter],
						 disp_rtx));
	  REG_NOTES (spill_fill_data.prev_insn[iter])
	    = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
				 REG_NOTES (spill_fill_data.prev_insn[iter]));
	}
      else
	{
	  /* ??? Could use register post_modify for loads.  */
	  if (! CONST_OK_FOR_I (disp))
	    {
	      rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
	      emit_move_insn (tmp, disp_rtx);
	      disp_rtx = tmp;
	    }
	  emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
				 spill_fill_data.iter_reg[iter], disp_rtx));
	}
    }
  /* Micro-optimization: if we've created a frame pointer, it's at
     CFA 0, which may allow the real iterator to be initialized lower,
     slightly increasing parallelism.  Also, if there are few saves
     it may eliminate the iterator entirely.  */
  else if (disp == 0
	   && spill_fill_data.init_reg[iter] == stack_pointer_rtx
	   && frame_pointer_needed)
    {
      mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
      set_mem_alias_set (mem, get_varargs_alias_set ());
      return mem;
    }
  else
    {
      rtx seq, insn;

      if (disp == 0)
	seq = gen_movdi (spill_fill_data.iter_reg[iter],
			 spill_fill_data.init_reg[iter]);
      else
	{
	  start_sequence ();

	  if (!
CONST_OK_FOR_I (disp))
	    {
	      /* Displacement too large for an add immediate;
		 materialize it in a scratch register first.  */
	      rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
	      emit_move_insn (tmp, disp_rtx);
	      disp_rtx = tmp;
	    }

	  emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
				 spill_fill_data.init_reg[iter], disp_rtx));

	  seq = get_insns ();
	  end_sequence ();
	}

      /* Careful for being the first insn in a sequence.  */
      if (spill_fill_data.init_after)
	insn = emit_insn_after (seq, spill_fill_data.init_after);
      else
	{
	  rtx first = get_insns ();
	  if (first)
	    insn = emit_insn_before (seq, first);
	  else
	    insn = emit_insn (seq);
	}
      spill_fill_data.init_after = insn;

      /* If DISP is 0, we may or may not have a further adjustment
	 afterward.  If we do, then the load/store insn may be modified
	 to be a post-modify.  If we don't, then this copy may be
	 eliminated by copyprop_hardreg_forward, which makes this
	 insn garbage, which runs afoul of the sanity check in
	 propagate_one_insn.  So mark this insn as legal to delete.  */
      if (disp == 0)
	REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
					     REG_NOTES (insn));
    }

  mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);

  /* ??? Not all of the spills are for varargs, but some of them are.
     The rest of the spills belong in an alias set of their own.  But
     it doesn't actually hurt to include them here.  */
  set_mem_alias_set (mem, get_varargs_alias_set ());

  spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
  spill_fill_data.prev_off[iter] = cfa_off;

  /* Alternate between the iterators on successive calls.  */
  if (++iter >= spill_fill_data.n_iter)
    iter = 0;
  spill_fill_data.next_iter = iter;

  return mem;
}

/* Emit a spill of REG to its slot at CFA offset CFA_OFF using MOVE_FN.
   When FRAME_REG is non-null, mark the insn frame-related and attach an
   explicit REG_FRAME_RELATED_EXPR so the unwinder sees a plain
   base+offset store rather than the interleaved iterator form.  */

static void
do_spill (move_fn, reg, cfa_off, frame_reg)
     rtx (*move_fn) PARAMS ((rtx, rtx, rtx));
     rtx reg, frame_reg;
     HOST_WIDE_INT cfa_off;
{
  int iter = spill_fill_data.next_iter;
  rtx mem, insn;

  mem = spill_restore_mem (reg, cfa_off);
  insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
  spill_fill_data.prev_insn[iter] = insn;

  if (frame_reg)
    {
      rtx base;
      HOST_WIDE_INT off;

      RTX_FRAME_RELATED_P (insn) = 1;

      /* Don't even pretend that the unwind code can intuit its way
	 through a pair of interleaved post_modify iterators.  Just
	 provide the correct answer.  */

      if (frame_pointer_needed)
	{
	  base = hard_frame_pointer_rtx;
	  off = - cfa_off;
	}
      else
	{
	  base = stack_pointer_rtx;
	  off = current_frame_info.total_size - cfa_off;
	}

      REG_NOTES (insn)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode,
					  gen_rtx_MEM (GET_MODE (reg),
						       plus_constant (base,
								      off)),
					  frame_reg),
			     REG_NOTES (insn));
    }
}

/* Emit a restore of REG from its slot at CFA offset CFA_OFF via MOVE_FN.  */

static void
do_restore (move_fn, reg, cfa_off)
     rtx (*move_fn) PARAMS ((rtx, rtx, rtx));
     rtx reg;
     HOST_WIDE_INT cfa_off;
{
  int iter = spill_fill_data.next_iter;
  rtx insn;

  insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
				GEN_INT (cfa_off)));
  spill_fill_data.prev_insn[iter] = insn;
}

/* Wrapper functions that discard the CONST_INT spill offset.  These
   exist so that we can give gr_spill/gr_fill the offset they need and
   use a consistent function interface.
 */

static rtx
gen_movdi_x (dest, src, offset)
     rtx dest, src;
     rtx offset ATTRIBUTE_UNUSED;
{
  return gen_movdi (dest, src);
}

static rtx
gen_fr_spill_x (dest, src, offset)
     rtx dest, src;
     rtx offset ATTRIBUTE_UNUSED;
{
  return gen_fr_spill (dest, src);
}

static rtx
gen_fr_restore_x (dest, src, offset)
     rtx dest, src;
     rtx offset ATTRIBUTE_UNUSED;
{
  return gen_fr_restore (dest, src);
}

/* Called after register allocation to add any instructions needed for the
   prologue.  Using a prologue insn is favored compared to putting all of
   the instructions in output_function_prologue(), since it allows the
   scheduler to intermix instructions with the saves of the caller saved
   registers.  In some cases, it might be necessary to emit a barrier
   instruction as the last insn to prevent such scheduling.

   Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
   so that the debug info generation code can handle them properly.

   The register save area is laid out like so:
   cfa+16
	[ varargs spill area ]
	[ fr register spill area ]
	[ br register spill area ]
	[ ar register spill area ]
	[ pr register spill area ]
	[ gr register spill area ]  */

/* ??? Get inefficient code when the frame size is larger than can fit in an
   adds instruction.  */

void
ia64_expand_prologue ()
{
  rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
  int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
  rtx reg, alt_reg;

  ia64_compute_frame_size (get_frame_size ());
  last_scratch_gr_reg = 15;

  /* If there is no epilogue, then we don't need some prologue insns.
     We need to avoid emitting the dead prologue insns, because flow
     will complain about them.  */
  if (optimize)
    {
      edge e;

      /* An epilogue exists if some real (non-fake) edge falls through
	 into the exit block.  */
      for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next)
	if ((e->flags & EDGE_FAKE) == 0
	    && (e->flags & EDGE_FALLTHRU) != 0)
	  break;
      epilogue_p = (e != NULL);
    }
  else
    epilogue_p = 1;

  /* Set the local, input, and output register names.  We need to do this
     for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
     half.
If we use in/loc/out register names, then we get assembler errors
     in crtn.S because there is no alloc insn or regstk directive in
     there.  */
  if (! TARGET_REG_NAMES)
    {
      int inputs = current_frame_info.n_input_regs;
      int locals = current_frame_info.n_local_regs;
      int outputs = current_frame_info.n_output_regs;

      for (i = 0; i < inputs; i++)
	reg_names[IN_REG (i)] = ia64_reg_numbers[i];
      for (i = 0; i < locals; i++)
	reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
      for (i = 0; i < outputs; i++)
	reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
    }

  /* Set the frame pointer register name.  The regnum is logically loc79,
     but of course we'll not have allocated that many locals.  Rather than
     worrying about renumbering the existing rtxs, we adjust the name.  */
  /* ??? This code means that we can never use one local register when
     there is a frame pointer.  loc79 gets wasted in this case, as it is
     renamed to a register that will never be used.  See also the try_locals
     code in find_gr_spill.  */
  if (current_frame_info.reg_fp)
    {
      const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
      reg_names[HARD_FRAME_POINTER_REGNUM]
	= reg_names[current_frame_info.reg_fp];
      reg_names[current_frame_info.reg_fp] = tmp;
    }

  /* Fix up the return address placeholder.  */
  /* ??? We can fail if __builtin_return_address is used, and we didn't
     allocate a register in which to save b0.  I can't think of a way to
     eliminate RETURN_ADDRESS_POINTER_REGNUM to a local register and
     then be sure that I got the right one.  Further, reload doesn't seem
     to care if an eliminable register isn't used, and "eliminates" it
     anyway.  */
  if (regs_ever_live[RETURN_ADDRESS_POINTER_REGNUM]
      && current_frame_info.reg_save_b0 != 0)
    XINT (return_address_pointer_rtx, 0) = current_frame_info.reg_save_b0;

  /* We don't need an alloc instruction if we've used no outputs or locals.
 */
  if (current_frame_info.n_local_regs == 0
      && current_frame_info.n_output_regs == 0
      && current_frame_info.n_input_regs <= current_function_args_info.int_regs
      && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
    {
      /* If there is no alloc, but there are input registers used, then we
	 need a .regstk directive.  */
      current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
      ar_pfs_save_reg = NULL_RTX;
    }
  else
    {
      current_frame_info.need_regstk = 0;

      if (current_frame_info.reg_save_ar_pfs)
	regno = current_frame_info.reg_save_ar_pfs;
      else
	regno = next_scratch_gr_reg ();
      ar_pfs_save_reg = gen_rtx_REG (DImode, regno);

      /* The alloc insn also copies ar.pfs into AR_PFS_SAVE_REG.  */
      insn = emit_insn (gen_alloc (ar_pfs_save_reg,
				   GEN_INT (current_frame_info.n_input_regs),
				   GEN_INT (current_frame_info.n_local_regs),
				   GEN_INT (current_frame_info.n_output_regs),
				   GEN_INT (current_frame_info.n_rotate_regs)));
      RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
    }

  /* Set up frame pointer, stack pointer, and spill iterators.  */

  n_varargs = cfun->machine->n_varargs;
  setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
			stack_pointer_rtx, 0);

  if (frame_pointer_needed)
    {
      insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (current_frame_info.total_size != 0)
    {
      rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
      rtx offset;

      if (CONST_OK_FOR_I (- current_frame_info.total_size))
	offset = frame_size_rtx;
      else
	{
	  /* Frame size too large for an add immediate; load it into a
	     scratch register first.  */
	  regno = next_scratch_gr_reg ();
	  offset = gen_rtx_REG (DImode, regno);
	  emit_move_insn (offset, frame_size_rtx);
	}

      insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
				    stack_pointer_rtx, offset));

      if (! frame_pointer_needed)
	{
	  RTX_FRAME_RELATED_P (insn) = 1;
	  if (GET_CODE (offset) != CONST_INT)
	    {
	      /* Tell the unwinder the real SP adjustment, since the
		 emitted insn adds a register, not a constant.  */
	      REG_NOTES (insn)
		= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			gen_rtx_SET (VOIDmode,
				     stack_pointer_rtx,
				     gen_rtx_PLUS (DImode,
						   stack_pointer_rtx,
						   frame_size_rtx)),
			REG_NOTES (insn));
	    }
	}

      /* ???  At this point we must generate a magic insn that appears to
	 modify the stack pointer, the frame pointer, and all spill
	 iterators.  This would allow the most scheduling freedom.  For
	 now, just hard stop.  */
      emit_insn (gen_blockage ());
    }

  /* Must copy out ar.unat before doing any integer spills.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
    {
      if (current_frame_info.reg_save_ar_unat)
	ar_unat_save_reg
	  = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
      else
	{
	  alt_regno = next_scratch_gr_reg ();
	  ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
	  current_frame_info.gr_used_mask |= 1 << alt_regno;
	}

      reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
      insn = emit_move_insn (ar_unat_save_reg, reg);
      RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);

      /* Even if we're not going to generate an epilogue, we still
	 need to save the register so that EH works.  */
      if (! epilogue_p && current_frame_info.reg_save_ar_unat)
	emit_insn (gen_prologue_use (ar_unat_save_reg));
    }
  else
    ar_unat_save_reg = NULL_RTX;

  /* Spill all varargs registers.  Do this before spilling any GR registers,
     since we want the UNAT bits for the GR registers to override the UNAT
     bits from varargs, which we don't care about.  */

  cfa_off = -16;
  for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
    {
      reg = gen_rtx_REG (DImode, regno);
      do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
    }

  /* Locate the bottom of the register save area.  */
  cfa_off = (current_frame_info.spill_cfa_off
	     + current_frame_info.spill_size
	     + current_frame_info.extra_spill_size);

  /* Save the predicate register block either in a register or in memory.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
    {
      reg = gen_rtx_REG (DImode, PR_REG (0));
      if (current_frame_info.reg_save_pr != 0)
	{
	  alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
	  insn = emit_move_insn (alt_reg, reg);

	  /* ??? Denote pr spill/fill by a DImode move that modifies
	     all 64 hard registers.  */
	  RTX_FRAME_RELATED_P (insn) = 1;
	  REG_NOTES (insn)
	    = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
				 gen_rtx_SET (VOIDmode, alt_reg, reg),
				 REG_NOTES (insn));

	  /* Even if we're not going to generate an epilogue, we still
	     need to save the register so that EH works.  */
	  if (! epilogue_p)
	    emit_insn (gen_prologue_use (alt_reg));
	}
      else
	{
	  alt_regno = next_scratch_gr_reg ();
	  alt_reg = gen_rtx_REG (DImode, alt_regno);
	  insn = emit_move_insn (alt_reg, reg);
	  do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
	  cfa_off -= 8;
	}
    }

  /* Handle AR regs in numerical order.  All of them get special handling.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
      && current_frame_info.reg_save_ar_unat == 0)
    {
      reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
      do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
      cfa_off -= 8;
    }

  /* The alloc insn already copied ar.pfs into a general register.  The
     only thing we have to do now is copy that register to a stack slot
     if we'd not allocated a local register for the job.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
      && current_frame_info.reg_save_ar_pfs == 0)
    {
      reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
      do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
      cfa_off -= 8;
    }

  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
    {
      reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
      if (current_frame_info.reg_save_ar_lc != 0)
	{
	  alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
	  insn = emit_move_insn (alt_reg, reg);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  /* Even if we're not going to generate an epilogue, we still
	     need to save the register so that EH works.  */
	  if (! epilogue_p)
	    emit_insn (gen_prologue_use (alt_reg));
	}
      else
	{
	  alt_regno = next_scratch_gr_reg ();
	  alt_reg = gen_rtx_REG (DImode, alt_regno);
	  emit_move_insn (alt_reg, reg);
	  do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
	  cfa_off -= 8;
	}
    }

  if (current_frame_info.reg_save_gp)
    {
      insn = emit_move_insn (gen_rtx_REG (DImode,
					  current_frame_info.reg_save_gp),
			     pic_offset_table_rtx);
      /* We don't know for sure yet if this is actually needed, since
	 we've not split the PIC call patterns.  If all of the calls
	 are indirect, and not followed by any uses of the gp, then
	 this save is dead.  Allow it to go away.  */
      REG_NOTES (insn)
	= gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
    }

  /* We should now be at the base of the gr/br/fr spill area.  */
  if (cfa_off != (current_frame_info.spill_cfa_off
		  + current_frame_info.spill_size))
    abort ();

  /* Spill all general registers.  */
  for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
	reg = gen_rtx_REG (DImode, regno);
	do_spill (gen_gr_spill, reg, cfa_off, reg);
	cfa_off -= 8;
      }

  /* Handle BR0 specially -- it may be getting stored permanently in
     some GR register.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
    {
      reg = gen_rtx_REG (DImode, BR_REG (0));
      if (current_frame_info.reg_save_b0 != 0)
	{
	  alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
	  insn = emit_move_insn (alt_reg, reg);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  /* Even if we're not going to generate an epilogue, we still
	     need to save the register so that EH works.  */
	  if (! epilogue_p)
	    emit_insn (gen_prologue_use (alt_reg));
	}
      else
	{
	  alt_regno = next_scratch_gr_reg ();
	  alt_reg = gen_rtx_REG (DImode, alt_regno);
	  emit_move_insn (alt_reg, reg);
	  do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
	  cfa_off -= 8;
	}
    }

  /* Spill the rest of the BR registers.  */
  for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
	alt_regno = next_scratch_gr_reg ();
	alt_reg = gen_rtx_REG (DImode, alt_regno);
	reg = gen_rtx_REG (DImode, regno);
	emit_move_insn (alt_reg, reg);
	do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
	cfa_off -= 8;
      }

  /* Align the frame and spill all FR registers.  */
  for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
	/* FR spill slots are 16 bytes and must stay 16-byte aligned.  */
	if (cfa_off & 15)
	  abort ();
	reg = gen_rtx_REG (TFmode, regno);
	do_spill (gen_fr_spill_x, reg, cfa_off, reg);
	cfa_off -= 16;
      }

  if (cfa_off != current_frame_info.spill_cfa_off)
    abort ();

  finish_spill_pointers ();
}

/* Called after register allocation to add any instructions needed for the
   epilogue.  Using an epilogue insn is favored compared to putting all of
   the instructions in output_function_prologue(), since it allows the
   scheduler to intermix instructions with the saves of the caller saved
   registers.  In some cases, it might be necessary to emit a barrier
   instruction as the last insn to prevent such scheduling.  */

void
ia64_expand_epilogue (sibcall_p)
     int sibcall_p;
{
  rtx insn, reg, alt_reg, ar_unat_save_reg;
  int regno, alt_regno, cfa_off;

  ia64_compute_frame_size (get_frame_size ());

  /* If there is a frame pointer, then we use it instead of the stack
     pointer, so that the stack pointer does not need to be valid when
     the epilogue starts.  See EXIT_IGNORE_STACK.  */
  if (frame_pointer_needed)
    setup_spill_pointers (current_frame_info.n_spilled,
			  hard_frame_pointer_rtx, 0);
  else
    setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
			  current_frame_info.total_size);

  if (current_frame_info.total_size != 0)
    {
      /* ??? At this point we must generate a magic insn that appears to
	 modify the spill iterators and the frame pointer.  This would
	 allow the most scheduling freedom.  For now, just hard stop.  */
      emit_insn (gen_blockage ());
    }

  /* Locate the bottom of the register save area.
 */
  cfa_off = (current_frame_info.spill_cfa_off
	     + current_frame_info.spill_size
	     + current_frame_info.extra_spill_size);

  /* Restore the predicate registers.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
    {
      if (current_frame_info.reg_save_pr != 0)
	alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
      else
	{
	  alt_regno = next_scratch_gr_reg ();
	  alt_reg = gen_rtx_REG (DImode, alt_regno);
	  do_restore (gen_movdi_x, alt_reg, cfa_off);
	  cfa_off -= 8;
	}
      reg = gen_rtx_REG (DImode, PR_REG (0));
      emit_move_insn (reg, alt_reg);
    }

  /* Restore the application registers.  */

  /* Load the saved unat from the stack, but do not restore it until
     after the GRs have been restored.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
    {
      if (current_frame_info.reg_save_ar_unat != 0)
	ar_unat_save_reg
	  = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
      else
	{
	  alt_regno = next_scratch_gr_reg ();
	  ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
	  current_frame_info.gr_used_mask |= 1 << alt_regno;
	  do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
	  cfa_off -= 8;
	}
    }
  else
    ar_unat_save_reg = NULL_RTX;

  if (current_frame_info.reg_save_ar_pfs != 0)
    {
      alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
      reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
      emit_move_insn (reg, alt_reg);
    }
  else if (! current_function_is_leaf)
    {
      /* Non-leaf function with ar.pfs saved in its stack slot.  */
      alt_regno = next_scratch_gr_reg ();
      alt_reg = gen_rtx_REG (DImode, alt_regno);
      do_restore (gen_movdi_x, alt_reg, cfa_off);
      cfa_off -= 8;
      reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
      emit_move_insn (reg, alt_reg);
    }

  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
    {
      if (current_frame_info.reg_save_ar_lc != 0)
	alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
      else
	{
	  alt_regno = next_scratch_gr_reg ();
	  alt_reg = gen_rtx_REG (DImode, alt_regno);
	  do_restore (gen_movdi_x, alt_reg, cfa_off);
	  cfa_off -= 8;
	}
      reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
      emit_move_insn (reg, alt_reg);
    }

  /* We should now be at the base of the gr/br/fr spill area.  */
  if (cfa_off != (current_frame_info.spill_cfa_off
		  + current_frame_info.spill_size))
    abort ();

  /* The GP may be stored on the stack in the prologue, but it's
     never restored in the epilogue.  Skip the stack slot.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
    cfa_off -= 8;

  /* Restore all general registers.  */
  for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
	reg = gen_rtx_REG (DImode, regno);
	do_restore (gen_gr_restore, reg, cfa_off);
	cfa_off -= 8;
      }

  /* Restore the branch registers.  Handle B0 specially, as it may
     have gotten stored in some GR register.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
    {
      if (current_frame_info.reg_save_b0 != 0)
	alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
      else
	{
	  alt_regno = next_scratch_gr_reg ();
	  alt_reg = gen_rtx_REG (DImode, alt_regno);
	  do_restore (gen_movdi_x, alt_reg, cfa_off);
	  cfa_off -= 8;
	}
      reg = gen_rtx_REG (DImode, BR_REG (0));
      emit_move_insn (reg, alt_reg);
    }

  for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
	alt_regno = next_scratch_gr_reg ();
	alt_reg = gen_rtx_REG (DImode, alt_regno);
	do_restore (gen_movdi_x, alt_reg, cfa_off);
	cfa_off -= 8;
	reg = gen_rtx_REG (DImode, regno);
	emit_move_insn (reg, alt_reg);
      }

  /* Restore floating point registers.  */
  for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
    if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
      {
	/* FR slots are 16 bytes and must stay 16-byte aligned.  */
	if (cfa_off & 15)
	  abort ();
	reg = gen_rtx_REG (TFmode, regno);
	do_restore (gen_fr_restore_x, reg, cfa_off);
	cfa_off -= 16;
      }

  /* Restore ar.unat for real.  */
  if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
    {
      reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
      emit_move_insn (reg, ar_unat_save_reg);
    }

  if (cfa_off != current_frame_info.spill_cfa_off)
    abort ();

  finish_spill_pointers ();

  if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
    {
      /* ??? At this point we must generate a magic insn that appears to
	 modify the spill iterators, the stack pointer, and the frame
	 pointer.  This would allow the most scheduling freedom.  For
	 now, just hard stop.
*/ emit_insn (gen_blockage ()); } if (cfun->machine->ia64_eh_epilogue_sp) emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp); else if (frame_pointer_needed) { insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx); RTX_FRAME_RELATED_P (insn) = 1; } else if (current_frame_info.total_size) { rtx offset, frame_size_rtx; frame_size_rtx = GEN_INT (current_frame_info.total_size); if (CONST_OK_FOR_I (current_frame_info.total_size)) offset = frame_size_rtx; else { regno = next_scratch_gr_reg (); offset = gen_rtx_REG (DImode, regno); emit_move_insn (offset, frame_size_rtx); } insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, offset)); RTX_FRAME_RELATED_P (insn) = 1; if (GET_CODE (offset) != CONST_INT) { REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, gen_rtx_SET (VOIDmode, stack_pointer_rtx, gen_rtx_PLUS (DImode, stack_pointer_rtx, frame_size_rtx)), REG_NOTES (insn)); } } if (cfun->machine->ia64_eh_epilogue_bsp) emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp)); if (! sibcall_p) emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0)))); else { int fp = GR_REG (2); /* We need a throw away register here, r0 and r1 are reserved, so r2 is the first available call clobbered register. If there was a frame_pointer register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM, so we have to make sure we're using the string "r2" when emitting the register name for the assmbler. */ if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2)) fp = HARD_FRAME_POINTER_REGNUM; /* We must emit an alloc to force the input registers to become output registers. Otherwise, if the callee tries to pass its parameters through to another call without an intervening alloc, then these values get lost. */ /* ??? We don't need to preserve all input registers. We only need to preserve those input registers used as arguments to the sibling call. 
It is unclear how to compute that number here. */ if (current_frame_info.n_input_regs != 0) emit_insn (gen_alloc (gen_rtx_REG (DImode, fp), GEN_INT (0), GEN_INT (0), GEN_INT (current_frame_info.n_input_regs), GEN_INT (0))); } } /* Return 1 if br.ret can do all the work required to return from a function. */ int ia64_direct_return () { if (reload_completed && ! frame_pointer_needed) { ia64_compute_frame_size (get_frame_size ()); return (current_frame_info.total_size == 0 && current_frame_info.n_spilled == 0 && current_frame_info.reg_save_b0 == 0 && current_frame_info.reg_save_pr == 0 && current_frame_info.reg_save_ar_pfs == 0 && current_frame_info.reg_save_ar_unat == 0 && current_frame_info.reg_save_ar_lc == 0); } return 0; } int ia64_hard_regno_rename_ok (from, to) int from; int to; { /* Don't clobber any of the registers we reserved for the prologue. */ if (to == current_frame_info.reg_fp || to == current_frame_info.reg_save_b0 || to == current_frame_info.reg_save_pr || to == current_frame_info.reg_save_ar_pfs || to == current_frame_info.reg_save_ar_unat || to == current_frame_info.reg_save_ar_lc) return 0; if (from == current_frame_info.reg_fp || from == current_frame_info.reg_save_b0 || from == current_frame_info.reg_save_pr || from == current_frame_info.reg_save_ar_pfs || from == current_frame_info.reg_save_ar_unat || from == current_frame_info.reg_save_ar_lc) return 0; /* Don't use output registers outside the register frame. */ if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs)) return 0; /* Retain even/oddness on predicate register pairs. */ if (PR_REGNO_P (from) && PR_REGNO_P (to)) return (from & 1) == (to & 1); return 1; } /* Target hook for assembling integer objects. Handle word-sized aligned objects and detect the cases when @fptr is needed. */ static bool ia64_assemble_integer (x, size, aligned_p) rtx x; unsigned int size; int aligned_p; { if (size == (TARGET_ILP32 ? 
4 : 8) && aligned_p && !(TARGET_NO_PIC || TARGET_AUTO_PIC) && GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_FLAG (x)) { if (TARGET_ILP32) fputs ("\tdata4\t@fptr(", asm_out_file); else fputs ("\tdata8\t@fptr(", asm_out_file); output_addr_const (asm_out_file, x); fputs (")\n", asm_out_file); return true; } return default_assemble_integer (x, size, aligned_p); } /* Emit the function prologue. */ static void ia64_output_function_prologue (file, size) FILE *file; HOST_WIDE_INT size ATTRIBUTE_UNUSED; { int mask, grsave, grsave_prev; if (current_frame_info.need_regstk) fprintf (file, "\t.regstk %d, %d, %d, %d\n", current_frame_info.n_input_regs, current_frame_info.n_local_regs, current_frame_info.n_output_regs, current_frame_info.n_rotate_regs); if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS)) return; /* Emit the .prologue directive. */ mask = 0; grsave = grsave_prev = 0; if (current_frame_info.reg_save_b0 != 0) { mask |= 8; grsave = grsave_prev = current_frame_info.reg_save_b0; } if (current_frame_info.reg_save_ar_pfs != 0 && (grsave_prev == 0 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1)) { mask |= 4; if (grsave_prev == 0) grsave = current_frame_info.reg_save_ar_pfs; grsave_prev = current_frame_info.reg_save_ar_pfs; } if (current_frame_info.reg_fp != 0 && (grsave_prev == 0 || current_frame_info.reg_fp == grsave_prev + 1)) { mask |= 2; if (grsave_prev == 0) grsave = HARD_FRAME_POINTER_REGNUM; grsave_prev = current_frame_info.reg_fp; } if (current_frame_info.reg_save_pr != 0 && (grsave_prev == 0 || current_frame_info.reg_save_pr == grsave_prev + 1)) { mask |= 1; if (grsave_prev == 0) grsave = current_frame_info.reg_save_pr; } if (mask) fprintf (file, "\t.prologue %d, %d\n", mask, ia64_dbx_register_number (grsave)); else fputs ("\t.prologue\n", file); /* Emit a .spill directive, if necessary, to relocate the base of the register spill area. 
*/ if (current_frame_info.spill_cfa_off != -16) fprintf (file, "\t.spill %ld\n", (long) (current_frame_info.spill_cfa_off + current_frame_info.spill_size)); } /* Emit the .body directive at the scheduled end of the prologue. */ static void ia64_output_function_end_prologue (file) FILE *file; { if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS)) return; fputs ("\t.body\n", file); } /* Emit the function epilogue. */ static void ia64_output_function_epilogue (file, size) FILE *file ATTRIBUTE_UNUSED; HOST_WIDE_INT size ATTRIBUTE_UNUSED; { int i; /* Reset from the function's potential modifications. */ XINT (return_address_pointer_rtx, 0) = RETURN_ADDRESS_POINTER_REGNUM; if (current_frame_info.reg_fp) { const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM]; reg_names[HARD_FRAME_POINTER_REGNUM] = reg_names[current_frame_info.reg_fp]; reg_names[current_frame_info.reg_fp] = tmp; } if (! TARGET_REG_NAMES) { for (i = 0; i < current_frame_info.n_input_regs; i++) reg_names[IN_REG (i)] = ia64_input_reg_names[i]; for (i = 0; i < current_frame_info.n_local_regs; i++) reg_names[LOC_REG (i)] = ia64_local_reg_names[i]; for (i = 0; i < current_frame_info.n_output_regs; i++) reg_names[OUT_REG (i)] = ia64_output_reg_names[i]; } current_frame_info.initialized = 0; } int ia64_dbx_register_number (regno) int regno; { /* In ia64_expand_prologue we quite literally renamed the frame pointer from its home at loc79 to something inside the register frame. We must perform the same renumbering here for the debug info. 
*/ if (current_frame_info.reg_fp) { if (regno == HARD_FRAME_POINTER_REGNUM) regno = current_frame_info.reg_fp; else if (regno == current_frame_info.reg_fp) regno = HARD_FRAME_POINTER_REGNUM; } if (IN_REGNO_P (regno)) return 32 + regno - IN_REG (0); else if (LOC_REGNO_P (regno)) return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0); else if (OUT_REGNO_P (regno)) return (32 + current_frame_info.n_input_regs + current_frame_info.n_local_regs + regno - OUT_REG (0)); else return regno; } void ia64_initialize_trampoline (addr, fnaddr, static_chain) rtx addr, fnaddr, static_chain; { rtx addr_reg, eight = GEN_INT (8); /* Load up our iterator. */ addr_reg = gen_reg_rtx (Pmode); emit_move_insn (addr_reg, addr); /* The first two words are the fake descriptor: __ia64_trampoline, ADDR+16. */ emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline")); emit_insn (gen_adddi3 (addr_reg, addr_reg, eight)); emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), copy_to_reg (plus_constant (addr, 16))); emit_insn (gen_adddi3 (addr_reg, addr_reg, eight)); /* The third word is the target descriptor. */ emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr); emit_insn (gen_adddi3 (addr_reg, addr_reg, eight)); /* The fourth word is the static chain. */ emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain); } /* Do any needed setup for a variadic function. CUM has not been updated for the last named argument which has type TYPE and mode MODE. We generate the actual spill instructions during prologue generation. */ void ia64_setup_incoming_varargs (cum, int_mode, type, pretend_size, second_time) CUMULATIVE_ARGS cum; int int_mode; tree type; int * pretend_size; int second_time ATTRIBUTE_UNUSED; { /* Skip the current argument. 
*/ ia64_function_arg_advance (&cum, int_mode, type, 1); if (cum.words < MAX_ARGUMENT_SLOTS) { int n = MAX_ARGUMENT_SLOTS - cum.words; *pretend_size = n * UNITS_PER_WORD; cfun->machine->n_varargs = n; } } /* Check whether TYPE is a homogeneous floating point aggregate. If it is, return the mode of the floating point type that appears in all leafs. If it is not, return VOIDmode. An aggregate is a homogeneous floating point aggregate is if all fields/elements in it have the same floating point type (e.g, SFmode). 128-bit quad-precision floats are excluded. */ static enum machine_mode hfa_element_mode (type, nested) tree type; int nested; { enum machine_mode element_mode = VOIDmode; enum machine_mode mode; enum tree_code code = TREE_CODE (type); int know_element_mode = 0; tree t; switch (code) { case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE: case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE: case FILE_TYPE: case SET_TYPE: case LANG_TYPE: case FUNCTION_TYPE: return VOIDmode; /* Fortran complex types are supposed to be HFAs, so we need to handle gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex types though. */ case COMPLEX_TYPE: if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT && (TYPE_MODE (type) != TCmode || INTEL_EXTENDED_IEEE_FORMAT)) return mode_for_size (GET_MODE_UNIT_SIZE (TYPE_MODE (type)) * BITS_PER_UNIT, MODE_FLOAT, 0); else return VOIDmode; case REAL_TYPE: /* We want to return VOIDmode for raw REAL_TYPEs, but the actual mode if this is contained within an aggregate. 
*/ if (nested && (TYPE_MODE (type) != TFmode || INTEL_EXTENDED_IEEE_FORMAT)) return TYPE_MODE (type); else return VOIDmode; case ARRAY_TYPE: return hfa_element_mode (TREE_TYPE (type), 1); case RECORD_TYPE: case UNION_TYPE: case QUAL_UNION_TYPE: for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t)) { if (TREE_CODE (t) != FIELD_DECL) continue; mode = hfa_element_mode (TREE_TYPE (t), 1); if (know_element_mode) { if (mode != element_mode) return VOIDmode; } else if (GET_MODE_CLASS (mode) != MODE_FLOAT) return VOIDmode; else { know_element_mode = 1; element_mode = mode; } } return element_mode; default: /* If we reach here, we probably have some front-end specific type that the backend doesn't know about. This can happen via the aggregate_value_p call in init_function_start. All we can do is ignore unknown tree types. */ return VOIDmode; } return VOIDmode; } /* Return rtx for register where argument is passed, or zero if it is passed on the stack. */ /* ??? 128-bit quad-precision floats are always passed in general registers. */ rtx ia64_function_arg (cum, mode, type, named, incoming) CUMULATIVE_ARGS *cum; enum machine_mode mode; tree type; int named; int incoming; { int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST); int words = (((mode == BLKmode ? int_size_in_bytes (type) : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD); int offset = 0; enum machine_mode hfa_mode = VOIDmode; /* Integer and float arguments larger than 8 bytes start at the next even boundary. Aggregates larger than 8 bytes start at the next even boundary if the aggregate has 16 byte alignment. Net effect is that types with alignment greater than 8 start at the next even boundary. */ /* ??? The ABI does not specify how to handle aggregates with alignment from 9 to 15 bytes, or greater than 16. We handle them all as if they had 16 byte alignment. Such aggregates can occur only if gcc extensions are used. */ if ((type ? 
(TYPE_ALIGN (type) > 8 * BITS_PER_UNIT) : (words > 1)) && (cum->words & 1)) offset = 1; /* If all argument slots are used, then it must go on the stack. */ if (cum->words + offset >= MAX_ARGUMENT_SLOTS) return 0; /* Check for and handle homogeneous FP aggregates. */ if (type) hfa_mode = hfa_element_mode (type, 0); /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas and unprototyped hfas are passed specially. */ if (hfa_mode != VOIDmode && (! cum->prototype || named)) { rtx loc[16]; int i = 0; int fp_regs = cum->fp_regs; int int_regs = cum->words + offset; int hfa_size = GET_MODE_SIZE (hfa_mode); int byte_size; int args_byte_size; /* If prototyped, pass it in FR regs then GR regs. If not prototyped, pass it in both FR and GR regs. If this is an SFmode aggregate, then it is possible to run out of FR regs while GR regs are still left. In that case, we pass the remaining part in the GR regs. */ /* Fill the FP regs. We do this always. We stop if we reach the end of the argument, the last FP register, or the last argument slot. */ byte_size = ((mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode)); args_byte_size = int_regs * UNITS_PER_WORD; offset = 0; for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++) { loc[i] = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (hfa_mode, (FR_ARG_FIRST + fp_regs)), GEN_INT (offset)); offset += hfa_size; args_byte_size += hfa_size; fp_regs++; } /* If no prototype, then the whole thing must go in GR regs. */ if (! cum->prototype) offset = 0; /* If this is an SFmode aggregate, then we might have some left over that needs to go in GR regs. */ else if (byte_size != offset) int_regs += offset / UNITS_PER_WORD; /* Fill in the GR regs. We must use DImode here, not the hfa mode. 
*/ for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++) { enum machine_mode gr_mode = DImode; /* If we have an odd 4 byte hunk because we ran out of FR regs, then this goes in a GR reg left adjusted/little endian, right adjusted/big endian. */ /* ??? Currently this is handled wrong, because 4-byte hunks are always right adjusted/little endian. */ if (offset & 0x4) gr_mode = SImode; /* If we have an even 4 byte hunk because the aggregate is a multiple of 4 bytes in size, then this goes in a GR reg right adjusted/little endian. */ else if (byte_size - offset == 4) gr_mode = SImode; /* Complex floats need to have float mode. */ if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT) gr_mode = hfa_mode; loc[i] = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (gr_mode, (basereg + int_regs)), GEN_INT (offset)); offset += GET_MODE_SIZE (gr_mode); int_regs += GET_MODE_SIZE (gr_mode) <= UNITS_PER_WORD ? 1 : GET_MODE_SIZE (gr_mode) / UNITS_PER_WORD; } /* If we ended up using just one location, just return that one loc. */ if (i == 1) return XEXP (loc[0], 0); else return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc)); } /* Integral and aggregates go in general registers. If we have run out of FR registers, then FP values must also go in general registers. This can happen when we have a SFmode HFA. */ else if (((mode == TFmode) && ! INTEL_EXTENDED_IEEE_FORMAT) || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS)) { int byte_size = ((mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode)); if (BYTES_BIG_ENDIAN && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type))) && byte_size < UNITS_PER_WORD && byte_size > 0) { rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (DImode, (basereg + cum->words + offset)), const0_rtx); return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg)); } else return gen_rtx_REG (mode, basereg + cum->words + offset); } /* If there is a prototype, then FP values go in a FR register when named, and in a GR registeer when unnamed. 
*/ else if (cum->prototype) { if (! named) return gen_rtx_REG (mode, basereg + cum->words + offset); else return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs); } /* If there is no prototype, then FP values go in both FR and GR registers. */ else { rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, (FR_ARG_FIRST + cum->fp_regs)), const0_rtx); rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (mode, (basereg + cum->words + offset)), const0_rtx); return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg)); } } /* Return number of words, at the beginning of the argument, that must be put in registers. 0 is the argument is entirely in registers or entirely in memory. */ int ia64_function_arg_partial_nregs (cum, mode, type, named) CUMULATIVE_ARGS *cum; enum machine_mode mode; tree type; int named ATTRIBUTE_UNUSED; { int words = (((mode == BLKmode ? int_size_in_bytes (type) : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD); int offset = 0; /* Arguments with alignment larger than 8 bytes start at the next even boundary. */ if ((type ? (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT) : (words > 1)) && (cum->words & 1)) offset = 1; /* If all argument slots are used, then it must go on the stack. */ if (cum->words + offset >= MAX_ARGUMENT_SLOTS) return 0; /* It doesn't matter whether the argument goes in FR or GR regs. If it fits within the 8 argument slots, then it goes entirely in registers. If it extends past the last argument slot, then the rest goes on the stack. */ if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS) return 0; return MAX_ARGUMENT_SLOTS - cum->words - offset; } /* Update CUM to point after this argument. This is patterned after ia64_function_arg. */ void ia64_function_arg_advance (cum, mode, type, named) CUMULATIVE_ARGS *cum; enum machine_mode mode; tree type; int named; { int words = (((mode == BLKmode ? 
int_size_in_bytes (type) : GET_MODE_SIZE (mode)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD); int offset = 0; enum machine_mode hfa_mode = VOIDmode; /* If all arg slots are already full, then there is nothing to do. */ if (cum->words >= MAX_ARGUMENT_SLOTS) return; /* Arguments with alignment larger than 8 bytes start at the next even boundary. */ if ((type ? (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT) : (words > 1)) && (cum->words & 1)) offset = 1; cum->words += words + offset; /* Check for and handle homogeneous FP aggregates. */ if (type) hfa_mode = hfa_element_mode (type, 0); /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas and unprototyped hfas are passed specially. */ if (hfa_mode != VOIDmode && (! cum->prototype || named)) { int fp_regs = cum->fp_regs; /* This is the original value of cum->words + offset. */ int int_regs = cum->words - words; int hfa_size = GET_MODE_SIZE (hfa_mode); int byte_size; int args_byte_size; /* If prototyped, pass it in FR regs then GR regs. If not prototyped, pass it in both FR and GR regs. If this is an SFmode aggregate, then it is possible to run out of FR regs while GR regs are still left. In that case, we pass the remaining part in the GR regs. */ /* Fill the FP regs. We do this always. We stop if we reach the end of the argument, the last FP register, or the last argument slot. */ byte_size = ((mode == BLKmode) ? int_size_in_bytes (type) : GET_MODE_SIZE (mode)); args_byte_size = int_regs * UNITS_PER_WORD; offset = 0; for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));) { offset += hfa_size; args_byte_size += hfa_size; fp_regs++; } cum->fp_regs = fp_regs; } /* Integral and aggregates go in general registers. If we have run out of FR registers, then FP values must also go in general registers. This can happen when we have a SFmode HFA. */ else if (! 
FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS) cum->int_regs = cum->words; /* If there is a prototype, then FP values go in a FR register when named, and in a GR registeer when unnamed. */ else if (cum->prototype) { if (! named) cum->int_regs = cum->words; else /* ??? Complex types should not reach here. */ cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1); } /* If there is no prototype, then FP values go in both FR and GR registers. */ else { /* ??? Complex types should not reach here. */ cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1); cum->int_regs = cum->words; } } /* Variable sized types are passed by reference. */ /* ??? At present this is a GCC extension to the IA-64 ABI. */ int ia64_function_arg_pass_by_reference (cum, mode, type, named) CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED; enum machine_mode mode ATTRIBUTE_UNUSED; tree type; int named ATTRIBUTE_UNUSED; { return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST; } /* Implement va_arg. */ rtx ia64_va_arg (valist, type) tree valist, type; { tree t; /* Variable sized types are passed by reference. */ if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST) { rtx addr = std_expand_builtin_va_arg (valist, build_pointer_type (type)); return gen_rtx_MEM (ptr_mode, force_reg (Pmode, addr)); } /* Arguments with alignment larger than 8 bytes start at the next even boundary. */ if (TYPE_ALIGN (type) > 8 * BITS_PER_UNIT) { t = build (PLUS_EXPR, TREE_TYPE (valist), valist, build_int_2 (2 * UNITS_PER_WORD - 1, 0)); t = build (BIT_AND_EXPR, TREE_TYPE (t), t, build_int_2 (-2 * UNITS_PER_WORD, -1)); t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t); TREE_SIDE_EFFECTS (t) = 1; expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); } return std_expand_builtin_va_arg (valist, type); } /* Return 1 if function return value returned in memory. Return 0 if it is in a register. 
*/

int
ia64_return_in_memory (valtype)
     tree valtype;
{
  enum machine_mode mode;
  enum machine_mode hfa_mode;
  HOST_WIDE_INT byte_size;

  mode = TYPE_MODE (valtype);
  byte_size = GET_MODE_SIZE (mode);
  if (mode == BLKmode)
    {
      byte_size = int_size_in_bytes (valtype);
      /* Negative size means variable-sized: always in memory.  */
      if (byte_size < 0)
        return 1;
    }

  /* Hfa's with up to 8 elements are returned in the FP argument registers.  */

  hfa_mode = hfa_element_mode (valtype, 0);
  if (hfa_mode != VOIDmode)
    {
      int hfa_size = GET_MODE_SIZE (hfa_mode);

      if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
        return 1;
      else
        return 0;
    }
  else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
    return 1;
  else
    return 0;
}

/* Return rtx for register that holds the function return value.  */

rtx
ia64_function_value (valtype, func)
     tree valtype;
     tree func ATTRIBUTE_UNUSED;
{
  enum machine_mode mode;
  enum machine_mode hfa_mode;

  mode = TYPE_MODE (valtype);
  hfa_mode = hfa_element_mode (valtype, 0);

  if (hfa_mode != VOIDmode)
    {
      rtx loc[8];
      int i;
      int hfa_size;
      int byte_size;
      int offset;

      /* HFAs are returned element-by-element in consecutive FP
         argument registers, one EXPR_LIST entry per element.  */
      hfa_size = GET_MODE_SIZE (hfa_mode);
      byte_size = ((mode == BLKmode)
                   ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
      offset = 0;
      for (i = 0; offset < byte_size; i++)
        {
          loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                      gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
                                      GEN_INT (offset));
          offset += hfa_size;
        }

      if (i == 1)
        return XEXP (loc[0], 0);
      else
        return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
    }
  else if (FLOAT_TYPE_P (valtype) &&
           ((mode != TFmode) || INTEL_EXTENDED_IEEE_FORMAT))
    return gen_rtx_REG (mode, FR_ARG_FIRST);
  else
    {
      /* Big-endian aggregates need a PARALLEL so the value is
         left-justified across the return registers.  */
      if (BYTES_BIG_ENDIAN
          && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
        {
          rtx loc[8];
          int offset;
          int bytesize;
          int i;

          offset = 0;
          bytesize = int_size_in_bytes (valtype);
          for (i = 0; offset < bytesize; i++)
            {
              loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                          gen_rtx_REG (DImode,
                                                       GR_RET_FIRST + i),
                                          GEN_INT (offset));
              offset += UNITS_PER_WORD;
            }
          return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
        }
      else
        return gen_rtx_REG (mode, GR_RET_FIRST);
    }
}

/* Print a memory address as an operand to reference that memory location.  */

/* ??? Do we need this?  It gets used only for 'a' operands.  We could perhaps
   also call this from ia64_print_operand for memory addresses.  */

void
ia64_print_operand_address (stream, address)
     FILE * stream ATTRIBUTE_UNUSED;
     rtx    address ATTRIBUTE_UNUSED;
{
}

/* Print an operand to an assembler instruction.
   C	Swap and print a comparison operator.
   D	Print an FP comparison operator.
   E    Print 32 - constant, for SImode shifts as extract.
   e    Print 64 - constant, for DImode rotates.
   F	A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
        a floating point register emitted normally.
   I	Invert a predicate register by adding 1.
   J    Select the proper predicate register for a condition.
   j    Select the inverse predicate register for a condition.
   O	Append .acq for volatile load.
   P	Postincrement of a MEM.
   Q	Append .rel for volatile store.
   S	Shift amount for shladd instruction.
   T	Print an 8-bit sign extended number (K) as a 32-bit unsigned number
	for Intel assembler.
   U	Print an 8-bit sign extended number (K) as a 64-bit unsigned number
	for Intel assembler.
   r	Print register name, or constant 0 as r0.  HP compatibility for
	Linux kernel.  */
void
ia64_print_operand (file, x, code)
     FILE * file;
     rtx    x;
     int    code;
{
  const char *str;

  switch (code)
    {
    case 0:
      /* Handled below.  */
      break;

    case 'C':
      {
        enum rtx_code c = swap_condition (GET_CODE (x));
        fputs (GET_RTX_NAME (c), file);
        return;
      }

    case 'D':
      /* FP comparisons use different mnemonics for NE/UNORDERED/ORDERED;
         the other codes match their RTX names.  */
      switch (GET_CODE (x))
        {
        case NE:
          str = "neq";
          break;
        case UNORDERED:
          str = "unord";
          break;
        case ORDERED:
          str = "ord";
          break;
        default:
          str = GET_RTX_NAME (GET_CODE (x));
          break;
        }
      fputs (str, file);
      return;

    case 'E':
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
      return;

    case 'e':
      fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
      return;

    case 'F':
      if (x == CONST0_RTX (GET_MODE (x)))
        str = reg_names [FR_REG (0)];
      else if (x == CONST1_RTX (GET_MODE (x)))
        str = reg_names [FR_REG (1)];
      else if (GET_CODE (x) == REG)
        str = reg_names [REGNO (x)];
      else
        abort ();
      fputs (str, file);
      return;

    case 'I':
      fputs (reg_names [REGNO (x) + 1], file);
      return;

    case 'J':
    case 'j':
      {
        /* Predicate registers come in even/odd complement pairs; EQ and
           'j' each flip to the complementary register.  */
        unsigned int regno = REGNO (XEXP (x, 0));
        if (GET_CODE (x) == EQ)
          regno += 1;
        if (code == 'j')
          regno ^= 1;
        fputs (reg_names [regno], file);
      }
      return;

    case 'O':
      if (MEM_VOLATILE_P (x))
        fputs(".acq", file);
      return;

    case 'P':
      {
        HOST_WIDE_INT value;

        switch (GET_CODE (XEXP (x, 0)))
          {
          default:
            return;

          case POST_MODIFY:
            x = XEXP (XEXP (XEXP (x, 0), 1), 1);
            if (GET_CODE (x) == CONST_INT)
              value = INTVAL (x);
            else if (GET_CODE (x) == REG)
              {
                fprintf (file, ", %s", reg_names[REGNO (x)]);
                return;
              }
            else
              abort ();
            break;

          case POST_INC:
            value = GET_MODE_SIZE (GET_MODE (x));
            break;

          case POST_DEC:
            value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
            break;
          }

        putc (',', file);
        putc (' ', file);
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, value);
        return;
      }

    case 'Q':
      if (MEM_VOLATILE_P (x))
        fputs(".rel", file);
      return;

    case 'S':
      fprintf (file, "%d", exact_log2 (INTVAL (x)));
      return;

    case 'T':
      if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
        {
          fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
          return;
        }
      break;

    case 'U':
      if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
        {
          const char *prefix = "0x";
          if (INTVAL (x) & 0x80000000)
            {
              fprintf (file, "0xffffffff");
              prefix = "";
            }
          fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
          return;
        }
      break;

    case 'r':
      /* If this operand is the constant zero, write it as register zero.
         Any register, zero, or CONST_INT value is OK here.  */
      if (GET_CODE (x) == REG)
        fputs (reg_names[REGNO (x)], file);
      else if (x == CONST0_RTX (GET_MODE (x)))
        fputs ("r0", file);
      else if (GET_CODE (x) == CONST_INT)
        output_addr_const (file, x);
      else
        output_operand_lossage ("invalid %%r value");
      return;

    case '+':
      {
        const char *which;

        /* For conditional branches, returns or calls, substitute
           sptk, dptk, dpnt, or spnt for %s.  */
        x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
        if (x)
          {
            int pred_val = INTVAL (XEXP (x, 0));

            /* Guess top and bottom 10% statically predicted.  */
            if (pred_val < REG_BR_PROB_BASE / 50)
              which = ".spnt";
            else if (pred_val < REG_BR_PROB_BASE / 2)
              which = ".dpnt";
            else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
              which = ".dptk";
            else
              which = ".sptk";
          }
        else if (GET_CODE (current_output_insn) == CALL_INSN)
          which = ".sptk";
        else
          which = ".dptk";

        fputs (which, file);
        return;
      }

    case ',':
      /* Emit the "(p<n>) " qualifying predicate for the current insn.  */
      x = current_insn_predicate;
      if (x)
        {
          unsigned int regno = REGNO (XEXP (x, 0));
          if (GET_CODE (x) == EQ)
            regno += 1;
          fprintf (file, "(%s) ", reg_names [regno]);
        }
      return;

    default:
      output_operand_lossage ("ia64_print_operand: unknown code");
      return;
    }

  switch (GET_CODE (x))
    {
      /* This happens for the spill/restore instructions.  */
    case POST_INC:
    case POST_DEC:
    case POST_MODIFY:
      x = XEXP (x, 0);
      /* ... fall through ...  */

    case REG:
      fputs (reg_names [REGNO (x)], file);
      break;

    case MEM:
      {
        rtx addr = XEXP (x, 0);
        if (GET_RTX_CLASS (GET_CODE (addr)) == 'a')
          addr = XEXP (addr, 0);
        fprintf (file, "[%s]", reg_names [REGNO (addr)]);
        break;
      }

    default:
      output_addr_const (file, x);
      break;
    }

  return;
}

/* Calculate the cost of moving data from a register in class FROM to
   one in class TO, using MODE.  */

int
ia64_register_move_cost (mode, from, to)
     enum machine_mode mode;
     enum reg_class from, to;
{
  /* ADDL_REGS is the same as GR_REGS for movement purposes.  */
  if (to == ADDL_REGS)
    to = GR_REGS;
  if (from == ADDL_REGS)
    from = GR_REGS;

  /* All costs are symmetric, so reduce cases by putting the
     lower number class as the destination.  */
  if (from < to)
    {
      enum reg_class tmp = to;
      to = from, from = tmp;
    }

  /* Moving from FR<->GR in TFmode must be more expensive than 2,
     so that we get secondary memory reloads.  Between FR_REGS,
     we have to make this at least as expensive as MEMORY_MOVE_COST
     to avoid spectacularly poor register class preferencing.  */
  if (mode == TFmode)
    {
      if (to != GR_REGS || from != GR_REGS)
        return MEMORY_MOVE_COST (mode, to, 0);
      else
        return 3;
    }

  switch (to)
    {
    case PR_REGS:
      /* Moving between PR registers takes two insns.  */
      if (from == PR_REGS)
        return 3;
      /* Moving between PR and anything but GR is impossible.  */
      if (from != GR_REGS)
        return MEMORY_MOVE_COST (mode, to, 0);
      break;

    case BR_REGS:
      /* Moving between BR and anything but GR is impossible.  */
      if (from != GR_REGS && from != GR_AND_BR_REGS)
        return MEMORY_MOVE_COST (mode, to, 0);
      break;

    case AR_I_REGS:
    case AR_M_REGS:
      /* Moving between AR and anything but GR is impossible.  */
      if (from != GR_REGS)
        return MEMORY_MOVE_COST (mode, to, 0);
      break;

    case GR_REGS:
    case FR_REGS:
    case GR_AND_FR_REGS:
    case GR_AND_BR_REGS:
    case ALL_REGS:
      break;

    default:
      abort ();
    }

  return 2;
}

/* This function returns the register class required for a secondary
   register when copying between one of the registers in CLASS, and X,
   using MODE.
   A return value of NO_REGS means that no secondary register is required.  */

enum reg_class
ia64_secondary_reload_class (class, mode, x)
     enum reg_class class;
     enum machine_mode mode ATTRIBUTE_UNUSED;
     rtx x;
{
  int regno = -1;

  if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
    regno = true_regnum (x);

  switch (class)
    {
    case BR_REGS:
    case AR_M_REGS:
    case AR_I_REGS:
      /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
	 interaction.  We end up with two pseudos with overlapping lifetimes
	 both of which are equiv to the same constant, and both which need
	 to be in BR_REGS.  This seems to be a cse bug.  cse_basic_block_end
	 changes depending on the path length, which means the qty_first_reg
	 check in make_regs_eqv can give different answers at different times.
	 At some point I'll probably need a reload_indi pattern to handle
	 this.

	 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
	 wound up with a FP register from GR_AND_FR_REGS.  Extend that to all
	 non-general registers for good measure.  */
      if (regno >= 0 && ! GENERAL_REGNO_P (regno))
	return GR_REGS;

      /* This is needed if a pseudo used as a call_operand gets spilled to a
	 stack slot.  */
      if (GET_CODE (x) == MEM)
	return GR_REGS;
      break;

    case FR_REGS:
      /* Need to go through general registers to get to other class regs.  */
      if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
	return GR_REGS;

      /* This can happen when a paradoxical subreg is an operand to the
	 muldi3 pattern.  */
      /* ??? This shouldn't be necessary after instruction scheduling is
	 enabled, because paradoxical subregs are not accepted by
	 register_operand when INSN_SCHEDULING is defined.  Or alternatively,
	 stop the paradoxical subreg stupidity in the *_operand functions
	 in recog.c.  */
      if (GET_CODE (x) == MEM
	  && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
	      || GET_MODE (x) == QImode))
	return GR_REGS;

      /* This can happen because of the ior/and/etc patterns that accept FP
	 registers as operands.  If the third operand is a constant, then it
	 needs to be reloaded into a FP register.  */
      if (GET_CODE (x) == CONST_INT)
	return GR_REGS;

      /* This can happen because of register elimination in a muldi3 insn.
	 E.g. `26107 * (unsigned long)&u'.  */
      if (GET_CODE (x) == PLUS)
	return GR_REGS;
      break;

    case PR_REGS:
      /* ??? This happens if we cse/gcse a BImode value across a call,
	 and the function has a nonlocal goto.  This is because global
	 does not allocate call crossing pseudos to hard registers when
	 current_function_has_nonlocal_goto is true.  This is relatively
	 common for C++ programs that use exceptions.  To reproduce,
	 return NO_REGS and compile libstdc++.  */
      if (GET_CODE (x) == MEM)
	return GR_REGS;

      /* This can happen when we take a BImode subreg of a DImode value,
	 and that DImode value winds up in some non-GR register.  */
      if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
	return GR_REGS;
      break;

    case GR_REGS:
      /* Since we have no offsettable memory addresses, we need a temporary
	 to hold the address of the second word.  */
      if (mode == TImode)
	return GR_REGS;
      break;

    default:
      break;
    }

  return NO_REGS;
}

/* Emit text to declare externally defined variables and functions, because
   the Intel assembler does not support undefined externals.  */

void
ia64_asm_output_external (file, decl, name)
     FILE *file;
     tree decl;
     const char *name;
{
  int save_referenced;

  /* GNU as does not need anything here, but the HP linker does need
     something for external functions.  */

  if (TARGET_GNU_AS
      && (!TARGET_HPUX_LD
	  || TREE_CODE (decl) != FUNCTION_DECL
	  || strstr(name, "__builtin_") == name))
    return;

  /* ??? The Intel assembler creates a reference that needs to be satisfied by
     the linker when we do this, so we need to be careful not to do this for
     builtin functions which have no library equivalent.  Unfortunately, we
     can't tell here whether or not a function will actually be called by
     expand_expr, so we pull in library functions even if we may not need
     them later.  */
  if (! strcmp (name, "__builtin_next_arg")
      || ! strcmp (name, "alloca")
      || ! strcmp (name, "__builtin_constant_p")
      || ! strcmp (name, "__builtin_args_info"))
    return;

  if (TARGET_HPUX_LD)
    ia64_hpux_add_extern_decl (name);
  else
    {
      /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and
	 restore it.  */
      save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
      if (TREE_CODE (decl) == FUNCTION_DECL)
	ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
      (*targetm.asm_out.globalize_label) (file, name);
      TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
    }
}

/* Parse the -mfixed-range= option string.  */

static void
fix_range (const_str)
     const char *const_str;
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use f32-f127.  */

  /* Work on a writable copy; the option string itself is const.  */
  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning ("value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning ("unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning ("unknown register name: %s", dash + 1);
	  return;
	}

      /* Restore the dash so diagnostics below can print the full range.  */
      *dash = '-';

      if (first > last)
	{
	  warning ("%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      /* Restore the comma and advance to the next range.  */
      *comma = ',';
      str = comma + 1;
    }
}

/* Allocate a fresh, zeroed per-function machine_function record.
   Installed as init_machine_status in ia64_override_options.  */

static struct machine_function *
ia64_init_machine_status ()
{
  return ggc_alloc_cleared (sizeof (struct machine_function));
}

/* Handle TARGET_OPTIONS switches.
*/ void ia64_override_options () { if (TARGET_AUTO_PIC) target_flags |= MASK_CONST_GP; if (TARGET_INLINE_FLOAT_DIV_LAT && TARGET_INLINE_FLOAT_DIV_THR) { warning ("cannot optimize floating point division for both latency and throughput"); target_flags &= ~MASK_INLINE_FLOAT_DIV_THR; } if (TARGET_INLINE_INT_DIV_LAT && TARGET_INLINE_INT_DIV_THR) { warning ("cannot optimize integer division for both latency and throughput"); target_flags &= ~MASK_INLINE_INT_DIV_THR; } if (ia64_fixed_range_string) fix_range (ia64_fixed_range_string); if (ia64_tls_size_string) { char *end; unsigned long tmp = strtoul (ia64_tls_size_string, &end, 10); if (*end || (tmp != 14 && tmp != 22 && tmp != 64)) error ("bad value (%s) for -mtls-size= switch", ia64_tls_size_string); else ia64_tls_size = tmp; } ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload; flag_schedule_insns_after_reload = 0; ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE; init_machine_status = ia64_init_machine_status; /* Tell the compiler which flavor of TFmode we're using. 
*/ if (INTEL_EXTENDED_IEEE_FORMAT) real_format_for_mode[TFmode - QFmode] = &ieee_extended_intel_128_format; } static enum attr_itanium_requires_unit0 ia64_safe_itanium_requires_unit0 PARAMS((rtx)); static enum attr_itanium_class ia64_safe_itanium_class PARAMS((rtx)); static enum attr_type ia64_safe_type PARAMS((rtx)); static enum attr_itanium_requires_unit0 ia64_safe_itanium_requires_unit0 (insn) rtx insn; { if (recog_memoized (insn) >= 0) return get_attr_itanium_requires_unit0 (insn); else return ITANIUM_REQUIRES_UNIT0_NO; } static enum attr_itanium_class ia64_safe_itanium_class (insn) rtx insn; { if (recog_memoized (insn) >= 0) return get_attr_itanium_class (insn); else return ITANIUM_CLASS_UNKNOWN; } static enum attr_type ia64_safe_type (insn) rtx insn; { if (recog_memoized (insn) >= 0) return get_attr_type (insn); else return TYPE_UNKNOWN; } /* The following collection of routines emit instruction group stop bits as necessary to avoid dependencies. */ /* Need to track some additional registers as far as serialization is concerned so we can properly handle br.call and br.ret. We could make these registers visible to gcc, but since these registers are never explicitly used in gcc generated code, it seems wasteful to do so (plus it would make the call and return patterns needlessly complex). */ #define REG_GP (GR_REG (1)) #define REG_RP (BR_REG (0)) #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1) /* This is used for volatile asms which may require a stop bit immediately before and after them. */ #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2) #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3) #define NUM_REGS (AR_UNAT_BIT_0 + 64) /* For each register, we keep track of how it has been written in the current instruction group. If a register is written unconditionally (no qualifying predicate), WRITE_COUNT is set to 2 and FIRST_PRED is ignored. If a register is written if its qualifying predicate P is true, we set WRITE_COUNT to 1 and FIRST_PRED to P. 
Later on, the same register may be written again by the complement of P (P^1) and when this happens, WRITE_COUNT gets set to 2. The result of this is that whenever an insn attempts to write a register whose WRITE_COUNT is two, we need to issue an insn group barrier first. If a predicate register is written by a floating-point insn, we set WRITTEN_BY_FP to true. If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */ struct reg_write_state { unsigned int write_count : 2; unsigned int first_pred : 16; unsigned int written_by_fp : 1; unsigned int written_by_and : 1; unsigned int written_by_or : 1; }; /* Cumulative info for the current instruction group. */ struct reg_write_state rws_sum[NUM_REGS]; /* Info for the current instruction. This gets copied to rws_sum after a stop bit is emitted. */ struct reg_write_state rws_insn[NUM_REGS]; /* Indicates whether this is the first instruction after a stop bit, in which case we don't need another stop bit. Without this, we hit the abort in ia64_variable_issue when scheduling an alloc. */ static int first_instruction; /* Misc flags needed to compute RAW/WAW dependencies while we are traversing RTL for one instruction. */ struct reg_flags { unsigned int is_write : 1; /* Is register being written? */ unsigned int is_fp : 1; /* Is register used as part of an fp op? */ unsigned int is_branch : 1; /* Is register used as part of a branch? */ unsigned int is_and : 1; /* Is register used as part of and.orcm? */ unsigned int is_or : 1; /* Is register used as part of or.andcm? */ unsigned int is_sibcall : 1; /* Is this a sibling or normal call? 
*/ }; static void rws_update PARAMS ((struct reg_write_state *, int, struct reg_flags, int)); static int rws_access_regno PARAMS ((int, struct reg_flags, int)); static int rws_access_reg PARAMS ((rtx, struct reg_flags, int)); static void update_set_flags PARAMS ((rtx, struct reg_flags *, int *, rtx *)); static int set_src_needs_barrier PARAMS ((rtx, struct reg_flags, int, rtx)); static int rtx_needs_barrier PARAMS ((rtx, struct reg_flags, int)); static void init_insn_group_barriers PARAMS ((void)); static int group_barrier_needed_p PARAMS ((rtx)); static int safe_group_barrier_needed_p PARAMS ((rtx)); /* Update *RWS for REGNO, which is being written by the current instruction, with predicate PRED, and associated register flags in FLAGS. */ static void rws_update (rws, regno, flags, pred) struct reg_write_state *rws; int regno; struct reg_flags flags; int pred; { if (pred) rws[regno].write_count++; else rws[regno].write_count = 2; rws[regno].written_by_fp |= flags.is_fp; /* ??? Not tracking and/or across differing predicates. */ rws[regno].written_by_and = flags.is_and; rws[regno].written_by_or = flags.is_or; rws[regno].first_pred = pred; } /* Handle an access to register REGNO of type FLAGS using predicate register PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates a dependency with an earlier instruction in the same group. */ static int rws_access_regno (regno, flags, pred) int regno; struct reg_flags flags; int pred; { int need_barrier = 0; if (regno >= NUM_REGS) abort (); if (! PR_REGNO_P (regno)) flags.is_and = flags.is_or = 0; if (flags.is_write) { int write_count; /* One insn writes same reg multiple times? */ if (rws_insn[regno].write_count > 0) abort (); /* Update info for current instruction. */ rws_update (rws_insn, regno, flags, pred); write_count = rws_sum[regno].write_count; switch (write_count) { case 0: /* The register has not been written yet. 
*/ rws_update (rws_sum, regno, flags, pred); break; case 1: /* The register has been written via a predicate. If this is not a complementary predicate, then we need a barrier. */ /* ??? This assumes that P and P+1 are always complementary predicates for P even. */ if (flags.is_and && rws_sum[regno].written_by_and) ; else if (flags.is_or && rws_sum[regno].written_by_or) ; else if ((rws_sum[regno].first_pred ^ 1) != pred) need_barrier = 1; rws_update (rws_sum, regno, flags, pred); break; case 2: /* The register has been unconditionally written already. We need a barrier. */ if (flags.is_and && rws_sum[regno].written_by_and) ; else if (flags.is_or && rws_sum[regno].written_by_or) ; else need_barrier = 1; rws_sum[regno].written_by_and = flags.is_and; rws_sum[regno].written_by_or = flags.is_or; break; default: abort (); } } else { if (flags.is_branch) { /* Branches have several RAW exceptions that allow to avoid barriers. */ if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM) /* RAW dependencies on branch regs are permissible as long as the writer is a non-branch instruction. Since we never generate code that uses a branch register written by a branch instruction, handling this case is easy. */ return 0; if (REGNO_REG_CLASS (regno) == PR_REGS && ! rws_sum[regno].written_by_fp) /* The predicates of a branch are available within the same insn group as long as the predicate was written by something other than a floating-point instruction. */ return 0; } if (flags.is_and && rws_sum[regno].written_by_and) return 0; if (flags.is_or && rws_sum[regno].written_by_or) return 0; switch (rws_sum[regno].write_count) { case 0: /* The register has not been written yet. */ break; case 1: /* The register has been written via a predicate. If this is not a complementary predicate, then we need a barrier. */ /* ??? This assumes that P and P+1 are always complementary predicates for P even. 
*/ if ((rws_sum[regno].first_pred ^ 1) != pred) need_barrier = 1; break; case 2: /* The register has been unconditionally written already. We need a barrier. */ need_barrier = 1; break; default: abort (); } } return need_barrier; } static int rws_access_reg (reg, flags, pred) rtx reg; struct reg_flags flags; int pred; { int regno = REGNO (reg); int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); if (n == 1) return rws_access_regno (regno, flags, pred); else { int need_barrier = 0; while (--n >= 0) need_barrier |= rws_access_regno (regno + n, flags, pred); return need_barrier; } } /* Examine X, which is a SET rtx, and update the flags, the predicate, and the condition, stored in *PFLAGS, *PPRED and *PCOND. */ static void update_set_flags (x, pflags, ppred, pcond) rtx x; struct reg_flags *pflags; int *ppred; rtx *pcond; { rtx src = SET_SRC (x); *pcond = 0; switch (GET_CODE (src)) { case CALL: return; case IF_THEN_ELSE: if (SET_DEST (x) == pc_rtx) /* X is a conditional branch. */ return; else { int is_complemented = 0; /* X is a conditional move. */ rtx cond = XEXP (src, 0); if (GET_CODE (cond) == EQ) is_complemented = 1; cond = XEXP (cond, 0); if (GET_CODE (cond) != REG && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS) abort (); *pcond = cond; if (XEXP (src, 1) == SET_DEST (x) || XEXP (src, 2) == SET_DEST (x)) { /* X is a conditional move that conditionally writes the destination. */ /* We need another complement in this case. */ if (XEXP (src, 1) == SET_DEST (x)) is_complemented = ! is_complemented; *ppred = REGNO (cond); if (is_complemented) ++*ppred; } /* ??? If this is a conditional write to the dest, then this instruction does not actually read one source. This probably doesn't matter, because that source is also the dest. */ /* ??? Multiple writes to predicate registers are allowed if they are all AND type compares, or if they are all OR type compares. We do not generate such instructions currently. */ } /* ... fall through ... 
*/ default: if (GET_RTX_CLASS (GET_CODE (src)) == '<' && GET_MODE_CLASS (GET_MODE (XEXP (src, 0))) == MODE_FLOAT) /* Set pflags->is_fp to 1 so that we know we're dealing with a floating point comparison when processing the destination of the SET. */ pflags->is_fp = 1; /* Discover if this is a parallel comparison. We only handle and.orcm and or.andcm at present, since we must retain a strict inverse on the predicate pair. */ else if (GET_CODE (src) == AND) pflags->is_and = 1; else if (GET_CODE (src) == IOR) pflags->is_or = 1; break; } } /* Subroutine of rtx_needs_barrier; this function determines whether the source of a given SET rtx found in X needs a barrier. FLAGS and PRED are as in rtx_needs_barrier. COND is an rtx that holds the condition for this insn. */ static int set_src_needs_barrier (x, flags, pred, cond) rtx x; struct reg_flags flags; int pred; rtx cond; { int need_barrier = 0; rtx dst; rtx src = SET_SRC (x); if (GET_CODE (src) == CALL) /* We don't need to worry about the result registers that get written by subroutine call. */ return rtx_needs_barrier (src, flags, pred); else if (SET_DEST (x) == pc_rtx) { /* X is a conditional branch. */ /* ??? This seems redundant, as the caller sets this bit for all JUMP_INSNs. */ flags.is_branch = 1; return rtx_needs_barrier (src, flags, pred); } need_barrier = rtx_needs_barrier (src, flags, pred); /* This instruction unconditionally uses a predicate register. */ if (cond) need_barrier |= rws_access_reg (cond, flags, 0); dst = SET_DEST (x); if (GET_CODE (dst) == ZERO_EXTRACT) { need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred); need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred); dst = XEXP (dst, 0); } return need_barrier; } /* Handle an access to rtx X of type FLAGS using predicate register PRED. Return 1 is this access creates a dependency with an earlier instruction in the same group. 
*/ static int rtx_needs_barrier (x, flags, pred) rtx x; struct reg_flags flags; int pred; { int i, j; int is_complemented = 0; int need_barrier = 0; const char *format_ptr; struct reg_flags new_flags; rtx cond = 0; if (! x) return 0; new_flags = flags; switch (GET_CODE (x)) { case SET: update_set_flags (x, &new_flags, &pred, &cond); need_barrier = set_src_needs_barrier (x, new_flags, pred, cond); if (GET_CODE (SET_SRC (x)) != CALL) { new_flags.is_write = 1; need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred); } break; case CALL: new_flags.is_write = 0; need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred); /* Avoid multiple register writes, in case this is a pattern with multiple CALL rtx. This avoids an abort in rws_access_reg. */ if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count) { new_flags.is_write = 1; need_barrier |= rws_access_regno (REG_RP, new_flags, pred); need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred); need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred); } break; case COND_EXEC: /* X is a predicated instruction. */ cond = COND_EXEC_TEST (x); if (pred) abort (); need_barrier = rtx_needs_barrier (cond, flags, 0); if (GET_CODE (cond) == EQ) is_complemented = 1; cond = XEXP (cond, 0); if (GET_CODE (cond) != REG && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS) abort (); pred = REGNO (cond); if (is_complemented) ++pred; need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred); return need_barrier; case CLOBBER: case USE: /* Clobber & use are for earlier compiler-phases only. */ break; case ASM_OPERANDS: case ASM_INPUT: /* We always emit stop bits for traditional asms. We emit stop bits for volatile extended asms if TARGET_VOL_ASM_STOP is true. */ if (GET_CODE (x) != ASM_OPERANDS || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP)) { /* Avoid writing the register multiple times if we have multiple asm outputs. This avoids an abort in rws_access_reg. */ if (! 
rws_insn[REG_VOLATILE].write_count) { new_flags.is_write = 1; rws_access_regno (REG_VOLATILE, new_flags, pred); } return 1; } /* For all ASM_OPERANDS, we must traverse the vector of input operands. We can not just fall through here since then we would be confused by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate traditional asms unlike their normal usage. */ for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i) if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred)) need_barrier = 1; break; case PARALLEL: for (i = XVECLEN (x, 0) - 1; i >= 0; --i) { rtx pat = XVECEXP (x, 0, i); if (GET_CODE (pat) == SET) { update_set_flags (pat, &new_flags, &pred, &cond); need_barrier |= set_src_needs_barrier (pat, new_flags, pred, cond); } else if (GET_CODE (pat) == USE || GET_CODE (pat) == CALL || GET_CODE (pat) == ASM_OPERANDS) need_barrier |= rtx_needs_barrier (pat, flags, pred); else if (GET_CODE (pat) != CLOBBER && GET_CODE (pat) != RETURN) abort (); } for (i = XVECLEN (x, 0) - 1; i >= 0; --i) { rtx pat = XVECEXP (x, 0, i); if (GET_CODE (pat) == SET) { if (GET_CODE (SET_SRC (pat)) != CALL) { new_flags.is_write = 1; need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags, pred); } } else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN) need_barrier |= rtx_needs_barrier (pat, flags, pred); } break; case SUBREG: x = SUBREG_REG (x); /* FALLTHRU */ case REG: if (REGNO (x) == AR_UNAT_REGNUM) { for (i = 0; i < 64; ++i) need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred); } else need_barrier = rws_access_reg (x, flags, pred); break; case MEM: /* Find the regs used in memory address computation. */ new_flags.is_write = 0; need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred); break; case CONST_INT: case CONST_DOUBLE: case SYMBOL_REF: case LABEL_REF: case CONST: break; /* Operators with side-effects. 
*/ case POST_INC: case POST_DEC: if (GET_CODE (XEXP (x, 0)) != REG) abort (); new_flags.is_write = 0; need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred); new_flags.is_write = 1; need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred); break; case POST_MODIFY: if (GET_CODE (XEXP (x, 0)) != REG) abort (); new_flags.is_write = 0; need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred); need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred); new_flags.is_write = 1; need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred); break; /* Handle common unary and binary ops for efficiency. */ case COMPARE: case PLUS: case MINUS: case MULT: case DIV: case MOD: case UDIV: case UMOD: case AND: case IOR: case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT: case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX: case NE: case EQ: case GE: case GT: case LE: case LT: case GEU: case GTU: case LEU: case LTU: need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred); need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred); break; case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND: case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT: case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS: case SQRT: case FFS: need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred); break; case UNSPEC: switch (XINT (x, 1)) { case UNSPEC_LTOFF_DTPMOD: case UNSPEC_LTOFF_DTPREL: case UNSPEC_DTPREL: case UNSPEC_LTOFF_TPREL: case UNSPEC_TPREL: case UNSPEC_PRED_REL_MUTEX: case UNSPEC_PIC_CALL: case UNSPEC_MF: case UNSPEC_FETCHADD_ACQ: case UNSPEC_BSP_VALUE: case UNSPEC_FLUSHRS: case UNSPEC_BUNDLE_SELECTOR: break; case UNSPEC_GR_SPILL: case UNSPEC_GR_RESTORE: { HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1)); HOST_WIDE_INT bit = (offset >> 3) & 63; need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred); new_flags.is_write = (XINT (x, 1) == 1); need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit, new_flags, 
pred); break; } case UNSPEC_FR_SPILL: case UNSPEC_FR_RESTORE: case UNSPEC_POPCNT: need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred); break; case UNSPEC_ADDP4: need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred); break; case UNSPEC_FR_RECIP_APPROX: need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred); need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred); break; case UNSPEC_CMPXCHG_ACQ: need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred); need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred); break; default: abort (); } break; case UNSPEC_VOLATILE: switch (XINT (x, 1)) { case UNSPECV_ALLOC: /* Alloc must always be the first instruction of a group. We force this by always returning true. */ /* ??? We might get better scheduling if we explicitly check for input/local/output register dependencies, and modify the scheduler so that alloc is always reordered to the start of the current group. We could then eliminate all of the first_instruction code. 
*/ rws_access_regno (AR_PFS_REGNUM, flags, pred); new_flags.is_write = 1; rws_access_regno (REG_AR_CFM, new_flags, pred); return 1; case UNSPECV_SET_BSP: need_barrier = 1; break; case UNSPECV_BLOCKAGE: case UNSPECV_INSN_GROUP_BARRIER: case UNSPECV_BREAK: case UNSPECV_PSAC_ALL: case UNSPECV_PSAC_NORMAL: return 0; default: abort (); } break; case RETURN: new_flags.is_write = 0; need_barrier = rws_access_regno (REG_RP, flags, pred); need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred); new_flags.is_write = 1; need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred); need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred); break; default: format_ptr = GET_RTX_FORMAT (GET_CODE (x)); for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) switch (format_ptr[i]) { case '0': /* unused field */ case 'i': /* integer */ case 'n': /* note */ case 'w': /* wide integer */ case 's': /* pointer to string */ case 'S': /* optional pointer to string */ break; case 'e': if (rtx_needs_barrier (XEXP (x, i), flags, pred)) need_barrier = 1; break; case 'E': for (j = XVECLEN (x, i) - 1; j >= 0; --j) if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred)) need_barrier = 1; break; default: abort (); } break; } return need_barrier; } /* Clear out the state for group_barrier_needed_p at the start of a sequence of insns. */ static void init_insn_group_barriers () { memset (rws_sum, 0, sizeof (rws_sum)); first_instruction = 1; } /* Given the current state, recorded by previous calls to this function, determine whether a group barrier (a stop bit) is necessary before INSN. Return nonzero if so. */ static int group_barrier_needed_p (insn) rtx insn; { rtx pat; int need_barrier = 0; struct reg_flags flags; memset (&flags, 0, sizeof (flags)); switch (GET_CODE (insn)) { case NOTE: break; case BARRIER: /* A barrier doesn't imply an instruction group boundary. 
*/ break; case CODE_LABEL: memset (rws_insn, 0, sizeof (rws_insn)); return 1; case CALL_INSN: flags.is_branch = 1; flags.is_sibcall = SIBLING_CALL_P (insn); memset (rws_insn, 0, sizeof (rws_insn)); /* Don't bundle a call following another call. */ if ((pat = prev_active_insn (insn)) && GET_CODE (pat) == CALL_INSN) { need_barrier = 1; break; } need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0); break; case JUMP_INSN: flags.is_branch = 1; /* Don't bundle a jump following a call. */ if ((pat = prev_active_insn (insn)) && GET_CODE (pat) == CALL_INSN) { need_barrier = 1; break; } /* FALLTHRU */ case INSN: if (GET_CODE (PATTERN (insn)) == USE || GET_CODE (PATTERN (insn)) == CLOBBER) /* Don't care about USE and CLOBBER "insns"---those are used to indicate to the optimizer that it shouldn't get rid of certain operations. */ break; pat = PATTERN (insn); /* Ug. Hack hacks hacked elsewhere. */ switch (recog_memoized (insn)) { /* We play dependency tricks with the epilogue in order to get proper schedules. Undo this for dv analysis. */ case CODE_FOR_epilogue_deallocate_stack: case CODE_FOR_prologue_allocate_stack: pat = XVECEXP (pat, 0, 0); break; /* The pattern we use for br.cloop confuses the code above. The second element of the vector is representative. */ case CODE_FOR_doloop_end_internal: pat = XVECEXP (pat, 0, 1); break; /* Doesn't generate code. */ case CODE_FOR_pred_rel_mutex: case CODE_FOR_prologue_use: return 0; default: break; } memset (rws_insn, 0, sizeof (rws_insn)); need_barrier = rtx_needs_barrier (pat, flags, 0); /* Check to see if the previous instruction was a volatile asm. */ if (! need_barrier) need_barrier = rws_access_regno (REG_VOLATILE, flags, 0); break; default: abort (); } if (first_instruction) { need_barrier = 0; first_instruction = 0; } return need_barrier; } /* Like group_barrier_needed_p, but do not clobber the current state. 
 */

static int
safe_group_barrier_needed_p (insn)
     rtx insn;
{
  struct reg_write_state rws_saved[NUM_REGS];
  int saved_first_instruction;
  int t;

  /* Snapshot the cumulative serialization state, run the query, then
     restore the snapshot so the caller's incremental state is intact.  */
  memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
  saved_first_instruction = first_instruction;

  t = group_barrier_needed_p (insn);

  memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
  first_instruction = saved_first_instruction;

  return t;
}

/* INSNS is a chain of instructions.  Scan the chain, and insert stop bits
   as necessary to eliminate dependencies.  This function assumes that
   a final instruction scheduling pass has been run which has already
   inserted most of the necessary stop bits.  This function only inserts
   new ones at basic block boundaries, since these are invisible to the
   scheduler.  */

static void
emit_insn_group_barriers (dump, insns)
     FILE *dump;
     rtx insns;
{
  rtx insn;
  rtx last_label = 0;
  int insns_since_last_label = 0;

  init_insn_group_barriers ();

  for (insn = insns; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == CODE_LABEL)
	{
	  /* Remember the most recent label preceded by real insns; a
	     needed stop bit will be hoisted to just before it.  */
	  if (insns_since_last_label)
	    last_label = insn;
	  insns_since_last_label = 0;
	}
      else if (GET_CODE (insn) == NOTE
	       && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
	{
	  if (insns_since_last_label)
	    last_label = insn;
	  insns_since_last_label = 0;
	}
      else if (GET_CODE (insn) == INSN
	       && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
	       && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
	{
	  /* An explicit stop bit resets the tracked group state.  */
	  init_insn_group_barriers ();
	  last_label = 0;
	}
      else if (INSN_P (insn))
	{
	  insns_since_last_label = 1;

	  if (group_barrier_needed_p (insn))
	    {
	      if (last_label)
		{
		  if (dump)
		    fprintf (dump, "Emitting stop before label %d\n",
			     INSN_UID (last_label));
		  emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
		  /* Rescan from the label with a fresh state so insns
		     between it and here are re-checked.  */
		  insn = last_label;

		  init_insn_group_barriers ();
		  last_label = 0;
		}
	    }
	}
    }
}

/* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
   This function has to emit all necessary group barriers.
 */

static void
emit_all_insn_group_barriers (dump, insns)
     FILE *dump ATTRIBUTE_UNUSED;
     rtx insns;
{
  rtx insn;

  init_insn_group_barriers ();

  for (insn = insns; insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == BARRIER)
	{
	  /* Make sure the last real insn before a control-flow barrier is
	     followed by a stop bit, unless one is already there.  */
	  rtx last = prev_active_insn (insn);

	  if (! last)
	    continue;
	  /* Skip over a jump-table; the jump that uses it is the real
	     last insn.  */
	  if (GET_CODE (last) == JUMP_INSN
	      && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
	    last = prev_active_insn (last);
	  if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
	    emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);

	  init_insn_group_barriers ();
	}
      else if (INSN_P (insn))
	{
	  if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
	    init_insn_group_barriers ();
	  else if (group_barrier_needed_p (insn))
	    {
	      emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
	      init_insn_group_barriers ();
	      /* Re-run on INSN so its own accesses seed the new group's
	         state (the first call's state was discarded by the reset
	         above).  */
	      group_barrier_needed_p (insn);
	    }
	}
    }
}

static int errata_find_address_regs PARAMS ((rtx *, void *));
static void errata_emit_nops PARAMS ((rtx));
static void fixup_errata PARAMS ((void));

/* This structure is used to track some details about the previous insns
   groups so we can determine if it may be necessary to insert NOPs to
   workaround hardware errata.  */
static struct group
{
  /* Predicate registers written by floating-point insns in the group.  */
  HARD_REG_SET p_reg_set;
  /* General registers conditionally written under such a predicate.  */
  HARD_REG_SET gr_reg_conditionally_set;
} last_group[2];

/* Index into the last_group array.  */
static int group_idx;

/* Called through for_each_rtx; determines if a hard register that was
   conditionally set in the previous group is used as an address register.
   It ensures that for_each_rtx returns 1 in that case.
*/

/* for_each_rtx callback: return 1 if *XP is a MEM whose address register
   was conditionally set in the previous group (the erratum trigger);
   return -1 to stop descending into a MEM address that is safe; return 0
   to keep walking.  DATA is unused.  */

static int
errata_find_address_regs (xp, data)
     rtx *xp;
     void *data ATTRIBUTE_UNUSED;
{
  rtx x = *xp;
  if (GET_CODE (x) != MEM)
    return 0;
  x = XEXP (x, 0);
  if (GET_CODE (x) == POST_MODIFY)
    x = XEXP (x, 0);
  if (GET_CODE (x) == REG)
    {
      struct group *prev_group = last_group + (group_idx ^ 1);
      if (TEST_HARD_REG_BIT (prev_group->gr_reg_conditionally_set,
                             REGNO (x)))
        return 1;
      /* Address register is safe; don't walk into the address.  */
      return -1;
    }
  return 0;
}

/* Called for each insn; this function keeps track of the state in
   last_group and emits additional NOPs if necessary to work around
   an Itanium A/B step erratum.  */
static void
errata_emit_nops (insn)
     rtx insn;
{
  struct group *this_group = last_group + group_idx;
  struct group *prev_group = last_group + (group_idx ^ 1);
  rtx pat = PATTERN (insn);
  rtx cond = GET_CODE (pat) == COND_EXEC ? COND_EXEC_TEST (pat) : 0;
  rtx real_pat = cond ? COND_EXEC_CODE (pat) : pat;
  enum attr_type type;
  rtx set = real_pat;

  /* Nothing to track for uses, clobbers, asms and jump tables.  */
  if (GET_CODE (real_pat) == USE
      || GET_CODE (real_pat) == CLOBBER
      || GET_CODE (real_pat) == ASM_INPUT
      || GET_CODE (real_pat) == ADDR_VEC
      || GET_CODE (real_pat) == ADDR_DIFF_VEC
      || asm_noperands (PATTERN (insn)) >= 0)
    return;

  /* single_set doesn't work for COND_EXEC insns, so we have to duplicate
     parts of it.  A PARALLEL counts as a single set only if all elements
     past the first are USEs or CLOBBERs.  */

  if (GET_CODE (set) == PARALLEL)
    {
      int i;
      set = XVECEXP (real_pat, 0, 0);
      for (i = 1; i < XVECLEN (real_pat, 0); i++)
        if (GET_CODE (XVECEXP (real_pat, 0, i)) != USE
            && GET_CODE (XVECEXP (real_pat, 0, i)) != CLOBBER)
          {
            set = 0;
            break;
          }
    }

  if (set && GET_CODE (set) != SET)
    set = 0;

  type = get_attr_type (insn);

  /* An FP insn writing a predicate register: remember the predicate.  */
  if (type == TYPE_F
      && set && REG_P (SET_DEST (set)) && PR_REGNO_P (REGNO (SET_DEST (set))))
    SET_HARD_REG_BIT (this_group->p_reg_set, REGNO (SET_DEST (set)));

  /* An M- or A-type conditional write of a general register, where the
     predicate was produced by an FP insn in the previous group, and the
     source is not one of the forms exempt from the erratum.  */
  if ((type == TYPE_M || type == TYPE_A) && cond && set
      && REG_P (SET_DEST (set))
      && GET_CODE (SET_SRC (set)) != PLUS
      && GET_CODE (SET_SRC (set)) != MINUS
      && (GET_CODE (SET_SRC (set)) != ASHIFT
          || !shladd_operand (XEXP (SET_SRC (set), 1), VOIDmode))
      && (GET_CODE (SET_SRC (set)) != MEM
          || GET_CODE (XEXP (SET_SRC (set), 0)) != POST_MODIFY)
      && GENERAL_REGNO_P (REGNO (SET_DEST (set))))
    {
      if (GET_RTX_CLASS (GET_CODE (cond)) != '<'
          || ! REG_P (XEXP (cond, 0)))
        abort ();

      if (TEST_HARD_REG_BIT (prev_group->p_reg_set, REGNO (XEXP (cond, 0))))
        SET_HARD_REG_BIT (this_group->gr_reg_conditionally_set,
                          REGNO (SET_DEST (set)));
    }

  /* If INSN addresses memory through such a register, break the group
     with stop / nop / stop and reset the tracking state.  */
  if (for_each_rtx (&real_pat, errata_find_address_regs, NULL))
    {
      emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
      emit_insn_before (gen_nop (), insn);
      emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
      group_idx = 0;
      memset (last_group, 0, sizeof last_group);
    }
}

/* Emit extra nops if they are required to work around hardware errata.  */

static void
fixup_errata ()
{
  rtx insn;

  if (! TARGET_B_STEP)
    return;

  group_idx = 0;
  memset (last_group, 0, sizeof last_group);

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (!INSN_P (insn))
        continue;

      /* A stop bit (TYPE_S) ends the current group: flip to the other
         tracking slot and clear it for the new group.  */
      if (ia64_safe_type (insn) == TYPE_S)
        {
          group_idx ^= 1;
          memset (last_group + group_idx, 0, sizeof last_group[group_idx]);
        }
      else
        errata_emit_nops (insn);
    }
}

/* Instruction scheduling support.  */

/* Describe one bundle.
*/

struct bundle
{
  /* Zero if there's no possibility of a stop in this bundle other than
     at the end, otherwise the position of the optional stop bit.  */
  int possible_stop;
  /* The types of the three slots.  */
  enum attr_type t[3];
  /* The pseudo op to be emitted into the assembler output.  */
  const char *name;
};

#define NR_BUNDLES 10

/* A list of all available bundles.  */

static const struct bundle bundle[NR_BUNDLES] =
{
  { 2, { TYPE_M, TYPE_I, TYPE_I }, ".mii" },
  { 1, { TYPE_M, TYPE_M, TYPE_I }, ".mmi" },
  { 0, { TYPE_M, TYPE_F, TYPE_I }, ".mfi" },
  { 0, { TYPE_M, TYPE_M, TYPE_F }, ".mmf" },
#if NR_BUNDLES == 10
  { 0, { TYPE_B, TYPE_B, TYPE_B }, ".bbb" },
  { 0, { TYPE_M, TYPE_B, TYPE_B }, ".mbb" },
#endif
  { 0, { TYPE_M, TYPE_I, TYPE_B }, ".mib" },
  { 0, { TYPE_M, TYPE_M, TYPE_B }, ".mmb" },
  { 0, { TYPE_M, TYPE_F, TYPE_B }, ".mfb" },
  /* .mfi needs to occur earlier than .mlx, so that we only generate it if
     it matches an L type insn.  Otherwise we'll try to generate L type
     nops.  */
  { 0, { TYPE_M, TYPE_L, TYPE_X }, ".mlx" }
};

/* Describe a packet of instructions.  Packets consist of two bundles that
   are visible to the hardware in one scheduling window.  */

struct ia64_packet
{
  /* The two constituent bundles, in issue order.  */
  const struct bundle *t1, *t2;
  /* Precomputed value of the first split issue in this packet if a cycle
     starts at its beginning.  */
  int first_split;
  /* For convenience, the insn types are replicated here so we don't have
     to go through T1 and T2 all the time.  */
  enum attr_type t[6];
};

/* An array containing all possible packets (every ordered pair of
   bundles); filled in lazily by ia64_sched_init.  */
#define NR_PACKETS (NR_BUNDLES * NR_BUNDLES)

static struct ia64_packet packets[NR_PACKETS];

/* Map attr_type to a string with the name.  */

static const char *const type_names[] =
{
  "UNKNOWN", "A", "I", "M", "F", "B", "L", "X", "S"
};

/* Nonzero if we should insert stop bits into the schedule.
*/

int ia64_final_schedule = 0;

static int itanium_split_issue PARAMS ((const struct ia64_packet *, int));
static rtx ia64_single_set PARAMS ((rtx));
static int insn_matches_slot PARAMS ((const struct ia64_packet *,
                                      enum attr_type, int, rtx));
static void ia64_emit_insn_before PARAMS ((rtx, rtx));
static void maybe_rotate PARAMS ((FILE *));
static void finish_last_head PARAMS ((FILE *, int));
static void rotate_one_bundle PARAMS ((FILE *));
static void rotate_two_bundles PARAMS ((FILE *));
static void nop_cycles_until PARAMS ((int, FILE *));
static void cycle_end_fill_slots PARAMS ((FILE *));
static int packet_matches_p PARAMS ((const struct ia64_packet *, int, int *));
static int get_split PARAMS ((const struct ia64_packet *, int));
static int find_best_insn PARAMS ((rtx *, enum attr_type *, int,
                                   const struct ia64_packet *, int));
static void find_best_packet PARAMS ((int *, const struct ia64_packet **,
                                      rtx *, enum attr_type *, int));
static int itanium_reorder PARAMS ((FILE *, rtx *, rtx *, int));
static void dump_current_packet PARAMS ((FILE *));
static void schedule_stop PARAMS ((FILE *));
static rtx gen_nop_type PARAMS ((enum attr_type));
static void ia64_emit_nops PARAMS ((void));

/* Map a bundle number to its pseudo-op.  */

const char *
get_bundle_name (b)
     int b;
{
  return bundle[b].name;
}

/* Compute the slot which will cause a split issue in packet P if the
   current cycle begins at slot BEGIN.  Returns 6 if the whole packet
   can issue in one cycle.  */

static int
itanium_split_issue (p, begin)
     const struct ia64_packet *p;
     int begin;
{
  /* Per-type issue counters; indexed by attr_type, all types used here
     are below TYPE_S.  */
  int type_count[TYPE_S];
  int i;
  int split = 6;

  if (begin < 3)
    {
      /* Always split before and after MMF.  */
      if (p->t[0] == TYPE_M && p->t[1] == TYPE_M && p->t[2] == TYPE_F)
        return 3;
      if (p->t[3] == TYPE_M && p->t[4] == TYPE_M && p->t[5] == TYPE_F)
        return 3;
      /* Always split after MBB and BBB.  */
      if (p->t[1] == TYPE_B)
        return 3;
      /* Split after first bundle in MIB BBB combination.  */
      if (p->t[2] == TYPE_B && p->t[3] == TYPE_B)
        return 3;
    }

  memset (type_count, 0, sizeof type_count);
  for (i = begin; i < split; i++)
    {
      enum attr_type t0 = p->t[i];
      /* An MLX bundle reserves the same units as an MFI bundle.  */
      enum attr_type t = (t0 == TYPE_L ? TYPE_F
                          : t0 == TYPE_X ? TYPE_I
                          : t0);
      /* Itanium can execute up to 3 branches, 2 floating point, 2 memory, and
         2 integer per cycle.  */
      int max = (t == TYPE_B ? 3 : 2);
      if (type_count[t] == max)
        return i;

      type_count[t]++;
    }
  return split;
}

/* Return the maximum number of instructions a cpu can issue.  */

static int
ia64_issue_rate ()
{
  return 6;
}

/* Helper function - like single_set, but look inside COND_EXEC.  */

static rtx
ia64_single_set (insn)
     rtx insn;
{
  rtx x = PATTERN (insn), ret;
  if (GET_CODE (x) == COND_EXEC)
    x = COND_EXEC_CODE (x);
  if (GET_CODE (x) == SET)
    return x;

  /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
     Although they are not classical single set, the second set is there just
     to protect it from moving past FP-relative stack accesses.  */
  switch (recog_memoized (insn))
    {
    case CODE_FOR_prologue_allocate_stack:
    case CODE_FOR_epilogue_deallocate_stack:
      ret = XVECEXP (x, 0, 0);
      break;

    default:
      ret = single_set_2 (insn, x);
      break;
    }

  return ret;
}

/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
ia64_adjust_cost (insn, link, dep_insn, cost)
     rtx insn, link, dep_insn;
     int cost;
{
  enum attr_type dep_type;
  enum attr_itanium_class dep_class;
  enum attr_itanium_class insn_class;
  rtx dep_set, set, src, addr;

  if (GET_CODE (PATTERN (insn)) == CLOBBER
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (dep_insn)) == CLOBBER
      || GET_CODE (PATTERN (dep_insn)) == USE
      /* @@@ Not accurate for indirect calls.  */
      || GET_CODE (insn) == CALL_INSN
      || ia64_safe_type (insn) == TYPE_S)
    return 0;

  /* Only true (flow) dependencies carry extra latency here.  */
  if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
      || REG_NOTE_KIND (link) == REG_DEP_ANTI)
    return 0;

  dep_type = ia64_safe_type (dep_insn);
  dep_class = ia64_safe_itanium_class (dep_insn);
  insn_class = ia64_safe_itanium_class (insn);

  /* Compares that feed a conditional branch can execute in the same cycle.  */
  dep_set = ia64_single_set (dep_insn);
  set = ia64_single_set (insn);

  if (dep_type != TYPE_F
      && dep_set
      && GET_CODE (SET_DEST (dep_set)) == REG
      && PR_REG (REGNO (SET_DEST (dep_set)))
      && GET_CODE (insn) == JUMP_INSN)
    return 0;

  if (dep_set && GET_CODE (SET_DEST (dep_set)) == MEM)
    {
      /* ??? Can't find any information in the documenation about whether
         a sequence
           st [rx] = ra
           ld rb = [ry]
         splits issue.  Assume it doesn't.  */
      return 0;
    }

  /* Extract the memory address used by INSN, if any: either the address
     of a stored-to MEM, or the address feeding a load on the source
     side (looking through SUBREG, ZERO_EXTEND, UNSPEC and LO_SUM).  */
  src = set ? SET_SRC (set) : 0;
  addr = 0;
  if (set)
    {
      if (GET_CODE (SET_DEST (set)) == MEM)
        addr = XEXP (SET_DEST (set), 0);
      else if (GET_CODE (SET_DEST (set)) == SUBREG
               && GET_CODE (SUBREG_REG (SET_DEST (set))) == MEM)
        addr = XEXP (SUBREG_REG (SET_DEST (set)), 0);
      else
        {
          addr = src;
          if (GET_CODE (addr) == UNSPEC && XVECLEN (addr, 0) > 0)
            addr = XVECEXP (addr, 0, 0);
          while (GET_CODE (addr) == SUBREG || GET_CODE (addr) == ZERO_EXTEND)
            addr = XEXP (addr, 0);
          /* Note that LO_SUM is used for GOT loads.  */
          if (GET_CODE (addr) == MEM || GET_CODE (addr) == LO_SUM)
            addr = XEXP (addr, 0);
          else
            addr = 0;
        }
    }

  if (addr && GET_CODE (addr) == POST_MODIFY)
    addr = XEXP (addr, 0);

  set = ia64_single_set (dep_insn);

  if ((dep_class == ITANIUM_CLASS_IALU
       || dep_class == ITANIUM_CLASS_ILOG
       || dep_class == ITANIUM_CLASS_LD)
      && (insn_class == ITANIUM_CLASS_LD
          || insn_class == ITANIUM_CLASS_ST))
    {
      if (! addr || ! set)
        abort ();
      /* This isn't completely correct - an IALU that feeds an address has
         a latency of 1 cycle if it's issued in an M slot, but 2 cycles
         otherwise.  Unfortunately there's no good way to describe this.  */
      if (reg_overlap_mentioned_p (SET_DEST (set), addr))
        return cost + 1;
    }
  if ((dep_class == ITANIUM_CLASS_IALU
       || dep_class == ITANIUM_CLASS_ILOG
       || dep_class == ITANIUM_CLASS_LD)
      && (insn_class == ITANIUM_CLASS_MMMUL
          || insn_class == ITANIUM_CLASS_MMSHF
          || insn_class == ITANIUM_CLASS_MMSHFI))
    return 3;
  if (dep_class == ITANIUM_CLASS_FMAC
      && (insn_class == ITANIUM_CLASS_FMISC
          || insn_class == ITANIUM_CLASS_FCVTFX
          || insn_class == ITANIUM_CLASS_XMPY))
    return 7;
  if ((dep_class == ITANIUM_CLASS_FMAC
       || dep_class == ITANIUM_CLASS_FMISC
       || dep_class == ITANIUM_CLASS_FCVTFX
       || dep_class == ITANIUM_CLASS_XMPY)
      && insn_class == ITANIUM_CLASS_STF)
    return 8;
  /* Intel docs say only LD, ST, IALU, ILOG, ISHF consumers have latency 4,
     but HP engineers say any non-MM operation.  */
  if ((dep_class == ITANIUM_CLASS_MMMUL
       || dep_class == ITANIUM_CLASS_MMSHF
       || dep_class == ITANIUM_CLASS_MMSHFI)
      && insn_class != ITANIUM_CLASS_MMMUL
      && insn_class != ITANIUM_CLASS_MMSHF
      && insn_class != ITANIUM_CLASS_MMSHFI)
    return 4;

  return cost;
}

/* Describe the current state of the Itanium pipeline.  */
static struct
{
  /* The first slot that is used in the current cycle.  */
  int first_slot;
  /* The next slot to fill.  */
  int cur;
  /* The packet we have selected for the current issue window.  */
  const struct ia64_packet *packet;
  /* The position of the split issue that occurs due to issue width
     limitations (6 if there's no split issue).  */
  int split;
  /* Record data about the insns scheduled so far in the same issue
     window.  The elements up to but not including FIRST_SLOT belong
     to the previous cycle, the ones starting with FIRST_SLOT belong
     to the current cycle.  */
  enum attr_type types[6];
  rtx insns[6];
  int stopbit[6];
  /* Nonzero if we decided to schedule a stop bit.  */
  int last_was_stop;
} sched_data;

/* Temporary arrays; they have enough elements to hold all insns that
   can be ready at the same time while scheduling of the current block.
   SCHED_READY can hold ready insns, SCHED_TYPES their types.
*/

static rtx *sched_ready;
static enum attr_type *sched_types;

/* Determine whether an insn INSN of type ITYPE can fit into slot SLOT
   of packet P.  Returns nonzero if it fits.  INSN may be null, in which
   case only the type compatibility is checked.  */

static int
insn_matches_slot (p, itype, slot, insn)
     const struct ia64_packet *p;
     enum attr_type itype;
     int slot;
     rtx insn;
{
  enum attr_itanium_requires_unit0 u0;
  enum attr_type stype = p->t[slot];

  if (insn)
    {
      /* Insns that must issue on unit 0 of their type can't go in SLOT
         if an earlier slot in the window already consumes that unit.  */
      u0 = ia64_safe_itanium_requires_unit0 (insn);
      if (u0 == ITANIUM_REQUIRES_UNIT0_YES)
        {
          int i;
          for (i = sched_data.first_slot; i < slot; i++)
            if (p->t[i] == stype
                || (stype == TYPE_F && p->t[i] == TYPE_L)
                || (stype == TYPE_I && p->t[i] == TYPE_X))
              return 0;
        }
      if (GET_CODE (insn) == CALL_INSN)
        {
          /* Reject calls in multiway branch packets.  We want to limit
             the number of multiway branches we generate (since the branch
             predictor is limited), and this seems to work fairly well.
             (If we didn't do this, we'd have to add another test here to
             force calls into the third slot of the bundle.)  */
          if (slot < 3)
            {
              if (p->t[1] == TYPE_B)
                return 0;
            }
          else
            {
              if (p->t[4] == TYPE_B)
                return 0;
            }
        }
    }

  if (itype == stype)
    return 1;
  /* A-type insns can issue on either an M or an I unit.  */
  if (itype == TYPE_A)
    return stype == TYPE_M || stype == TYPE_I;
  return 0;
}

/* Like emit_insn_before, but skip cycle_display notes.
   ??? When cycle display notes are implemented, update this.  */

static void
ia64_emit_insn_before (insn, before)
     rtx insn, before;
{
  emit_insn_before (insn, before);
}

/* When rotating a bundle out of the issue window, insert a bundle selector
   insn in front of it.  DUMP is the scheduling dump file or NULL.  START
   is either 0 or 3, depending on whether we want to emit a bundle selector
   for the first bundle or the second bundle in the current issue window.

   The selector insns are emitted this late because the selected packet can
   be changed until parts of it get rotated out.  */

static void
finish_last_head (dump, start)
     FILE *dump;
     int start;
{
  const struct ia64_packet *p = sched_data.packet;
  const struct bundle *b = start == 0 ? p->t1 : p->t2;
  int bundle_type = b - bundle;
  rtx insn;
  int i;

  if (! ia64_final_schedule)
    return;

  /* Find the first real insn in the bundle to hang the selector on.  */
  for (i = start; sched_data.insns[i] == 0; i++)
    if (i == start + 3)
      abort ();
  insn = sched_data.insns[i];

  if (dump)
    fprintf (dump, "// Emitting template before %d: %s\n",
             INSN_UID (insn), b->name);

  ia64_emit_insn_before (gen_bundle_selector (GEN_INT (bundle_type)), insn);
}

/* We can't schedule more insns this cycle.  Fix up the scheduling state
   and advance FIRST_SLOT and CUR.
   We have to distribute the insns that are currently found between
   FIRST_SLOT and CUR into the slots of the packet we have selected.  So
   far, they are stored successively in the fields starting at FIRST_SLOT;
   now they must be moved to the correct slots.
   DUMP is the current scheduling dump file, or NULL.  */

static void
cycle_end_fill_slots (dump)
     FILE *dump;
{
  const struct ia64_packet *packet = sched_data.packet;
  int slot, i;
  enum attr_type tmp_types[6];
  rtx tmp_insns[6];

  memcpy (tmp_types, sched_data.types, 6 * sizeof (enum attr_type));
  memcpy (tmp_insns, sched_data.insns, 6 * sizeof (rtx));

  for (i = slot = sched_data.first_slot; i < sched_data.cur; i++)
    {
      enum attr_type t = tmp_types[i];
      if (t != ia64_safe_type (tmp_insns[i]))
        abort ();
      /* Skip (and mark empty, i.e. nop-filled) slots the insn can't use.  */
      while (! insn_matches_slot (packet, t, slot, tmp_insns[i]))
        {
          if (slot > sched_data.split)
            abort ();
          if (dump)
            fprintf (dump, "// Packet needs %s, have %s\n",
                     type_names[packet->t[slot]], type_names[t]);
          sched_data.types[slot] = packet->t[slot];
          sched_data.insns[slot] = 0;
          sched_data.stopbit[slot] = 0;

          /* ??? TYPE_L instructions always fill up two slots, but we don't
             support TYPE_L nops.  */
          if (packet->t[slot] == TYPE_L)
            abort ();

          slot++;
        }

      /* Do _not_ use T here.  If T == TYPE_A, then we'd risk changing the
         actual slot type later.  */
      sched_data.types[slot] = packet->t[slot];
      sched_data.insns[slot] = tmp_insns[i];
      sched_data.stopbit[slot] = 0;
      slot++;

      /* TYPE_L instructions always fill up two slots.  */
      if (t == TYPE_L)
        {
          sched_data.types[slot] = packet->t[slot];
          sched_data.insns[slot] = 0;
          sched_data.stopbit[slot] = 0;
          slot++;
        }
    }

  /* This isn't right - there's no need to pad out until the forced split;
     the CPU will automatically split if an insn isn't ready.  */

  sched_data.first_slot = sched_data.cur = slot;
}

/* Bundle rotations, as described in the Itanium optimization manual.
   We can rotate either one or both bundles out of the issue window.
   DUMP is the current scheduling dump file, or NULL.  */

static void
rotate_one_bundle (dump)
     FILE *dump;
{
  if (dump)
    fprintf (dump, "// Rotating one bundle.\n");

  finish_last_head (dump, 0);
  if (sched_data.cur > 3)
    {
      /* Shift the second bundle's slots down into the first bundle's
         position and pick the packet whose first bundle is the old T2.  */
      sched_data.cur -= 3;
      sched_data.first_slot -= 3;
      memmove (sched_data.types,
               sched_data.types + 3,
               sched_data.cur * sizeof *sched_data.types);
      memmove (sched_data.stopbit,
               sched_data.stopbit + 3,
               sched_data.cur * sizeof *sched_data.stopbit);
      memmove (sched_data.insns,
               sched_data.insns + 3,
               sched_data.cur * sizeof *sched_data.insns);
      sched_data.packet
        = &packets[(sched_data.packet->t2 - bundle) * NR_BUNDLES];
    }
  else
    {
      sched_data.cur = 0;
      sched_data.first_slot = 0;
    }
}

static void
rotate_two_bundles (dump)
     FILE *dump;
{
  if (dump)
    fprintf (dump, "// Rotating two bundles.\n");

  if (sched_data.cur == 0)
    return;

  finish_last_head (dump, 0);
  if (sched_data.cur > 3)
    finish_last_head (dump, 3);
  sched_data.cur = 0;
  sched_data.first_slot = 0;
}

/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
ia64_sched_init (dump, sched_verbose, max_ready)
     FILE *dump ATTRIBUTE_UNUSED;
     int sched_verbose ATTRIBUTE_UNUSED;
     int max_ready;
{
  static int initialized = 0;

  /* One-time setup: enumerate every ordered pair of bundles into the
     PACKETS table and precompute each packet's first split issue.  */
  if (! initialized)
    {
      int b1, b2, i;

      initialized = 1;

      for (i = b1 = 0; b1 < NR_BUNDLES; b1++)
        {
          const struct bundle *t1 = bundle + b1;
          for (b2 = 0; b2 < NR_BUNDLES; b2++, i++)
            {
              const struct bundle *t2 = bundle + b2;

              packets[i].t1 = t1;
              packets[i].t2 = t2;
            }
        }
      for (i = 0; i < NR_PACKETS; i++)
        {
          int j;
          for (j = 0; j < 3; j++)
            packets[i].t[j] = packets[i].t1->t[j];
          for (j = 0; j < 3; j++)
            packets[i].t[j + 3] = packets[i].t2->t[j];
          packets[i].first_split = itanium_split_issue (packets + i, 0);
        }
    }

  init_insn_group_barriers ();

  memset (&sched_data, 0, sizeof sched_data);
  /* NOTE(review): these are allocated per block with no free visible
     here — presumably released in the sched-finish hook; confirm.  */
  sched_types = (enum attr_type *) xmalloc (max_ready
                                            * sizeof (enum attr_type));
  sched_ready = (rtx *) xmalloc (max_ready * sizeof (rtx));
}

/* See if the packet P can match the insns we have already scheduled.
   Return nonzero if so.  In *PSLOT, we store the first slot that is
   available for more instructions if we choose this packet.
   SPLIT holds the last slot we can use, there's a split issue after
   it so scheduling beyond it would cause us to use more than one cycle.  */

static int
packet_matches_p (p, split, pslot)
     const struct ia64_packet *p;
     int split;
     int *pslot;
{
  int filled = sched_data.cur;
  int first = sched_data.first_slot;
  int i, slot;

  /* First, check if the first of the two bundles must be a specific one (due
     to stop bits).  */
  if (first > 0 && sched_data.stopbit[0] && p->t1->possible_stop != 1)
    return 0;
  if (first > 1 && sched_data.stopbit[1] && p->t1->possible_stop != 2)
    return 0;

  /* Slots belonging to the previous cycle must match exactly in place.  */
  for (i = 0; i < first; i++)
    if (! insn_matches_slot (p, sched_data.types[i], i,
                             sched_data.insns[i]))
      return 0;
  /* Current-cycle insns may slide forward into later slots, but not
     past the split.  */
  for (i = slot = first; i < filled; i++)
    {
      while (slot < split)
        {
          if (insn_matches_slot (p, sched_data.types[i], slot,
                                 sched_data.insns[i]))
            break;
          slot++;
        }
      if (slot == split)
        return 0;
      slot++;
    }

  if (pslot)
    *pslot = slot;
  return 1;
}

/* A frontend for itanium_split_issue.  For a packet P and a slot
   number FIRST that describes the start of the current clock cycle,
   return the slot number of the first split issue.
   This function uses the cached number found in P if possible.  */

static int
get_split (p, first)
     const struct ia64_packet *p;
     int first;
{
  /* FIRST == 0 is the precomputed case.  */
  if (first == 0)
    return p->first_split;
  return itanium_split_issue (p, first);
}

/* Given N_READY insns in the array READY, whose types are found in the
   corresponding array TYPES, return the index of the insn that is best
   suited to be scheduled in slot SLOT of packet P, or -1 if none fits.
   READY is scanned from the back, i.e. from highest priority down.  */

static int
find_best_insn (ready, types, n_ready, p, slot)
     rtx *ready;
     enum attr_type *types;
     int n_ready;
     const struct ia64_packet *p;
     int slot;
{
  int best = -1;
  int best_pri = 0;
  while (n_ready-- > 0)
    {
      rtx insn = ready[n_ready];
      if (! insn)
        continue;
      if (best >= 0 && INSN_PRIORITY (ready[n_ready]) < best_pri)
        break;
      /* If we have equally good insns, one of which has a stricter
         slot requirement, prefer the one with the stricter requirement.  */
      if (best >= 0 && types[n_ready] == TYPE_A)
        continue;
      if (insn_matches_slot (p, types[n_ready], slot, insn))
        {
          best = n_ready;
          best_pri = INSN_PRIORITY (ready[best]);

          /* If there's no way we could get a stricter requirement, stop
             looking now.  */
          /* NOTE(review): the unconditional break below makes the
             conditional break above dead code — the loop always stops at
             the first match, so the "prefer stricter requirement" pass
             never happens.  Looks suspicious; confirm intent upstream.  */
          if (types[n_ready] != TYPE_A
              && ia64_safe_itanium_requires_unit0 (ready[n_ready]))
            break;
          break;
        }
    }
  return best;
}

/* Select the best packet to use given the current scheduler state and the
   current ready list.  READY is an array holding N_READY ready insns;
   TYPES is a corresponding array that holds their types.  Store the best
   packet in *PPACKET and the number of insns that can be scheduled in the
   current cycle in *PBEST.  */

static void
find_best_packet (pbest, ppacket, ready, types, n_ready)
     int *pbest;
     const struct ia64_packet **ppacket;
     rtx *ready;
     enum attr_type *types;
     int n_ready;
{
  int first = sched_data.first_slot;
  int best = 0;
  int lowest_end = 6;
  const struct ia64_packet *best_packet = NULL;
  int i;

  for (i = 0; i < NR_PACKETS; i++)
    {
      const struct ia64_packet *p = packets + i;
      int slot;
      int split = get_split (p, first);
      int win = 0;
      int first_slot, last_slot;
      int b_nops = 0;

      if (! packet_matches_p (p, split, &first_slot))
        continue;

      /* Work on a scratch copy so insns can be crossed off as placed.  */
      memcpy (sched_ready, ready, n_ready * sizeof (rtx));

      win = 0;
      last_slot = 6;
      for (slot = first_slot; slot < split; slot++)
        {
          int insn_nr;

          /* Disallow a degenerate case where the first bundle doesn't
             contain anything but NOPs!  */
          if (first_slot == 0 && win == 0 && slot == 3)
            {
              win = -1;
              break;
            }

          insn_nr = find_best_insn (sched_ready, types, n_ready, p, slot);
          if (insn_nr >= 0)
            {
              sched_ready[insn_nr] = 0;
              last_slot = slot;
              win++;
            }
          else if (p->t[slot] == TYPE_B)
            b_nops++;
        }
      /* We must disallow MBB/BBB packets if any of their B slots would be
         filled with nops.  */
      if (last_slot < 3)
        {
          if (p->t[1] == TYPE_B && (b_nops || last_slot < 2))
            win = -1;
        }
      else
        {
          if (p->t[4] == TYPE_B && (b_nops || last_slot < 5))
            win = -1;
        }

      /* Prefer more insns placed; break ties by the earlier last slot.  */
      if (win > best
          || (win == best && last_slot < lowest_end))
        {
          best = win;
          lowest_end = last_slot;
          best_packet = p;
        }
    }
  *pbest = best;
  *ppacket = best_packet;
}

/* Reorder the ready list so that the insns that can be issued in this cycle
   are found in the correct order at the end of the list.
   DUMP is the scheduling dump file, or NULL.  READY points to the start,
   E_READY to the end of the ready list.  MAY_FAIL determines what should
   be done if no insns can be scheduled in this cycle: if it is zero, we
   abort, otherwise we return 0.
   Return 1 if any insns can be scheduled in this cycle.  */

static int
itanium_reorder (dump, ready, e_ready, may_fail)
     FILE *dump;
     rtx *ready;
     rtx *e_ready;
     int may_fail;
{
  const struct ia64_packet *best_packet;
  int n_ready = e_ready - ready;
  int first = sched_data.first_slot;
  int i, best, best_split, filled;

  for (i = 0; i < n_ready; i++)
    sched_types[i] = ia64_safe_type (ready[i]);

  find_best_packet (&best, &best_packet, ready, sched_types, n_ready);

  if (best == 0)
    {
      if (may_fail)
        return 0;
      abort ();
    }

  if (dump)
    {
      /* NOTE(review): when best_packet->t2 is null, NULL is passed for a
         %s conversion, which is undefined behavior in printf; t2 appears
         to always be set by ia64_sched_init — confirm.  */
      fprintf (dump, "// Selected bundles: %s %s (%d insns)\n",
               best_packet->t1->name,
               best_packet->t2 ? best_packet->t2->name : NULL,
               best);
    }

  best_split = itanium_split_issue (best_packet, first);
  packet_matches_p (best_packet, best_split, &filled);

  /* Move the insns chosen for this cycle to the end of READY, in the
     order the slots want them (the scheduler issues from the back).  */
  for (i = filled; i < best_split; i++)
    {
      int insn_nr;

      insn_nr = find_best_insn (ready, sched_types, n_ready, best_packet, i);
      if (insn_nr >= 0)
        {
          rtx insn = ready[insn_nr];
          memmove (ready + insn_nr, ready + insn_nr + 1,
                   (n_ready - insn_nr - 1) * sizeof (rtx));
          memmove (sched_types + insn_nr, sched_types + insn_nr + 1,
                   (n_ready - insn_nr - 1) * sizeof (enum attr_type));
          ready[--n_ready] = insn;
        }
    }

  sched_data.packet = best_packet;
  sched_data.split = best_split;
  return 1;
}

/* Dump information about the current scheduling state to file DUMP.  */

static void
dump_current_packet (dump)
     FILE *dump;
{
  int i;
  fprintf (dump, "// %d slots filled:", sched_data.cur);
  for (i = 0; i < sched_data.first_slot; i++)
    {
      rtx insn = sched_data.insns[i];
      fprintf (dump, " %s", type_names[sched_data.types[i]]);
      if (insn)
        fprintf (dump, "/%s", type_names[ia64_safe_type (insn)]);
      if (sched_data.stopbit[i])
        fprintf (dump, " ;;");
    }
  fprintf (dump, " :::");
  for (i = sched_data.first_slot; i < sched_data.cur; i++)
    {
      rtx insn = sched_data.insns[i];
      enum attr_type t = ia64_safe_type (insn);
      fprintf (dump, " (%d) %s", INSN_UID (insn), type_names[t]);
    }
  fprintf (dump, "\n");
}

/* Schedule a stop bit.  DUMP is the current scheduling dump file, or
   NULL.  Chooses the packet allowing the earliest legal stop position,
   pads up to it with empty slots, and marks the stop bit.  */

static void
schedule_stop (dump)
     FILE *dump;
{
  const struct ia64_packet *best = sched_data.packet;
  int i;
  int best_stop = 6;

  if (dump)
    fprintf (dump, "// Stop bit, cur = %d.\n", sched_data.cur);

  if (sched_data.cur == 0)
    {
      if (dump)
        fprintf (dump, "// At start of bundle, so nothing to do.\n");

      rotate_two_bundles (NULL);
      return;
    }

  for (i = -1; i < NR_PACKETS; i++)
    {
      /* This is a slight hack to give the current packet the first chance.
         This is done to avoid e.g. switching from MIB to MBB bundles.  */
      const struct ia64_packet *p
        = (i >= 0 ? packets + i : sched_data.packet);
      int split = get_split (p, sched_data.first_slot);
      const struct bundle *compare;
      int next, stoppos;

      if (! packet_matches_p (p, split, &next))
        continue;

      /* Examine the bundle that would hold the stop.  */
      compare = next > 3 ? p->t2 : p->t1;

      stoppos = 3;
      if (compare->possible_stop)
        stoppos = compare->possible_stop;
      if (next > 3)
        stoppos += 3;

      /* The stop must come at or after the insns already placed, and be
         earlier than the best found so far; otherwise fall back to the
         bundle-end stop if the mid-bundle one isn't available.  */
      if (stoppos < next || stoppos >= best_stop)
        {
          if (compare->possible_stop == 0)
            continue;
          stoppos = (next > 3 ? 6 : 3);
        }
      if (stoppos < next || stoppos >= best_stop)
        continue;

      if (dump)
        fprintf (dump, "// switching from %s %s to %s %s (stop at %d)\n",
                 best->t1->name, best->t2->name, p->t1->name, p->t2->name,
                 stoppos);
      best_stop = stoppos;
      best = p;
    }

  sched_data.packet = best;
  cycle_end_fill_slots (dump);
  /* Pad empty slots up to the stop position.  */
  while (sched_data.cur < best_stop)
    {
      sched_data.types[sched_data.cur] = best->t[sched_data.cur];
      sched_data.insns[sched_data.cur] = 0;
      sched_data.stopbit[sched_data.cur] = 0;
      sched_data.cur++;
    }
  sched_data.stopbit[sched_data.cur - 1] = 1;
  sched_data.first_slot = best_stop;

  if (dump)
    dump_current_packet (dump);
}

/* If necessary, perform one or two rotations on the scheduling state.
   This should only be called if we are starting a new cycle.  */

static void
maybe_rotate (dump)
     FILE *dump;
{
  cycle_end_fill_slots (dump);
  if (sched_data.cur == 6)
    rotate_two_bundles (dump);
  else if (sched_data.cur >= 3)
    rotate_one_bundle (dump);
  sched_data.first_slot = sched_data.cur;
}

/* The clock cycle when ia64_sched_reorder was last called.  */
static int prev_cycle;

/* The first insn scheduled in the previous cycle.  This is the saved
   value of sched_data.first_slot.  */
static int prev_first;

/* Emit NOPs to fill the delay between PREV_CYCLE and CLOCK_VAR.  Used to
   pad out the delay between MM (shifts, etc.) and integer operations.  */

static void
nop_cycles_until (clock_var, dump)
     int clock_var;
     FILE *dump;
{
  int prev_clock = prev_cycle;
  int cycles_left = clock_var - prev_clock;
  bool did_stop = false;

  /* Finish the previous cycle; pad it out with NOPs.  */
  if (sched_data.cur == 3)
    {
      sched_emit_insn (gen_insn_group_barrier (GEN_INT (3)));
      did_stop = true;
      maybe_rotate (dump);
    }
  else if (sched_data.cur > 0)
    {
      int need_stop = 0;
      int split = itanium_split_issue (sched_data.packet, prev_first);

      if (sched_data.cur < 3 && split > 3)
        {
          split = 3;
          need_stop = 1;
        }

      /* Fill out the current cycle up to the split with typed NOPs.  */
      if (split > sched_data.cur)
        {
          int i;
          for (i = sched_data.cur; i < split; i++)
            {
              rtx t = sched_emit_insn (gen_nop_type
                                       (sched_data.packet->t[i]));
              sched_data.types[i] = sched_data.packet->t[i];
              sched_data.insns[i] = t;
              sched_data.stopbit[i] = 0;
            }
          sched_data.cur = split;
        }

      /* If we have at least one spare cycle, also fill the rest of the
         packet so the whole window can be rotated out.  */
      if (! need_stop && sched_data.cur > 0 && sched_data.cur < 6
          && cycles_left > 1)
        {
          int i;
          for (i = sched_data.cur; i < 6; i++)
            {
              rtx t = sched_emit_insn (gen_nop_type
                                       (sched_data.packet->t[i]));
              sched_data.types[i] = sched_data.packet->t[i];
              sched_data.insns[i] = t;
              sched_data.stopbit[i] = 0;
            }
          sched_data.cur = 6;
          cycles_left--;
          need_stop = 1;
        }

      if (need_stop || sched_data.cur == 6)
        {
          sched_emit_insn (gen_insn_group_barrier (GEN_INT (3)));
          did_stop = true;
        }
      maybe_rotate (dump);
    }

  /* Emit whole idle cycles as MII NOP bundles ending in a stop.  */
  cycles_left--;
  while (cycles_left > 0)
    {
      sched_emit_insn (gen_bundle_selector (GEN_INT (0)));
      sched_emit_insn (gen_nop_type (TYPE_M));
      sched_emit_insn (gen_nop_type (TYPE_I));
      if (cycles_left > 1)
        {
          /* A mid-bundle stop burns an extra cycle.  */
          sched_emit_insn (gen_insn_group_barrier (GEN_INT (2)));
          cycles_left--;
        }
      sched_emit_insn (gen_nop_type (TYPE_I));
      sched_emit_insn (gen_insn_group_barrier (GEN_INT (3)));
      did_stop = true;
      cycles_left--;
    }

  if (did_stop)
    init_insn_group_barriers ();
}

/* We are about to being issuing insns for this clock cycle.
   Override the default sort algorithm to better slot instructions.
*/

/* Core of the sched_reorder hooks.  REORDER_TYPE is 0 when called at the
   start of a cycle (ia64_sched_reorder) and 1 when called after each
   issued insn (ia64_sched_reorder2).  Returns the number of insns that
   can still be issued this cycle (as the scheduler expects).  */

static int
ia64_internal_sched_reorder (dump, sched_verbose, ready, pn_ready,
                             reorder_type, clock_var)
     FILE *dump ATTRIBUTE_UNUSED;
     int sched_verbose ATTRIBUTE_UNUSED;
     rtx *ready;
     int *pn_ready;
     int reorder_type, clock_var;
{
  int n_asms;
  int n_ready = *pn_ready;
  rtx *e_ready = ready + n_ready;
  rtx *insnp;

  if (sched_verbose)
    {
      fprintf (dump, "// ia64_sched_reorder (type %d):\n", reorder_type);
      dump_current_packet (dump);
    }

  /* Work around the pipeline flush that will occurr if the results of an MM
     instruction are accessed before the result is ready.  Intel
     documentation says this only happens with IALU, ISHF, ILOG, LD, and ST
     consumers, but experimental evidence shows that *any* non-MM type
     instruction will incurr the flush.  */
  if (reorder_type == 0 && clock_var > 0 && ia64_final_schedule)
    {
      for (insnp = ready; insnp < e_ready; insnp++)
        {
          rtx insn = *insnp, link;
          enum attr_itanium_class t = ia64_safe_itanium_class (insn);
          if (t == ITANIUM_CLASS_MMMUL
              || t == ITANIUM_CLASS_MMSHF
              || t == ITANIUM_CLASS_MMSHFI)
            continue;
          /* A true dependency (note kind 0) on an MM producer: pad with
             NOP cycles up to the current clock.  */
          for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
            if (REG_NOTE_KIND (link) == 0)
              {
                rtx other = XEXP (link, 0);
                enum attr_itanium_class t0 = ia64_safe_itanium_class (other);
                if (t0 == ITANIUM_CLASS_MMSHF || t0 == ITANIUM_CLASS_MMMUL)
                  {
                    nop_cycles_until (clock_var,
                                      sched_verbose ? dump : NULL);
                    goto out;
                  }
              }
        }
    }
 out:
  prev_first = sched_data.first_slot;
  prev_cycle = clock_var;

  if (reorder_type == 0)
    maybe_rotate (sched_verbose ? dump : NULL);

  /* First, move all USEs, CLOBBERs and other crud out of the way.  Asms
     are collected at the front of READY; any other TYPE_UNKNOWN insn is
     moved to the back and issued immediately by returning 1.  */
  n_asms = 0;
  for (insnp = ready; insnp < e_ready; insnp++)
    if (insnp < e_ready)
      {
        rtx insn = *insnp;
        enum attr_type t = ia64_safe_type (insn);
        if (t == TYPE_UNKNOWN)
          {
            if (GET_CODE (PATTERN (insn)) == ASM_INPUT
                || asm_noperands (PATTERN (insn)) >= 0)
              {
                rtx lowest = ready[n_asms];
                ready[n_asms] = insn;
                *insnp = lowest;
                n_asms++;
              }
            else
              {
                rtx highest = ready[n_ready - 1];
                ready[n_ready - 1] = insn;
                *insnp = highest;
                if (ia64_final_schedule && group_barrier_needed_p (insn))
                  {
                    schedule_stop (sched_verbose ? dump : NULL);
                    sched_data.last_was_stop = 1;
                    maybe_rotate (sched_verbose ? dump : NULL);
                  }
                return 1;
              }
          }
      }
  if (n_asms < n_ready)
    {
      /* Some normal insns to process.  Skip the asms.  */
      ready += n_asms;
      n_ready -= n_asms;
    }
  else if (n_ready > 0)
    {
      /* Only asm insns left.  */
      if (ia64_final_schedule && group_barrier_needed_p (ready[n_ready - 1]))
        {
          schedule_stop (sched_verbose ? dump : NULL);
          sched_data.last_was_stop = 1;
          maybe_rotate (sched_verbose ? dump : NULL);
        }
      cycle_end_fill_slots (sched_verbose ? dump : NULL);
      return 1;
    }

  if (ia64_final_schedule)
    {
      int nr_need_stop = 0;

      for (insnp = ready; insnp < e_ready; insnp++)
        if (safe_group_barrier_needed_p (*insnp))
          nr_need_stop++;

      /* Schedule a stop bit if
          - all insns require a stop bit, or
          - we are starting a new cycle and _any_ insns require a stop bit.
         The reason for the latter is that if our schedule is accurate, then
         the additional stop won't decrease performance at this point (since
         there's a split issue at this point anyway), but it gives us more
         freedom when scheduling the currently ready insns.  */
      if ((reorder_type == 0 && nr_need_stop)
          || (reorder_type == 1 && n_ready == nr_need_stop))
        {
          schedule_stop (sched_verbose ? dump : NULL);
          sched_data.last_was_stop = 1;
          maybe_rotate (sched_verbose ? dump : NULL);
          if (reorder_type == 1)
            return 0;
        }
      else
        {
          int deleted = 0;
          insnp = e_ready;
          /* Move down everything that needs a stop bit, preserving relative
             order.  */
          while (insnp-- > ready + deleted)
            while (insnp >= ready + deleted)
              {
                rtx insn = *insnp;
                if (! safe_group_barrier_needed_p (insn))
                  break;
                memmove (ready + 1, ready,
                         (insnp - ready) * sizeof (rtx));
                *ready = insn;
                deleted++;
              }
          /* The moved insns now sit at the front; exclude them from the
             window passed to itanium_reorder.  */
          n_ready -= deleted;
          ready += deleted;
          if (deleted != nr_need_stop)
            abort ();
        }
    }

  return itanium_reorder (sched_verbose ? dump : NULL,
                          ready, e_ready, reorder_type == 1);
}

/* sched_reorder target hook: called at the start of each cycle.  */

static int
ia64_sched_reorder (dump, sched_verbose, ready, pn_ready, clock_var)
     FILE *dump;
     int sched_verbose;
     rtx *ready;
     int *pn_ready;
     int clock_var;
{
  return ia64_internal_sched_reorder (dump, sched_verbose, ready,
                                      pn_ready, 0, clock_var);
}

/* Like ia64_sched_reorder, but called after issuing each insn.
   Override the default sort algorithm to better slot instructions.  */

static int
ia64_sched_reorder2 (dump, sched_verbose, ready, pn_ready, clock_var)
     FILE *dump ATTRIBUTE_UNUSED;
     int sched_verbose ATTRIBUTE_UNUSED;
     rtx *ready;
     int *pn_ready;
     int clock_var;
{
  if (sched_data.last_was_stop)
    return 0;

  /* Detect one special case and try to optimize it.
     If we have 1.M;;MI 2.MIx, and slots 2.1 (M) and 2.2 (I) are both NOPs,
     then we can get better code by transforming this to 1.MFB;; 2.MIx.  */
  if (sched_data.first_slot == 1
      && sched_data.stopbit[0]
      && ((sched_data.cur == 4
           && (sched_data.types[1] == TYPE_M || sched_data.types[1] == TYPE_A)
           && (sched_data.types[2] == TYPE_I || sched_data.types[2] == TYPE_A)
           && (sched_data.types[3] != TYPE_M && sched_data.types[3] != TYPE_A))
          || (sched_data.cur == 3
              && (sched_data.types[1] == TYPE_M
                  || sched_data.types[1] == TYPE_A)
              && (sched_data.types[2] != TYPE_M
                  && sched_data.types[2] != TYPE_I
                  && sched_data.types[2] != TYPE_A))))
    {
      int i, best;
      rtx stop = sched_data.insns[1];

      /* Search backward for the stop bit that must be there.  */
      while (1)
        {
          int insn_code;

          stop = PREV_INSN (stop);
          if (GET_CODE (stop) != INSN)
            abort ();
          insn_code = recog_memoized (stop);

          /* Ignore .pred.rel.mutex.

             ??? Update this to ignore cycle display notes too
             ??? once those are implemented  */
          if (insn_code == CODE_FOR_pred_rel_mutex
              || insn_code == CODE_FOR_prologue_use)
            continue;

          if (insn_code == CODE_FOR_insn_group_barrier)
            break;
          abort ();
        }

      /* Adjust the stop bit's slot selector: it moves from after slot 1
         to the end of the first (now MFB) bundle.  */
      if (INTVAL (XVECEXP (PATTERN (stop), 0, 0)) != 1)
        abort ();
      XVECEXP (PATTERN (stop), 0, 0) = GEN_INT (3);

      sched_data.stopbit[0] = 0;
      sched_data.stopbit[2] = 1;

      /* Shift the second cycle's insns up by two so they start the
         second bundle (slot 3).  */
      sched_data.types[5] = sched_data.types[3];
      sched_data.types[4] = sched_data.types[2];
      sched_data.types[3] = sched_data.types[1];
      sched_data.insns[5] = sched_data.insns[3];
      sched_data.insns[4] = sched_data.insns[2];
      sched_data.insns[3] = sched_data.insns[1];
      sched_data.stopbit[5] = sched_data.stopbit[4]
        = sched_data.stopbit[3] = 0;
      sched_data.cur += 2;
      sched_data.first_slot = 3;
      /* Select a packet whose first bundle is MFB.  */
      for (i = 0; i < NR_PACKETS; i++)
        {
          const struct ia64_packet *p = packets + i;
          if (p->t[0] == TYPE_M && p->t[1] == TYPE_F && p->t[2] == TYPE_B)
            {
              sched_data.packet = p;
              break;
            }
        }
      rotate_one_bundle (sched_verbose ? dump : NULL);

      /* Re-select the best packet for the remaining insns.  */
      best = 6;
      for (i = 0; i < NR_PACKETS; i++)
        {
          const struct ia64_packet *p = packets + i;
          int split = get_split (p, sched_data.first_slot);
          int next;

          /* Disallow multiway branches here.  */
          if (p->t[1] == TYPE_B)
            continue;

          if (packet_matches_p (p, split, &next) && next < best)
            {
              best = next;
              sched_data.packet = p;
              sched_data.split = split;
            }
        }
      if (best == 6)
        abort ();
    }

  if (*pn_ready > 0)
    {
      int more = ia64_internal_sched_reorder (dump, sched_verbose,
                                              ready, pn_ready, 1,
                                              clock_var);
      if (more)
        return more;
      /* Did we schedule a stop?  If so, finish this cycle.  */
      if (sched_data.cur == sched_data.first_slot)
        return 0;
    }

  if (sched_verbose)
    fprintf (dump, "// Can't issue more this cycle; updating type array.\n");

  cycle_end_fill_slots (sched_verbose ? dump : NULL);
  if (sched_verbose)
    dump_current_packet (dump);
  return 0;
}

/* We are about to issue INSN.  Return the number of insns left on the
   ready queue that can be issued this cycle.
*/

static int
ia64_variable_issue (dump, sched_verbose, insn, can_issue_more)
     FILE *dump;
     int sched_verbose;
     rtx insn;
     int can_issue_more ATTRIBUTE_UNUSED;
{
  enum attr_type t = ia64_safe_type (insn);

  if (sched_data.last_was_stop)
    {
      /* The previous issue ended with a stop: materialize the group
	 barrier before INSN and reset the barrier-tracking state.
	 NOTE(review): this inner `t' shadows the outer `t' above --
	 here it is a slot selector (0 mapped to 3), not an insn type.  */
      int t = sched_data.first_slot;
      if (t == 0)
	t = 3;
      ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (t)), insn);
      init_insn_group_barriers ();
      sched_data.last_was_stop = 0;
    }

  if (t == TYPE_UNKNOWN)
    {
      if (sched_verbose)
	fprintf (dump, "// Ignoring type %s\n", type_names[t]);
      if (GET_CODE (PATTERN (insn)) == ASM_INPUT
	  || asm_noperands (PATTERN (insn)) >= 0)
	{
	  /* This must be some kind of asm.  Clear the scheduling state.  */
	  rotate_two_bundles (sched_verbose ? dump : NULL);
	  if (ia64_final_schedule)
	    group_barrier_needed_p (insn);
	}
      return 1;
    }

  /* This is _not_ just a sanity check.  group_barrier_needed_p will update
     important state info.  Don't delete this test.  */
  if (ia64_final_schedule
      && group_barrier_needed_p (insn))
    abort ();

  /* Record INSN in the current slot of the in-progress packet.  */
  sched_data.stopbit[sched_data.cur] = 0;
  sched_data.insns[sched_data.cur] = insn;
  sched_data.types[sched_data.cur] = t;

  sched_data.cur++;
  if (sched_verbose)
    fprintf (dump, "// Scheduling insn %d of type %s\n",
	     INSN_UID (insn), type_names[t]);

  /* Calls end the insn group so the return address lands after a stop.  */
  if (GET_CODE (insn) == CALL_INSN && ia64_final_schedule)
    {
      schedule_stop (sched_verbose ? dump : NULL);
      sched_data.last_was_stop = 1;
    }

  return 1;
}

/* Free data allocated by ia64_sched_init.  */

static void
ia64_sched_finish (dump, sched_verbose)
     FILE *dump;
     int sched_verbose;
{
  if (sched_verbose)
    fprintf (dump, "// Finishing schedule.\n");
  /* Flush any partially filled bundles before tearing down.  */
  rotate_two_bundles (NULL);
  free (sched_types);
  free (sched_ready);
}

/* Emit pseudo-ops for the assembler to describe predicate relations.
   At present this assumes that we only consider predicate pairs to
   be mutex, and that the assembler can deduce proper values from
   straight-line code.
*/

static void
emit_predicate_relation_info ()
{
  basic_block bb;

  FOR_EACH_BB_REVERSE (bb)
    {
      int r;
      rtx head = bb->head;

      /* We only need such notes at code labels.  */
      if (GET_CODE (head) != CODE_LABEL)
	continue;
      if (GET_CODE (NEXT_INSN (head)) == NOTE
	  && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
	head = NEXT_INSN (head);

      /* Emit a .pred.rel.mutex for every predicate pair live on entry
	 to the block.  Predicates come in pairs (hence r += 2).  */
      for (r = PR_REG (0); r < PR_REG (64); r += 2)
	if (REGNO_REG_SET_P (bb->global_live_at_start, r))
	  {
	    rtx p = gen_rtx_REG (BImode, r);
	    rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
	    if (head == bb->end)
	      bb->end = n;
	    head = n;
	  }
    }

  /* Look for conditional calls that do not return, and protect predicate
     relations around them.  Otherwise the assembler will assume the call
     returns, and complain about uses of call-clobbered predicates after
     the call.  */
  FOR_EACH_BB_REVERSE (bb)
    {
      rtx insn = bb->head;

      while (1)
	{
	  if (GET_CODE (insn) == CALL_INSN
	      && GET_CODE (PATTERN (insn)) == COND_EXEC
	      && find_reg_note (insn, REG_NORETURN, NULL_RTX))
	    {
	      rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
	      rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
	      if (bb->head == insn)
		bb->head = b;
	      if (bb->end == insn)
		bb->end = a;
	    }

	  if (insn == bb->end)
	    break;
	  insn = NEXT_INSN (insn);
	}
    }
}

/* Generate a NOP instruction of type T.  We will never generate L type
   nops.  */

static rtx
gen_nop_type (t)
     enum attr_type t;
{
  switch (t)
    {
    case TYPE_M:
      return gen_nop_m ();
    case TYPE_I:
      return gen_nop_i ();
    case TYPE_B:
      return gen_nop_b ();
    case TYPE_F:
      return gen_nop_f ();
    case TYPE_X:
      return gen_nop_x ();
    default:
      abort ();
    }
}

/* After the last scheduling pass, fill in NOPs.  It's easier to do this
   here than while scheduling.  */

static void
ia64_emit_nops ()
{
  rtx insn;
  const struct bundle *b = 0;	/* Bundle template currently being filled.  */
  int bundle_pos = 0;		/* Next slot (0..2) within that bundle.  */

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      rtx pat;
      enum attr_type t;
      pat = INSN_P (insn) ? PATTERN (insn) : const0_rtx;
      if (GET_CODE (pat) == USE || GET_CODE (pat) == CLOBBER)
	continue;
      /* A bundle selector or a label starts a fresh bundle; pad out the
	 previous one with NOPs first.  */
      if ((GET_CODE (pat) == UNSPEC && XINT (pat, 1) == UNSPEC_BUNDLE_SELECTOR)
	  || GET_CODE (insn) == CODE_LABEL)
	{
	  if (b)
	    while (bundle_pos < 3)
	      {
		emit_insn_before (gen_nop_type (b->t[bundle_pos]), insn);
		bundle_pos++;
	      }
	  if (GET_CODE (insn) != CODE_LABEL)
	    b = bundle + INTVAL (XVECEXP (pat, 0, 0));
	  else
	    b = 0;
	  bundle_pos = 0;
	  continue;
	}
      /* A group barrier's operand names the slot it must occupy; pad with
	 NOPs until we reach that slot.  */
      else if (GET_CODE (pat) == UNSPEC_VOLATILE
	       && XINT (pat, 1) == UNSPECV_INSN_GROUP_BARRIER)
	{
	  int t = INTVAL (XVECEXP (pat, 0, 0));
	  if (b)
	    while (bundle_pos < t)
	      {
		emit_insn_before (gen_nop_type (b->t[bundle_pos]), insn);
		bundle_pos++;
	      }
	  continue;
	}

      if (bundle_pos == 3)
	b = 0;

      if (b && INSN_P (insn))
	{
	  t = ia64_safe_type (insn);
	  /* asms take a whole bundle of their own; fill the rest of the
	     current bundle with NOPs.  */
	  if (asm_noperands (PATTERN (insn)) >= 0
	      || GET_CODE (PATTERN (insn)) == ASM_INPUT)
	    {
	      while (bundle_pos < 3)
		{
		  emit_insn_before (gen_nop_type (b->t[bundle_pos]), insn);
		  bundle_pos++;
		}
	      continue;
	    }

	  if (t == TYPE_UNKNOWN)
	    continue;
	  /* Insert NOPs until INSN's type matches the bundle slot
	     (TYPE_A fits either an M or an I slot).  */
	  while (bundle_pos < 3)
	    {
	      if (t == b->t[bundle_pos]
		  || (t == TYPE_A && (b->t[bundle_pos] == TYPE_M
				      || b->t[bundle_pos] == TYPE_I)))
		break;

	      emit_insn_before (gen_nop_type (b->t[bundle_pos]), insn);
	      bundle_pos++;
	    }
	  if (bundle_pos < 3)
	    bundle_pos++;
	}
    }
}

/* Perform machine dependent operations on the rtl chain INSNS.  */

void
ia64_reorg (insns)
     rtx insns;
{
  /* We are freeing block_for_insn in the toplev to keep compatibility
     with old MDEP_REORGS that are not CFG based.  Recompute it now.  */
  compute_bb_for_insn ();

  /* If optimizing, we'll have split before scheduling.  */
  if (optimize == 0)
    split_all_insns (0);

  /* ??? update_life_info_in_dirty_blocks fails to terminate during
     non-optimizing bootstrap.  */
  update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);

  if (ia64_flag_schedule_insns2)
    {
      timevar_push (TV_SCHED2);
      ia64_final_schedule = 1;
      schedule_ebbs (rtl_dump_file);
      ia64_final_schedule = 0;
      timevar_pop (TV_SCHED2);

      /* This relies on the NOTE_INSN_BASIC_BLOCK notes to be in the same
	 place as they were during scheduling.  */
      emit_insn_group_barriers (rtl_dump_file, insns);
      ia64_emit_nops ();
    }
  else
    emit_all_insn_group_barriers (rtl_dump_file, insns);

  /* A call must not be the last instruction in a function, so that the
     return address is still within the function, so that unwinding works
     properly.  Note that IA-64 differs from dwarf2 on this point.  */
  if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
    {
      rtx insn;
      int saw_stop = 0;

      insn = get_last_insn ();
      if (! INSN_P (insn))
	insn = prev_active_insn (insn);
      if (GET_CODE (insn) == INSN
	  && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
	  && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
	{
	  saw_stop = 1;
	  insn = prev_active_insn (insn);
	}
      if (GET_CODE (insn) == CALL_INSN)
	{
	  if (! saw_stop)
	    emit_insn (gen_insn_group_barrier (GEN_INT (3)));
	  emit_insn (gen_break_f ());
	  emit_insn (gen_insn_group_barrier (GEN_INT (3)));
	}
    }

  fixup_errata ();
  emit_predicate_relation_info ();
}

/* Return true if REGNO is used by the epilogue.  */

int
ia64_epilogue_uses (regno)
     int regno;
{
  switch (regno)
    {
    case R_GR (1):
      /* When a function makes a call through a function descriptor, we
	 will write a (potentially) new value to "gp".  After returning
	 from such a call, we need to make sure the function restores the
	 original gp-value, even if the function itself does not use the
	 gp anymore.  */
      return (TARGET_CONST_GP && !(TARGET_AUTO_PIC || TARGET_NO_PIC));

    case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
    case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
      /* For functions defined with the syscall_linkage attribute, all
	 input registers are marked as live at all function exits.  This
	 prevents the register allocator from using the input registers,
	 which in turn makes it possible to restart a system call after
	 an interrupt without having to save/restore the input registers.
	 This also prevents kernel data from leaking to application code.  */
      return lookup_attribute ("syscall_linkage",
	   TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;

    case R_BR (0):
      /* Conditional return patterns can't represent the use of `b0' as
	 the return address, so we force the value live this way.  */
      return 1;

    case AR_PFS_REGNUM:
      /* Likewise for ar.pfs, which is used by br.ret.  */
      return 1;

    default:
      return 0;
    }
}

/* Return true if REGNO is used by the frame unwinder.  */

int
ia64_eh_uses (regno)
     int regno;
{
  if (! reload_completed)
    return 0;

  /* A register is EH-live when the current frame saves one of the
     unwind-relevant registers (rp, pr, ar.pfs, ar.unat, ar.lc) in it.  */
  if (current_frame_info.reg_save_b0
      && regno == current_frame_info.reg_save_b0)
    return 1;
  if (current_frame_info.reg_save_pr
      && regno == current_frame_info.reg_save_pr)
    return 1;
  if (current_frame_info.reg_save_ar_pfs
      && regno == current_frame_info.reg_save_ar_pfs)
    return 1;
  if (current_frame_info.reg_save_ar_unat
      && regno == current_frame_info.reg_save_ar_unat)
    return 1;
  if (current_frame_info.reg_save_ar_lc
      && regno == current_frame_info.reg_save_ar_lc)
    return 1;

  return 0;
}

/* For ia64, SYMBOL_REF_FLAG set means that it is a function.

   We add @ to the name if this goes in small data/bss.  We can only put
   a variable in small data/bss if it is defined in this module or a
   module that we are statically linked with.  We can't check the second
   condition, but TREE_STATIC gives us the first one.  */

/* ??? If we had IPA, we could check the second condition.
   We could support programmer added section attributes if the variable is
   not defined in this module.  */

/* ??? See the v850 port for a cleaner way to do this.  */

/* ??? We could also support own long data here.  Generating movl/add/ld8
   instead of addl,ld8/ld8.  This makes the code bigger, but should make
   the code faster because there is one less load.  This also includes
   incomplete types which can't go in sdata/sbss.  */

static bool
ia64_in_small_data_p (exp)
     tree exp;
{
  if (TARGET_NO_SDATA)
    return false;

  /* An explicit section name decides the matter directly.  */
  if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
      if (strcmp (section, ".sdata") == 0
	  || strcmp (section, ".sbss") == 0)
	return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));

      /* If this is an incomplete type with size 0, then we can't put it
	 in sdata because it might be too big when completed.  */
      if (size > 0 && size <= ia64_section_threshold)
	return true;
    }

  return false;
}

/* TARGET_ENCODE_SECTION_INFO hook: record section/TLS placement of DECL
   by prefixing its assembler name with ENCODE_SECTION_INFO_CHAR plus an
   encoding character ('s' for sdata/sbss, one of "GLil" for TLS models),
   and set SYMBOL_REF_FLAG on function symbols.  */

static void
ia64_encode_section_info (decl, first)
     tree decl;
     int first ATTRIBUTE_UNUSED;
{
  const char *symbol_str;
  bool is_local;
  rtx symbol;
  char encoding = 0;

  if (TREE_CODE (decl) == FUNCTION_DECL)
    {
      SYMBOL_REF_FLAG (XEXP (DECL_RTL (decl), 0)) = 1;
      return;
    }

  /* Careful not to prod global register variables.  */
  if (TREE_CODE (decl) != VAR_DECL
      || GET_CODE (DECL_RTL (decl)) != MEM
      || GET_CODE (XEXP (DECL_RTL (decl), 0)) != SYMBOL_REF)
    return;

  symbol = XEXP (DECL_RTL (decl), 0);
  symbol_str = XSTR (symbol, 0);

  is_local = (*targetm.binds_local_p) (decl);

  /* The TLS model (1..4) indexes into " GLil" to pick the encoding.  */
  if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL (decl))
    encoding = " GLil"[decl_tls_model (decl)];
  /* Determine if DECL will wind up in .sdata/.sbss.  */
  else if (is_local && ia64_in_small_data_p (decl))
    encoding = 's';

  /* Finally, encode this into the symbol string.  */
  if (encoding)
    {
      char *newstr;
      size_t len;

      if (symbol_str[0] == ENCODE_SECTION_INFO_CHAR)
	{
	  if (encoding == symbol_str[1])
	    return;
	  /* ??? Sdata became thread or thread became not thread.  Lose.  */
	  abort ();
	}

      len = strlen (symbol_str);
      newstr = alloca (len + 3);
      newstr[0] = ENCODE_SECTION_INFO_CHAR;
      newstr[1] = encoding;
      memcpy (newstr + 2, symbol_str, len + 1);

      XSTR (symbol, 0) = ggc_alloc_string (newstr, len + 2);
    }

  /* This decl is marked as being in small data/bss but it shouldn't be;
     one likely explanation for this is that the decl has been moved into
     a different section from the one it was in when encode_section_info
     was first called.  Remove the encoding.  */
  else if (symbol_str[0] == ENCODE_SECTION_INFO_CHAR)
    XSTR (symbol, 0) = ggc_strdup (symbol_str + 2);
}

/* TARGET_STRIP_NAME_ENCODING hook: undo ia64_encode_section_info's
   two-character prefix, plus any leading '*'.  */

static const char *
ia64_strip_name_encoding (str)
     const char *str;
{
  if (str[0] == ENCODE_SECTION_INFO_CHAR)
    str += 2;
  if (str[0] == '*')
    str++;
  return str;
}

/* True if it is OK to do sibling call optimization for the specified
   call expression EXP.  DECL will be the called function, or NULL if
   this is an indirect call.  */
bool
ia64_function_ok_for_sibcall (decl)
     tree decl;
{
  /* Direct calls are always ok.  */
  if (decl)
    return true;

  /* If TARGET_CONST_GP is in effect, then our caller expects us to
     return with our current GP.  This means that we'll always have
     a GP reload after an indirect call.  */
  return !ia64_epilogue_uses (R_GR (1));
}

/* Output assembly directives for prologue regions.  */

/* True if the current basic block is the last one of the function
   (set by process_for_unwind_directive).  */

static bool last_block;

/* True if we need a copy_state command at the start of the next block.  */

static bool need_copy_state;

/* The function emits unwind directives for the start of an epilogue.  */

static void
process_epilogue ()
{
  /* If this isn't the last block of the function, then we need to label the
     current state, and copy it back in at the start of the next block.  */

  if (!last_block)
    {
      fprintf (asm_out_file, "\t.label_state 1\n");
      need_copy_state = true;
    }

  fprintf (asm_out_file, "\t.restore sp\n");
}

/* This function processes a SET pattern looking for specific patterns
   which result in emitting an assembly directive required for unwinding.
   Returns nonzero if a directive was emitted (or the SET was recognized),
   zero otherwise.  */

static int
process_set (asm_out_file, pat)
     FILE *asm_out_file;
     rtx pat;
{
  rtx src = SET_SRC (pat);
  rtx dest = SET_DEST (pat);
  int src_regno, dest_regno;

  /* Look for the ALLOC insn.  */
  if (GET_CODE (src) == UNSPEC_VOLATILE
      && XINT (src, 1) == UNSPECV_ALLOC
      && GET_CODE (dest) == REG)
    {
      dest_regno = REGNO (dest);

      /* If this isn't the final destination for ar.pfs, the alloc
	 shouldn't have been marked frame related.  */
      if (dest_regno != current_frame_info.reg_save_ar_pfs)
	abort ();

      fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
	       ia64_dbx_register_number (dest_regno));
      return 1;
    }

  /* Look for SP = ....  */
  if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
    {
      if (GET_CODE (src) == PLUS)
	{
	  rtx op0 = XEXP (src, 0);
	  rtx op1 = XEXP (src, 1);
	  if (op0 == dest && GET_CODE (op1) == CONST_INT)
	    {
	      /* Negative adjustment allocates the frame (.fframe);
		 positive adjustment deallocates it (epilogue).  */
	      if (INTVAL (op1) < 0)
		{
		  fputs ("\t.fframe ", asm_out_file);
		  fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC,
			   -INTVAL (op1));
		  fputc ('\n', asm_out_file);
		}
	      else
		process_epilogue ();
	    }
	  else
	    abort ();
	}
      else if (GET_CODE (src) == REG
	       && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
	process_epilogue ();
      else
	abort ();

      return 1;
    }

  /* Register move we need to look at.  */
  if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
    {
      src_regno = REGNO (src);
      dest_regno = REGNO (dest);

      switch (src_regno)
	{
	case BR_REG (0):
	  /* Saving return address pointer.  */
	  if (dest_regno != current_frame_info.reg_save_b0)
	    abort ();
	  fprintf (asm_out_file, "\t.save rp, r%d\n",
		   ia64_dbx_register_number (dest_regno));
	  return 1;

	case PR_REG (0):
	  if (dest_regno != current_frame_info.reg_save_pr)
	    abort ();
	  fprintf (asm_out_file, "\t.save pr, r%d\n",
		   ia64_dbx_register_number (dest_regno));
	  return 1;

	case AR_UNAT_REGNUM:
	  if (dest_regno != current_frame_info.reg_save_ar_unat)
	    abort ();
	  fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
		   ia64_dbx_register_number (dest_regno));
	  return 1;

	case AR_LC_REGNUM:
	  if (dest_regno != current_frame_info.reg_save_ar_lc)
	    abort ();
	  fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
		   ia64_dbx_register_number (dest_regno));
	  return 1;

	case STACK_POINTER_REGNUM:
	  if (dest_regno != HARD_FRAME_POINTER_REGNUM
	      || ! frame_pointer_needed)
	    abort ();
	  fprintf (asm_out_file, "\t.vframe r%d\n",
		   ia64_dbx_register_number (dest_regno));
	  return 1;

	default:
	  /* Everything else should indicate being stored to memory.  */
	  abort ();
	}
    }

  /* Memory store we need to look at.  */
  if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
    {
      long off;
      rtx base;
      const char *saveop;

      /* Decompose the address into BASE plus constant OFF.  */
      if (GET_CODE (XEXP (dest, 0)) == REG)
	{
	  base = XEXP (dest, 0);
	  off = 0;
	}
      else if (GET_CODE (XEXP (dest, 0)) == PLUS
	       && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT)
	{
	  base = XEXP (XEXP (dest, 0), 0);
	  off = INTVAL (XEXP (XEXP (dest, 0), 1));
	}
      else
	abort ();

      if (base == hard_frame_pointer_rtx)
	{
	  saveop = ".savepsp";
	  off = - off;
	}
      else if (base == stack_pointer_rtx)
	saveop = ".savesp";
      else
	abort ();

      src_regno = REGNO (src);
      switch (src_regno)
	{
	case BR_REG (0):
	  if (current_frame_info.reg_save_b0 != 0)
	    abort ();
	  fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
	  return 1;

	case PR_REG (0):
	  if (current_frame_info.reg_save_pr != 0)
	    abort ();
	  fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
	  return 1;

	case AR_LC_REGNUM:
	  if (current_frame_info.reg_save_ar_lc != 0)
	    abort ();
	  fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
	  return 1;

	case AR_PFS_REGNUM:
	  if (current_frame_info.reg_save_ar_pfs != 0)
	    abort ();
	  fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
	  return 1;

	case AR_UNAT_REGNUM:
	  if (current_frame_info.reg_save_ar_unat != 0)
	    abort ();
	  fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
	  return 1;

	case GR_REG (4):
	case GR_REG (5):
	case GR_REG (6):
	case GR_REG (7):
	  fprintf (asm_out_file, "\t.save.g 0x%x\n",
		   1 << (src_regno - GR_REG (4)));
	  return 1;

	case BR_REG (1):
	case BR_REG (2):
	case BR_REG (3):
	case BR_REG (4):
	case BR_REG (5):
	  fprintf (asm_out_file, "\t.save.b 0x%x\n",
		   1 << (src_regno - BR_REG (1)));
	  return 1;

	case FR_REG (2):
	case FR_REG (3):
	case FR_REG (4):
	case FR_REG (5):
	  fprintf (asm_out_file, "\t.save.f 0x%x\n",
		   1 << (src_regno - FR_REG (2)));
	  return 1;

	case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
	case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
	case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
	case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
	  /* NOTE(review): mask is biased by FR_REG (12) although the cases
	     start at FR_REG (16), putting f16..f31 at bits 4..19 -- confirm
	     against the .save.gf unwind directive specification.  */
	  fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
		   1 << (src_regno - FR_REG (12)));
	  return 1;

	default:
	  return 0;
	}
    }

  return 0;
}


/* This function looks at a single insn and emits any directives
   required to unwind this insn.  */
void
process_for_unwind_directive (asm_out_file, insn)
     FILE *asm_out_file;
     rtx insn;
{
  if (flag_unwind_tables
      || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
    {
      rtx pat;

      if (GET_CODE (insn) == NOTE
	  && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
	{
	  last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;

	  /* Restore unwind state from immediately before the epilogue.  */
	  if (need_copy_state)
	    {
	      fprintf (asm_out_file, "\t.body\n");
	      fprintf (asm_out_file, "\t.copy_state 1\n");
	      need_copy_state = false;
	    }
	}

      if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
	return;

      /* Prefer the REG_FRAME_RELATED_EXPR note when present; it describes
	 the frame effect more directly than the raw pattern.  */
      pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
      if (pat)
	pat = XEXP (pat, 0);
      else
	pat = PATTERN (insn);

      switch (GET_CODE (pat))
	{
	case SET:
	  process_set (asm_out_file, pat);
	  break;

	case PARALLEL:
	  {
	    int par_index;
	    int limit = XVECLEN (pat, 0);
	    for (par_index = 0; par_index < limit; par_index++)
	      {
		rtx x = XVECEXP (pat, 0, par_index);
		if (GET_CODE (x) == SET)
		  process_set (asm_out_file, x);
	      }
	    break;
	  }

	default:
	  abort ();
	}
    }
}


/* Register the ia64-specific builtin functions: the __sync_* atomic
   intrinsics (SI and DI variants) plus __builtin_ia64_bsp and
   __builtin_ia64_flushrs.  */

void
ia64_init_builtins ()
{
  tree psi_type_node = build_pointer_type (integer_type_node);
  tree pdi_type_node = build_pointer_type (long_integer_type_node);

  /* __sync_val_compare_and_swap_si, __sync_bool_compare_and_swap_si */
  tree si_ftype_psi_si_si
    = build_function_type_list (integer_type_node,
				psi_type_node, integer_type_node,
				integer_type_node, NULL_TREE);

  /* __sync_val_compare_and_swap_di, __sync_bool_compare_and_swap_di */
  tree di_ftype_pdi_di_di
    = build_function_type_list (long_integer_type_node,
				pdi_type_node, long_integer_type_node,
				long_integer_type_node, NULL_TREE);
  /* __sync_synchronize */
  tree void_ftype_void
    = build_function_type (void_type_node, void_list_node);

  /* __sync_lock_test_and_set_si */
  tree si_ftype_psi_si
    = build_function_type_list (integer_type_node,
				psi_type_node, integer_type_node, NULL_TREE);

  /* __sync_lock_test_and_set_di */
  tree di_ftype_pdi_di
    = build_function_type_list (long_integer_type_node,
				pdi_type_node, long_integer_type_node,
				NULL_TREE);

  /* __sync_lock_release_si */
  tree void_ftype_psi
    = build_function_type_list (void_type_node, psi_type_node, NULL_TREE);

  /* __sync_lock_release_di */
  tree void_ftype_pdi
    = build_function_type_list (void_type_node, pdi_type_node, NULL_TREE);

#define def_builtin(name, type, code) \
  builtin_function ((name), (type), (code), BUILT_IN_MD, NULL, NULL_TREE)

  def_builtin ("__sync_val_compare_and_swap_si", si_ftype_psi_si_si,
	       IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI);
  def_builtin ("__sync_val_compare_and_swap_di", di_ftype_pdi_di_di,
	       IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI);
  def_builtin ("__sync_bool_compare_and_swap_si", si_ftype_psi_si_si,
	       IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI);
  def_builtin ("__sync_bool_compare_and_swap_di", di_ftype_pdi_di_di,
	       IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI);

  def_builtin ("__sync_synchronize", void_ftype_void,
	       IA64_BUILTIN_SYNCHRONIZE);

  def_builtin ("__sync_lock_test_and_set_si", si_ftype_psi_si,
	       IA64_BUILTIN_LOCK_TEST_AND_SET_SI);
  def_builtin ("__sync_lock_test_and_set_di", di_ftype_pdi_di,
	       IA64_BUILTIN_LOCK_TEST_AND_SET_DI);
  def_builtin ("__sync_lock_release_si", void_ftype_psi,
	       IA64_BUILTIN_LOCK_RELEASE_SI);
  def_builtin ("__sync_lock_release_di", void_ftype_pdi,
	       IA64_BUILTIN_LOCK_RELEASE_DI);

  def_builtin ("__builtin_ia64_bsp",
	       build_function_type (ptr_type_node, void_list_node),
	       IA64_BUILTIN_BSP);

  def_builtin ("__builtin_ia64_flushrs",
	       build_function_type (void_type_node, void_list_node),
	       IA64_BUILTIN_FLUSHRS);

  def_builtin ("__sync_fetch_and_add_si", si_ftype_psi_si,
	       IA64_BUILTIN_FETCH_AND_ADD_SI);
  def_builtin ("__sync_fetch_and_sub_si", si_ftype_psi_si,
	       IA64_BUILTIN_FETCH_AND_SUB_SI);
  def_builtin ("__sync_fetch_and_or_si", si_ftype_psi_si,
	       IA64_BUILTIN_FETCH_AND_OR_SI);
  def_builtin ("__sync_fetch_and_and_si", si_ftype_psi_si,
	       IA64_BUILTIN_FETCH_AND_AND_SI);
  def_builtin ("__sync_fetch_and_xor_si", si_ftype_psi_si,
	       IA64_BUILTIN_FETCH_AND_XOR_SI);
  def_builtin ("__sync_fetch_and_nand_si", si_ftype_psi_si,
	       IA64_BUILTIN_FETCH_AND_NAND_SI);
  def_builtin ("__sync_add_and_fetch_si", si_ftype_psi_si,
	       IA64_BUILTIN_ADD_AND_FETCH_SI);
  def_builtin ("__sync_sub_and_fetch_si", si_ftype_psi_si,
	       IA64_BUILTIN_SUB_AND_FETCH_SI);
  def_builtin ("__sync_or_and_fetch_si", si_ftype_psi_si,
	       IA64_BUILTIN_OR_AND_FETCH_SI);
  def_builtin ("__sync_and_and_fetch_si", si_ftype_psi_si,
	       IA64_BUILTIN_AND_AND_FETCH_SI);
  def_builtin ("__sync_xor_and_fetch_si", si_ftype_psi_si,
	       IA64_BUILTIN_XOR_AND_FETCH_SI);
  def_builtin ("__sync_nand_and_fetch_si", si_ftype_psi_si,
	       IA64_BUILTIN_NAND_AND_FETCH_SI);

  def_builtin ("__sync_fetch_and_add_di", di_ftype_pdi_di,
	       IA64_BUILTIN_FETCH_AND_ADD_DI);
  def_builtin ("__sync_fetch_and_sub_di", di_ftype_pdi_di,
	       IA64_BUILTIN_FETCH_AND_SUB_DI);
  def_builtin ("__sync_fetch_and_or_di", di_ftype_pdi_di,
	       IA64_BUILTIN_FETCH_AND_OR_DI);
  def_builtin ("__sync_fetch_and_and_di", di_ftype_pdi_di,
	       IA64_BUILTIN_FETCH_AND_AND_DI);
  def_builtin ("__sync_fetch_and_xor_di", di_ftype_pdi_di,
	       IA64_BUILTIN_FETCH_AND_XOR_DI);
  def_builtin ("__sync_fetch_and_nand_di", di_ftype_pdi_di,
	       IA64_BUILTIN_FETCH_AND_NAND_DI);
  def_builtin ("__sync_add_and_fetch_di", di_ftype_pdi_di,
	       IA64_BUILTIN_ADD_AND_FETCH_DI);
  def_builtin ("__sync_sub_and_fetch_di", di_ftype_pdi_di,
	       IA64_BUILTIN_SUB_AND_FETCH_DI);
  def_builtin ("__sync_or_and_fetch_di", di_ftype_pdi_di,
	       IA64_BUILTIN_OR_AND_FETCH_DI);
  def_builtin ("__sync_and_and_fetch_di", di_ftype_pdi_di,
	       IA64_BUILTIN_AND_AND_FETCH_DI);
  def_builtin ("__sync_xor_and_fetch_di", di_ftype_pdi_di,
	       IA64_BUILTIN_XOR_AND_FETCH_DI);
  def_builtin ("__sync_nand_and_fetch_di", di_ftype_pdi_di,
	       IA64_BUILTIN_NAND_AND_FETCH_DI);

#undef def_builtin
}

/* Expand fetch_and_op intrinsics.
   The basic code sequence is:

     mf
     tmp = [ptr];
     do {
       ret = tmp;
       ar.ccv = tmp;
       tmp <op>= value;
       cmpxchgsz.acq tmp = [ptr], tmp
     } while (tmp != ret)
*/

/* Emit RTL for a __sync_fetch_and_<op> builtin.  BINOPTAB selects the
   operation (one_cmpl_optab encodes NAND), MODE is SImode or DImode,
   ARGLIST is the (ptr, value) argument list, and TARGET is a suggested
   result register (may be NULL).  Returns an rtx holding the value of
   the memory location before the operation.  */

static rtx
ia64_expand_fetch_and_op (binoptab, mode, arglist, target)
     optab binoptab;
     enum machine_mode mode;
     tree arglist;
     rtx target;
{
  rtx ret, label, tmp, ccv, insn, mem, value;
  tree arg0, arg1;

  arg0 = TREE_VALUE (arglist);
  arg1 = TREE_VALUE (TREE_CHAIN (arglist));
  mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
#ifdef POINTERS_EXTEND_UNSIGNED
  if (GET_MODE(mem) != Pmode)
    mem = convert_memory_address (Pmode, mem);
#endif
  value = expand_expr (arg1, NULL_RTX, mode, 0);

  mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
  MEM_VOLATILE_P (mem) = 1;

  if (target && register_operand (target, mode))
    ret = target;
  else
    ret = gen_reg_rtx (mode);

  emit_insn (gen_mf ());

  /* Special case for fetchadd instructions.  */
  if (binoptab == add_optab && fetchadd_operand (value, VOIDmode))
    {
      if (mode == SImode)
	insn = gen_fetchadd_acq_si (ret, mem, value);
      else
	insn = gen_fetchadd_acq_di (ret, mem, value);
      emit_insn (insn);
      return ret;
    }

  /* General case: a compare-and-swap retry loop.  */
  tmp = gen_reg_rtx (mode);
  ccv = gen_rtx_REG (mode, AR_CCV_REGNUM);
  emit_move_insn (tmp, mem);

  label = gen_label_rtx ();
  emit_label (label);
  emit_move_insn (ret, tmp);
  emit_move_insn (ccv, tmp);

  /* Perform the specific operation.  Special case NAND by noticing
     one_cmpl_optab instead.  */
  if (binoptab == one_cmpl_optab)
    {
      tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
      binoptab = and_optab;
    }
  tmp = expand_binop (mode, binoptab, tmp, value, tmp, 1, OPTAB_WIDEN);

  if (mode == SImode)
    insn = gen_cmpxchg_acq_si (tmp, mem, tmp, ccv);
  else
    insn = gen_cmpxchg_acq_di (tmp, mem, tmp, ccv);
  emit_insn (insn);

  /* Loop until the cmpxchg observed the value we based the update on.  */
  emit_cmp_and_jump_insns (tmp, ret, NE, 0, mode, 1, label);

  return ret;
}

/* Expand op_and_fetch intrinsics.
   The basic code sequence is:

     mf
     tmp = [ptr];
     do {
       old = tmp;
       ar.ccv = tmp;
       ret = tmp <op> value;
       cmpxchgsz.acq tmp = [ptr], ret
     } while (tmp != old)
*/

/* Emit RTL for a __sync_<op>_and_fetch builtin.  Like
   ia64_expand_fetch_and_op, but returns the value of the memory
   location AFTER the operation.  BINOPTAB selects the operation
   (one_cmpl_optab encodes NAND), MODE is SImode or DImode, ARGLIST is
   the (ptr, value) argument list, and TARGET is a suggested result
   register (may be NULL).  */

static rtx
ia64_expand_op_and_fetch (binoptab, mode, arglist, target)
     optab binoptab;
     enum machine_mode mode;
     tree arglist;
     rtx target;
{
  rtx old, label, tmp, ret, ccv, insn, mem, value;
  tree arg0, arg1;

  arg0 = TREE_VALUE (arglist);
  arg1 = TREE_VALUE (TREE_CHAIN (arglist));
  mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
#ifdef POINTERS_EXTEND_UNSIGNED
  if (GET_MODE(mem) != Pmode)
    mem = convert_memory_address (Pmode, mem);
#endif

  value = expand_expr (arg1, NULL_RTX, mode, 0);

  mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
  MEM_VOLATILE_P (mem) = 1;

  if (target && ! register_operand (target, mode))
    target = NULL_RTX;

  emit_insn (gen_mf ());
  tmp = gen_reg_rtx (mode);
  old = gen_reg_rtx (mode);
  ccv = gen_rtx_REG (mode, AR_CCV_REGNUM);

  emit_move_insn (tmp, mem);

  label = gen_label_rtx ();
  emit_label (label);
  emit_move_insn (old, tmp);
  emit_move_insn (ccv, tmp);

  /* Perform the specific operation.  Special case NAND by noticing
     one_cmpl_optab instead.  */
  if (binoptab == one_cmpl_optab)
    {
      tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
      binoptab = and_optab;
    }

  ret = expand_binop (mode, binoptab, tmp, value, target, 1, OPTAB_WIDEN);

  if (mode == SImode)
    insn = gen_cmpxchg_acq_si (tmp, mem, ret, ccv);
  else
    insn = gen_cmpxchg_acq_di (tmp, mem, ret, ccv);
  emit_insn (insn);

  /* Loop until the cmpxchg observed the value we based the update on.  */
  emit_cmp_and_jump_insns (tmp, old, NE, 0, mode, 1, label);

  return ret;
}

/* Expand val_ and bool_compare_and_swap.  For val_ we want:

     ar.ccv = oldval
     mf
     cmpxchgsz.acq ret = [ptr], newval, ar.ccv
     return ret

   For bool_ it's the same except return ret == oldval.
*/

static rtx
ia64_expand_compare_and_swap (mode, boolp, arglist, target)
     enum machine_mode mode;
     int boolp;
     tree arglist;
     rtx target;
{
  tree arg0, arg1, arg2;
  rtx mem, old, new, ccv, tmp, insn;

  arg0 = TREE_VALUE (arglist);
  arg1 = TREE_VALUE (TREE_CHAIN (arglist));
  arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
  mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
  old = expand_expr (arg1, NULL_RTX, mode, 0);
  new = expand_expr (arg2, NULL_RTX, mode, 0);

  mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
  MEM_VOLATILE_P (mem) = 1;

  if (! register_operand (old, mode))
    old = copy_to_mode_reg (mode, old);
  if (! register_operand (new, mode))
    new = copy_to_mode_reg (mode, new);

  if (! boolp && target && register_operand (target, mode))
    tmp = target;
  else
    tmp = gen_reg_rtx (mode);

  /* ar.ccv is always DImode; zero-extend the SImode old value into it.  */
  ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
  if (mode == DImode)
    emit_move_insn (ccv, old);
  else
    {
      rtx ccvtmp = gen_reg_rtx (DImode);
      emit_insn (gen_zero_extendsidi2 (ccvtmp, old));
      emit_move_insn (ccv, ccvtmp);
    }
  emit_insn (gen_mf ());
  if (mode == SImode)
    insn = gen_cmpxchg_acq_si (tmp, mem, new, ccv);
  else
    insn = gen_cmpxchg_acq_di (tmp, mem, new, ccv);
  emit_insn (insn);

  if (boolp)
    {
      if (! target)
	target = gen_reg_rtx (mode);
      return emit_store_flag_force (target, EQ, tmp, old, mode, 1, 1);
    }
  else
    return tmp;
}

/* Expand lock_test_and_set.  I.e. `xchgsz ret = [ptr], new'.  */

static rtx
ia64_expand_lock_test_and_set (mode, arglist, target)
     enum machine_mode mode;
     tree arglist;
     rtx target;
{
  tree arg0, arg1;
  rtx mem, new, ret, insn;

  arg0 = TREE_VALUE (arglist);
  arg1 = TREE_VALUE (TREE_CHAIN (arglist));
  mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
  new = expand_expr (arg1, NULL_RTX, mode, 0);

  mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
  MEM_VOLATILE_P (mem) = 1;
  if (! register_operand (new, mode))
    new = copy_to_mode_reg (mode, new);

  if (target && register_operand (target, mode))
    ret = target;
  else
    ret = gen_reg_rtx (mode);

  if (mode == SImode)
    insn = gen_xchgsi (ret, mem, new);
  else
    insn = gen_xchgdi (ret, mem, new);
  emit_insn (insn);

  return ret;
}

/* Expand lock_release.  I.e. `stsz.rel [ptr] = r0'.  */

static rtx
ia64_expand_lock_release (mode, arglist, target)
     enum machine_mode mode;
     tree arglist;
     rtx target ATTRIBUTE_UNUSED;
{
  tree arg0;
  rtx mem;

  arg0 = TREE_VALUE (arglist);
  mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);

  mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
  MEM_VOLATILE_P (mem) = 1;

  emit_move_insn (mem, const0_rtx);

  return const0_rtx;
}

/* TARGET_EXPAND_BUILTIN hook: dispatch the ia64 builtins registered by
   ia64_init_builtins to their expanders.  */

rtx
ia64_expand_builtin (exp, target, subtarget, mode, ignore)
     tree exp;
     rtx target;
     rtx subtarget ATTRIBUTE_UNUSED;
     enum machine_mode mode ATTRIBUTE_UNUSED;
     int ignore ATTRIBUTE_UNUSED;
{
  tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arglist = TREE_OPERAND (exp, 1);

  /* First pass: derive the operand mode from the builtin code.  */
  switch (fcode)
    {
    case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
    case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
    case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
    case IA64_BUILTIN_LOCK_RELEASE_SI:
    case IA64_BUILTIN_FETCH_AND_ADD_SI:
    case IA64_BUILTIN_FETCH_AND_SUB_SI:
    case IA64_BUILTIN_FETCH_AND_OR_SI:
    case IA64_BUILTIN_FETCH_AND_AND_SI:
    case IA64_BUILTIN_FETCH_AND_XOR_SI:
    case IA64_BUILTIN_FETCH_AND_NAND_SI:
    case IA64_BUILTIN_ADD_AND_FETCH_SI:
    case IA64_BUILTIN_SUB_AND_FETCH_SI:
    case IA64_BUILTIN_OR_AND_FETCH_SI:
    case IA64_BUILTIN_AND_AND_FETCH_SI:
    case IA64_BUILTIN_XOR_AND_FETCH_SI:
    case IA64_BUILTIN_NAND_AND_FETCH_SI:
      mode = SImode;
      break;

    case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
    case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
    case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
    case IA64_BUILTIN_LOCK_RELEASE_DI:
    case IA64_BUILTIN_FETCH_AND_ADD_DI:
    case IA64_BUILTIN_FETCH_AND_SUB_DI:
    case IA64_BUILTIN_FETCH_AND_OR_DI:
    case IA64_BUILTIN_FETCH_AND_AND_DI:
    case IA64_BUILTIN_FETCH_AND_XOR_DI:
    case IA64_BUILTIN_FETCH_AND_NAND_DI:
    case IA64_BUILTIN_ADD_AND_FETCH_DI:
    case IA64_BUILTIN_SUB_AND_FETCH_DI:
    case IA64_BUILTIN_OR_AND_FETCH_DI:
    case IA64_BUILTIN_AND_AND_FETCH_DI:
    case IA64_BUILTIN_XOR_AND_FETCH_DI:
    case IA64_BUILTIN_NAND_AND_FETCH_DI:
      mode = DImode;
      break;

    default:
      break;
    }

  /* Second pass: expand.  */
  switch (fcode)
    {
    case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
    case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
      return ia64_expand_compare_and_swap (mode, 1, arglist, target);

    case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
    case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
      return ia64_expand_compare_and_swap (mode, 0, arglist, target);

    case IA64_BUILTIN_SYNCHRONIZE:
      emit_insn (gen_mf ());
      return const0_rtx;

    case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
    case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
      return ia64_expand_lock_test_and_set (mode, arglist, target);

    case IA64_BUILTIN_LOCK_RELEASE_SI:
    case IA64_BUILTIN_LOCK_RELEASE_DI:
      return ia64_expand_lock_release (mode, arglist, target);

    case IA64_BUILTIN_BSP:
      if (! target || !
register_operand (target, DImode)) target = gen_reg_rtx (DImode); emit_insn (gen_bsp_value (target)); return target; case IA64_BUILTIN_FLUSHRS: emit_insn (gen_flushrs ()); return const0_rtx; case IA64_BUILTIN_FETCH_AND_ADD_SI: case IA64_BUILTIN_FETCH_AND_ADD_DI: return ia64_expand_fetch_and_op (add_optab, mode, arglist, target); case IA64_BUILTIN_FETCH_AND_SUB_SI: case IA64_BUILTIN_FETCH_AND_SUB_DI: return ia64_expand_fetch_and_op (sub_optab, mode, arglist, target); case IA64_BUILTIN_FETCH_AND_OR_SI: case IA64_BUILTIN_FETCH_AND_OR_DI: return ia64_expand_fetch_and_op (ior_optab, mode, arglist, target); case IA64_BUILTIN_FETCH_AND_AND_SI: case IA64_BUILTIN_FETCH_AND_AND_DI: return ia64_expand_fetch_and_op (and_optab, mode, arglist, target); case IA64_BUILTIN_FETCH_AND_XOR_SI: case IA64_BUILTIN_FETCH_AND_XOR_DI: return ia64_expand_fetch_and_op (xor_optab, mode, arglist, target); case IA64_BUILTIN_FETCH_AND_NAND_SI: case IA64_BUILTIN_FETCH_AND_NAND_DI: return ia64_expand_fetch_and_op (one_cmpl_optab, mode, arglist, target); case IA64_BUILTIN_ADD_AND_FETCH_SI: case IA64_BUILTIN_ADD_AND_FETCH_DI: return ia64_expand_op_and_fetch (add_optab, mode, arglist, target); case IA64_BUILTIN_SUB_AND_FETCH_SI: case IA64_BUILTIN_SUB_AND_FETCH_DI: return ia64_expand_op_and_fetch (sub_optab, mode, arglist, target); case IA64_BUILTIN_OR_AND_FETCH_SI: case IA64_BUILTIN_OR_AND_FETCH_DI: return ia64_expand_op_and_fetch (ior_optab, mode, arglist, target); case IA64_BUILTIN_AND_AND_FETCH_SI: case IA64_BUILTIN_AND_AND_FETCH_DI: return ia64_expand_op_and_fetch (and_optab, mode, arglist, target); case IA64_BUILTIN_XOR_AND_FETCH_SI: case IA64_BUILTIN_XOR_AND_FETCH_DI: return ia64_expand_op_and_fetch (xor_optab, mode, arglist, target); case IA64_BUILTIN_NAND_AND_FETCH_SI: case IA64_BUILTIN_NAND_AND_FETCH_DI: return ia64_expand_op_and_fetch (one_cmpl_optab, mode, arglist, target); default: break; } return NULL_RTX; } /* For the HP-UX IA64 aggregate parameters are passed stored in the most 
significant bits of the stack slot. */ enum direction ia64_hpux_function_arg_padding (mode, type) enum machine_mode mode; tree type; { /* Exception to normal case for structures/unions/etc. */ if (type && AGGREGATE_TYPE_P (type) && int_size_in_bytes (type) < UNITS_PER_WORD) return upward; /* This is the standard FUNCTION_ARG_PADDING with !BYTES_BIG_ENDIAN hardwired to be true. */ return((mode == BLKmode ? (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST && int_size_in_bytes (type) < (PARM_BOUNDARY / BITS_PER_UNIT)) : GET_MODE_BITSIZE (mode) < PARM_BOUNDARY) ? downward : upward); } /* Linked list of all external functions that are to be emitted by GCC. We output the name if and only if TREE_SYMBOL_REFERENCED is set in order to avoid putting out names that are never really used. */ struct extern_func_list { struct extern_func_list *next; /* next external */ char *name; /* name of the external */ } *extern_func_head = 0; static void ia64_hpux_add_extern_decl (name) const char *name; { struct extern_func_list *p; p = (struct extern_func_list *) xmalloc (sizeof (struct extern_func_list)); p->name = xmalloc (strlen (name) + 1); strcpy(p->name, name); p->next = extern_func_head; extern_func_head = p; } /* Print out the list of used global functions. */ void ia64_hpux_asm_file_end (file) FILE *file; { while (extern_func_head) { const char *real_name; tree decl; real_name = (* targetm.strip_name_encoding) (extern_func_head->name); decl = maybe_get_identifier (real_name); if (!decl || (! TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (decl))) { if (decl) TREE_ASM_WRITTEN (decl) = 1; (*targetm.asm_out.globalize_label) (file, extern_func_head->name); fprintf (file, "%s", TYPE_ASM_OP); assemble_name (file, extern_func_head->name); putc (',', file); fprintf (file, TYPE_OPERAND_FMT, "function"); putc ('\n', file); } extern_func_head = extern_func_head->next; } } /* Switch to the section to which we should output X. The only thing special we do here is to honor small data. 
*/ static void ia64_select_rtx_section (mode, x, align) enum machine_mode mode; rtx x; unsigned HOST_WIDE_INT align; { if (GET_MODE_SIZE (mode) > 0 && GET_MODE_SIZE (mode) <= ia64_section_threshold) sdata_section (); else default_elf_select_rtx_section (mode, x, align); } /* It is illegal to have relocations in shared segments on AIX and HPUX. Pretend flag_pic is always set. */ static void ia64_rwreloc_select_section (exp, reloc, align) tree exp; int reloc; unsigned HOST_WIDE_INT align; { default_elf_select_section_1 (exp, reloc, align, true); } static void ia64_rwreloc_unique_section (decl, reloc) tree decl; int reloc; { default_unique_section_1 (decl, reloc, true); } static void ia64_rwreloc_select_rtx_section (mode, x, align) enum machine_mode mode; rtx x; unsigned HOST_WIDE_INT align; { int save_pic = flag_pic; flag_pic = 1; ia64_select_rtx_section (mode, x, align); flag_pic = save_pic; } static unsigned int ia64_rwreloc_section_type_flags (decl, name, reloc) tree decl; const char *name; int reloc; { return default_section_type_flags_1 (decl, name, reloc, true); } /* Output the assembler code for a thunk function. THUNK_DECL is the declaration for the thunk function itself, FUNCTION is the decl for the target function. DELTA is an immediate constant offset to be added to THIS. If VCALL_OFFSET is non-zero, the word at *(*this + vcall_offset) should be added to THIS. */ static void ia64_output_mi_thunk (file, thunk, delta, vcall_offset, function) FILE *file; tree thunk ATTRIBUTE_UNUSED; HOST_WIDE_INT delta; HOST_WIDE_INT vcall_offset; tree function; { rtx this, insn, funexp; reload_completed = 1; no_new_pseudos = 1; /* Set things up as ia64_expand_prologue might. 
*/ last_scratch_gr_reg = 15; memset (&current_frame_info, 0, sizeof (current_frame_info)); current_frame_info.spill_cfa_off = -16; current_frame_info.n_input_regs = 1; current_frame_info.need_regstk = (TARGET_REG_NAMES != 0); if (!TARGET_REG_NAMES) reg_names[IN_REG (0)] = ia64_reg_numbers[0]; /* Mark the end of the (empty) prologue. */ emit_note (NULL, NOTE_INSN_PROLOGUE_END); this = gen_rtx_REG (Pmode, IN_REG (0)); /* Apply the constant offset, if required. */ if (delta) { rtx delta_rtx = GEN_INT (delta); if (!CONST_OK_FOR_I (delta)) { rtx tmp = gen_rtx_REG (Pmode, 2); emit_move_insn (tmp, delta_rtx); delta_rtx = tmp; } emit_insn (gen_adddi3 (this, this, delta_rtx)); } /* Apply the offset from the vtable, if required. */ if (vcall_offset) { rtx vcall_offset_rtx = GEN_INT (vcall_offset); rtx tmp = gen_rtx_REG (Pmode, 2); emit_move_insn (tmp, gen_rtx_MEM (Pmode, this)); if (!CONST_OK_FOR_J (vcall_offset)) { rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ()); emit_move_insn (tmp2, vcall_offset_rtx); vcall_offset_rtx = tmp2; } emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx)); emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp)); emit_insn (gen_adddi3 (this, this, tmp)); } /* Generate a tail call to the target function. */ if (! TREE_USED (function)) { assemble_external (function); TREE_USED (function) = 1; } funexp = XEXP (DECL_RTL (function), 0); funexp = gen_rtx_MEM (FUNCTION_MODE, funexp); ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1); insn = get_last_insn (); SIBLING_CALL_P (insn) = 1; /* Code generation for calls relies on splitting. */ reload_completed = 1; try_split (PATTERN (insn), insn, 0); emit_barrier (); /* Run just enough of rest_of_compilation to get the insns emitted. There's not really enough bulk here to make other passes such as instruction scheduling worth while. Note that use_thunk calls assemble_start_function and assemble_end_function. 
*/ insn = get_insns (); emit_all_insn_group_barriers (NULL, insn); shorten_branches (insn); final_start_function (insn, file, 1); final (insn, file, 1, 0); final_end_function (); reload_completed = 0; no_new_pseudos = 0; } #include "gt-ia64.h"
28.655217
93
0.657943
[ "vector", "solid" ]
bd18589a2cdba989dbe9bc95431b2b192f079aaa
2,408
h
C
Scripts/Template/Headers/java/lang/NullPointerException.h
mbeloded/J2ObjC-FrameworkMb
35dc7171cb7d08edf6824ed3c9f34311c8be3347
[ "MIT" ]
null
null
null
Scripts/Template/Headers/java/lang/NullPointerException.h
mbeloded/J2ObjC-FrameworkMb
35dc7171cb7d08edf6824ed3c9f34311c8be3347
[ "MIT" ]
null
null
null
Scripts/Template/Headers/java/lang/NullPointerException.h
mbeloded/J2ObjC-FrameworkMb
35dc7171cb7d08edf6824ed3c9f34311c8be3347
[ "MIT" ]
null
null
null
// // Generated by the J2ObjC translator. DO NOT EDIT! // source: android/libcore/luni/src/main/java/java/lang/NullPointerException.java // #include "../../J2ObjC_header.h" #pragma push_macro("JavaLangNullPointerException_INCLUDE_ALL") #ifdef JavaLangNullPointerException_RESTRICT #define JavaLangNullPointerException_INCLUDE_ALL 0 #else #define JavaLangNullPointerException_INCLUDE_ALL 1 #endif #undef JavaLangNullPointerException_RESTRICT #pragma clang diagnostic push #pragma GCC diagnostic ignored "-Wdeprecated-declarations" #if !defined (JavaLangNullPointerException_) && (JavaLangNullPointerException_INCLUDE_ALL || defined(JavaLangNullPointerException_INCLUDE)) #define JavaLangNullPointerException_ #define JavaLangRuntimeException_RESTRICT 1 #define JavaLangRuntimeException_INCLUDE 1 #include "../../java/lang/RuntimeException.h" /*! @brief Thrown when a program tries to access a field or method of an object or an element of an array when there is no instance or array to use, that is if the object or array points to <code>null</code>. It also occurs in some other, less obvious circumstances, like a <code>throw e</code> statement where the <code>Throwable</code> reference is <code>null</code>. */ @interface JavaLangNullPointerException : JavaLangRuntimeException #pragma mark Public /*! @brief Constructs a new <code>NullPointerException</code> that includes the current stack trace. */ - (instancetype)init; /*! @brief Constructs a new <code>NullPointerException</code> with the current stack trace and the specified detail message. @param detailMessage the detail message for this exception. 
*/ - (instancetype)initWithNSString:(NSString *)detailMessage; @end J2OBJC_EMPTY_STATIC_INIT(JavaLangNullPointerException) FOUNDATION_EXPORT void JavaLangNullPointerException_init(JavaLangNullPointerException *self); FOUNDATION_EXPORT JavaLangNullPointerException *new_JavaLangNullPointerException_init() NS_RETURNS_RETAINED; FOUNDATION_EXPORT void JavaLangNullPointerException_initWithNSString_(JavaLangNullPointerException *self, NSString *detailMessage); FOUNDATION_EXPORT JavaLangNullPointerException *new_JavaLangNullPointerException_initWithNSString_(NSString *detailMessage) NS_RETURNS_RETAINED; J2OBJC_TYPE_LITERAL_HEADER(JavaLangNullPointerException) #endif #pragma clang diagnostic pop #pragma pop_macro("JavaLangNullPointerException_INCLUDE_ALL")
33.915493
144
0.828904
[ "object" ]
bd2589888a703c7c5ceeb8958fd9f4fdfa427fcb
11,897
h
C
libs/future/include/future/details/whenAllInl.h
jurocha-ms/Mso
2612c527901acca0aa44e80d6ab1337add7129e5
[ "MIT" ]
38
2019-11-22T21:52:57.000Z
2021-05-10T17:02:32.000Z
libs/future/include/future/details/whenAllInl.h
jurocha-ms/Mso
2612c527901acca0aa44e80d6ab1337add7129e5
[ "MIT" ]
21
2019-11-22T19:52:48.000Z
2021-05-11T18:14:22.000Z
libs/future/include/future/details/whenAllInl.h
jurocha-ms/Mso
2612c527901acca0aa44e80d6ab1337add7129e5
[ "MIT" ]
18
2019-11-22T19:55:00.000Z
2021-04-07T11:41:53.000Z
// Copyright (c) Microsoft Corporation. // Licensed under the MIT license. // We do not use pragma once because the file is empty if FUTURE_INLINE_DEFS is not defined #ifdef MSO_FUTURE_INLINE_DEFS #ifndef MSO_FUTURE_DETAILS_WHENALLINL_H #define MSO_FUTURE_DETAILS_WHENALLINL_H namespace Mso { namespace Futures { template <class T> struct RawOrCntPtr { RawOrCntPtr() = default; RawOrCntPtr(const RawOrCntPtr&) = delete; RawOrCntPtr& operator=(const RawOrCntPtr&) = delete; RawOrCntPtr(T* ptr) noexcept : m_ptr((reinterpret_cast<uintptr_t>(ptr) >> 1) | (static_cast<uintptr_t>(1) << (sizeof(uintptr_t) * 8 - 1))) { } ~RawOrCntPtr() noexcept { if (!IsRawPtr()) { T* ptr = Get(); if (ptr) { ptr->Release(); } } m_ptr = 0; } T* Get() const noexcept { return const_cast<T*>(reinterpret_cast<const T*>(m_ptr << 1)); } bool IsRawPtr() const noexcept { return (m_ptr >> (sizeof(uintptr_t) * 8 - 1)) != 0; } void ConvertToCntPtr() noexcept { if (IsRawPtr()) { T* ptr = Get(); if (ptr) { ptr->AddRef(); m_ptr = reinterpret_cast<uintptr_t>(ptr) >> 1; } } } private: uintptr_t m_ptr{0}; }; template <class T> struct WhenAllFutureTask { std::atomic<uint32_t> CompleteCount; uint32_t FutureCount; // List of pointers to parent futures. We use these pointers to copy values when setting result value. // This field must be the last one in the struct because we assume that other array elements are allocated after the // WhenAllFutureTask struct. Also, after this array we allocate an array for result values. RawOrCntPtr<Mso::Futures::IFuture> ParentFutures[1]; // Used by Mso::WhenAll that returns tuple. We only have specialization for void type T. 
WhenAllFutureTask(Mso::Futures::IFuture* futureState, std::initializer_list<Mso::Futures::IFuture*> init) noexcept; WhenAllFutureTask() = delete; ~WhenAllFutureTask() = delete; static constexpr size_t GetAlignedSize(size_t size) noexcept { return (size + std::alignment_of<T>::value - 1) & ~(std::alignment_of<T>::value - 1); } static constexpr size_t GetTaskSize(size_t futureCount) noexcept { return (futureCount > 0) ? GetAlignedSize(sizeof(WhenAllFutureTask) + (futureCount - 1) * sizeof(RawOrCntPtr<Mso::Futures::IFuture>)) + futureCount * sizeof(T) : sizeof(WhenAllFutureTask); } T* GetValuePtr() noexcept { T* ptr = reinterpret_cast<T*>( reinterpret_cast<uint8_t*>(this) + GetAlignedSize(sizeof(WhenAllFutureTask) + (FutureCount - 1) * sizeof(RawOrCntPtr<Mso::Futures::IFuture>))); VerifyElseCrashSzTag( (reinterpret_cast<uintptr_t>(ptr) & (std::alignment_of<T>::value - 1)) == 0, "WhenAll value is not aligned", 0x016056db /* tag_byf11 */); return ptr; } static void Destroy(const ByteArrayView& obj) noexcept { auto task = static_cast<const WhenAllFutureTask*>(obj.VoidData()); VerifyElseCrashTag(obj.Size() == GetTaskSize(task->FutureCount), 0x016056dc /* tag_byf12 */); for (uint32_t i = 0; i < task->FutureCount; ++i) { (&(task->ParentFutures[0]) + i)->~RawOrCntPtr(); } // Check if all futures were successful and we have stored result array. if (task->CompleteCount == task->FutureCount) { T* valuePtr = const_cast<WhenAllFutureTask*>(task)->GetValuePtr(); for (size_t i = 0; i < task->FutureCount; ++i) { valuePtr[i].~T(); } } } }; template <> struct WhenAllFutureTask<void> { std::atomic<uint32_t> CompleteCount; uint32_t FutureCount; // List of pointers to parent futures. We use these pointers to copy values when setting result value. // This field must be the last one in the struct because we assume that other array elements are allocated after the // WhenAllFutureTask struct. We do not store result array because type is void. 
RawOrCntPtr<Mso::Futures::IFuture> ParentFutures[1]; // Used by Mso::WhenAll that returns tuple. LIBLET_PUBLICAPI WhenAllFutureTask( Mso::Futures::IFuture* futureState, std::initializer_list<Mso::Futures::IFuture*> init) noexcept; WhenAllFutureTask() = delete; ~WhenAllFutureTask() = delete; static constexpr size_t GetTaskSize(size_t futureCount) noexcept { return (futureCount > 0) ? sizeof(WhenAllFutureTask) + (futureCount - 1) * sizeof(RawOrCntPtr<Mso::Futures::IFuture>) : sizeof(WhenAllFutureTask); } LIBLET_PUBLICAPI static void Destroy(const ByteArrayView& obj) noexcept; }; // ValueTraits specialization to enable use of WhenAllFutureTask in FutureTraitsProvider. template <class T> struct ValueTraits<WhenAllFutureTask<T>, false> { constexpr static FutureDestroyCallback* DestroyPtr = &WhenAllFutureTask<T>::Destroy; }; template <class T> struct WhenAllTaskInvoke { static void Invoke(const ByteArrayView& taskBuffer, _In_ IFuture* future, _In_ IFuture* parentFuture) noexcept { auto task = reinterpret_cast<WhenAllFutureTask<T>*>(taskBuffer.VoidData()); VerifyElseCrashTag( taskBuffer.Size() == WhenAllFutureTask<T>::GetTaskSize(task->FutureCount), 0x016056dd /* tag_byf13 */); bool found = false; for (size_t i = 0; i < task->FutureCount; ++i) { auto& storedFuture = task->ParentFutures[i]; if (parentFuture == storedFuture.Get()) { storedFuture.ConvertToCntPtr(); found = true; break; } } VerifyElseCrashSzTag(found, "parent future is not found", 0x012ca410 /* tag_blkqq */); if (++task->CompleteCount == task->FutureCount) { // All parent futures completed: copy results to the WhenAllFutureTask value storage. 
ByteArrayView valueBuffer; (void)future->TryStartSetValue(/*ref*/ valueBuffer, /*crashIfFailed:*/ true); T* valuePtr = task->GetValuePtr(); for (size_t i = 0; i < task->FutureCount; ++i) { ::new (std::addressof(valuePtr[i])) T(std::move(*reinterpret_cast<T*>(task->ParentFutures[i].Get()->GetValue().VoidData()))); } ::new (valueBuffer.VoidData()) Mso::Async::ArrayView<T>(valuePtr, task->FutureCount); (void)future->TrySetSuccess(/*crashIfFailed:*/ true); } } }; template <> struct WhenAllTaskInvoke<void> { LIBLET_PUBLICAPI _Callback_ static void Invoke(const ByteArrayView& taskBuffer, _In_ IFuture* future, _In_ IFuture* parentFuture) noexcept; }; struct WhenAllTaskCatch { WhenAllTaskCatch() = delete; ~WhenAllTaskCatch() = delete; LIBLET_PUBLICAPI static void Catch(const ByteArrayView& taskBuffer, IFuture* future, ErrorCode&& parentError) noexcept; constexpr static FutureCatchCallback* CatchPtr = &Catch; }; template <class... Ts, size_t... I> _Callback_ inline void CreateTuple( ByteArrayView& valueBuffer, RawOrCntPtr<Mso::Futures::IFuture>* futures, std::integer_sequence<size_t, I...>) noexcept { ::new (valueBuffer.VoidData()) std::tuple<Ts...>(std::move(*reinterpret_cast<Ts*>(futures[I].Get()->GetValue().VoidData()))...); } template <class... 
Ts> struct WhenAllTupleTaskInvoke { static void Invoke(const ByteArrayView& taskBuffer, _In_ IFuture* future, _In_ IFuture* parentFuture) noexcept { constexpr const size_t futureCount = sizeof...(Ts); constexpr const size_t taskSize = WhenAllFutureTask<void>::GetTaskSize(futureCount); auto task = reinterpret_cast<WhenAllFutureTask<void>*>(taskBuffer.VoidDataChecked(taskSize)); bool found = false; for (size_t i = 0; i < futureCount; ++i) { auto& storedFuture = task->ParentFutures[i]; if (parentFuture == storedFuture.Get()) { storedFuture.ConvertToCntPtr(); found = true; break; } } VerifyElseCrashSzTag(found, "parent future is not found", 0x012ca412 /* tag_blkqs */); if (++task->CompleteCount == futureCount) { ByteArrayView valueBuffer; (void)future->TryStartSetValue(/*ref*/ valueBuffer, /*crashIfFailed:*/ true); CreateTuple<Ts...>(/*ref*/ valueBuffer, task->ParentFutures, std::make_index_sequence<futureCount>()); (void)future->TrySetSuccess(/*crashIfFailed:*/ true); } } }; } // namespace Futures template <class T> inline Future<Mso::Async::ArrayView<T>> WhenAll(Mso::Async::ArrayView<Future<T>> futures) noexcept { if (futures.Size() == 0) { return MakeSucceededFutureEmplaced<Mso::Async::ArrayView<T>>(); } using TaskType = Mso::Futures::WhenAllFutureTask<T>; // Along with TaskType we allocate space for pointers to parent futures, and the array of results. 
const size_t taskSize = TaskType::GetTaskSize(futures.Size()); constexpr const auto& futureTraits = Mso::Futures::FutureTraitsProvider< /*Options: */ Mso::Futures::FutureOptions::IsMultiPost, /*ResultType: */ Mso::Async::ArrayView<T>, /*TaskType: */ TaskType, /*PostType: */ void, /*InvokeType: */ Mso::Futures::WhenAllTaskInvoke<T>, /*CatchType: */ Mso::Futures::WhenAllTaskCatch>::Traits; Mso::Futures::ByteArrayView taskBuffer; Mso::CntPtr<Mso::Futures::IFuture> whenAllFuture = Mso::Futures::MakeFuture(futureTraits, taskSize, &taskBuffer); TaskType* task = reinterpret_cast<TaskType*>(taskBuffer.VoidDataChecked(taskSize)); task->CompleteCount = 0; task->FutureCount = static_cast<uint32_t>(futures.Size()); size_t i = 0; for (const Future<T>& parentFuture : futures) { // Set raw pointer to parent futures. ::new (&task->ParentFutures[i++]) Mso::Futures::RawOrCntPtr<Mso::Futures::IFuture>(Mso::GetIFuture(parentFuture)); } // Use a separate loop to add whenAllFuture to the parent futures because parent futures may start // invoke our whenAllFuture while we still in this function. 
for (const Future<T>& parentFuture : futures) { Mso::GetIFuture(parentFuture)->AddContinuation(Mso::CntPtr{whenAllFuture}); } return Future<Mso::Async::ArrayView<T>>(std::move(whenAllFuture)); } template <class T> inline Future<Mso::Async::ArrayView<T>> WhenAll(std::initializer_list<Future<T>> futures) noexcept { return WhenAll(Mso::Async::ArrayView<Future<T>>(futures)); } template <class T, size_t size> inline Future<Mso::Async::ArrayView<T>> WhenAll(Future<T> (&futures)[size]) noexcept { return WhenAll(Mso::Async::ArrayView<Future<T>>(futures)); } template <class T> inline Future<Mso::Async::ArrayView<T>> WhenAll(const std::vector<Mso::Future<T>>& futures) noexcept { return WhenAll(Mso::Async::ArrayView<Future<T>>(futures.data(), futures.size())); } template <size_t size> inline Future<void> WhenAll(Future<void> (&futures)[size]) noexcept { return WhenAll(Mso::Async::ArrayView<Future<void>>(futures)); } template <class T0, class... Ts> Future<std::tuple<T0, Ts...>> WhenAll(const Future<T0>& future0, const Future<Ts>&... futures) noexcept { using ResultType = std::tuple<T0, Ts...>; using TaskType = Mso::Futures::WhenAllFutureTask<void>; const size_t taskSize = TaskType::GetTaskSize(1 + sizeof...(Ts)); constexpr const auto& futureTraits = Mso::Futures::FutureTraitsProvider< /*Options: */ Mso::Futures::FutureOptions::IsMultiPost, /*ResultType: */ ResultType, /*TaskType: */ TaskType, /*PostType: */ void, /*InvokeType: */ Mso::Futures::WhenAllTupleTaskInvoke<T0, Ts...>, /*AbandonType: */ Mso::Futures::WhenAllTaskCatch>::Traits; Mso::Futures::ByteArrayView taskBuffer; Mso::CntPtr<Mso::Futures::IFuture> whenAllFuture = Mso::Futures::MakeFuture(futureTraits, taskSize, &taskBuffer); ::new (taskBuffer.VoidData()) TaskType(whenAllFuture.Get(), {Mso::GetIFuture(future0), Mso::GetIFuture(futures)...}); return Future<ResultType>(std::move(whenAllFuture)); } } // namespace Mso #endif // MSO_FUTURE_DETAILS_WHENALLINL_H #endif // MSO_FUTURE_INLINE_DEFS
32.955679
119
0.685551
[ "vector" ]
bd289419a6476f35223fb57559375fb1eec0b9fe
4,334
h
C
jni/proj4/src/proj_internal.h
maru2020/RtkGps
11bd70de57c1548f2b76a7ded06fc92057e21863
[ "BSD-2-Clause" ]
162
2015-01-11T09:30:42.000Z
2022-03-31T20:12:33.000Z
jni/proj4/src/proj_internal.h
maru2020/RtkGps
11bd70de57c1548f2b76a7ded06fc92057e21863
[ "BSD-2-Clause" ]
41
2015-02-10T10:23:32.000Z
2021-11-18T03:14:48.000Z
jni/proj4/src/proj_internal.h
maru2020/RtkGps
11bd70de57c1548f2b76a7ded06fc92057e21863
[ "BSD-2-Clause" ]
115
2015-01-24T21:08:29.000Z
2022-02-12T08:58:09.000Z
/****************************************************************************** * Project: PROJ.4 * Purpose: Internal plumbing for the PROJ.4 library. * * Author: Thomas Knudsen, <thokn@sdfe.dk> * ****************************************************************************** * Copyright (c) 2016, 2017, Thomas Knudsen / SDFE * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO COORD SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. *****************************************************************************/ #ifdef _MSC_VER #ifndef _USE_MATH_DEFINES #define _USE_MATH_DEFINES #endif #endif #include <math.h> /* For M_PI */ #include <stddef.h> #include "proj.h" #ifdef PROJECTS_H #error proj_internal.h must be included before projects.h #endif #ifdef PROJ_API_H #error proj_internal.h must be included before proj_api.h #endif #ifndef PROJ_INTERNAL_H #define PROJ_INTERNAL_H #ifdef __cplusplus extern "C" { #endif #define STATIC_ASSERT(COND) ((void)sizeof(char[(COND) ? 
1 : -1])) #if !defined(HAVE_C99_MATH) #define HAVE_C99_MATH 0 #endif #ifndef PJ_TODEG #define PJ_TODEG(rad) ((rad)*180.0/M_PI) #endif #ifndef PJ_TORAD #define PJ_TORAD(deg) ((deg)*M_PI/180.0) #endif /* Maximum latitudinal overshoot accepted */ #define PJ_EPS_LAT 1e-12 /* This enum is also conditionally defined in projects.h - but enums cannot */ /* be forward declared and we need it here for the pj_left/right prototypes */ enum pj_io_units { PJ_IO_UNITS_WHATEVER = 0, /* Doesn't matter (or depends on pipeline neighbours) */ PJ_IO_UNITS_CLASSIC = 1, /* Scaled meters (right), projected system */ PJ_IO_UNITS_PROJECTED = 2, /* Meters, projected system */ PJ_IO_UNITS_CARTESIAN = 3, /* Meters, 3D cartesian system */ PJ_IO_UNITS_ANGULAR = 4 /* Radians */ }; enum pj_io_units pj_left (PJ *P); enum pj_io_units pj_right (PJ *P); PJ_COORD proj_coord_error (void); void proj_context_errno_set (PJ_CONTEXT *ctx, int err); void proj_context_set (PJ *P, PJ_CONTEXT *ctx); void proj_context_inherit (PJ *parent, PJ *child); PJ_COORD pj_fwd4d (PJ_COORD coo, PJ *P); PJ_COORD pj_inv4d (PJ_COORD coo, PJ *P); PJ_COORD pj_approx_2D_trans (PJ *P, PJ_DIRECTION direction, PJ_COORD coo); PJ_COORD pj_approx_3D_trans (PJ *P, PJ_DIRECTION direction, PJ_COORD coo); /* Grid functionality */ int proj_vgrid_init(PJ *P, const char *grids); int proj_hgrid_init(PJ *P, const char *grids); double proj_vgrid_value(PJ *P, PJ_LP lp); PJ_LP proj_hgrid_value(PJ *P, PJ_LP lp); PJ_LP proj_hgrid_apply(PJ *P, PJ_LP lp, PJ_DIRECTION direction); void proj_log_error (PJ *P, const char *fmt, ...); void proj_log_debug (PJ *P, const char *fmt, ...); void proj_log_trace (PJ *P, const char *fmt, ...); int pj_ellipsoid (PJ *); void pj_inherit_ellipsoid_def (const PJ *src, PJ *dst); void pj_erase_ellipsoid_def (PJ *P); int pj_calc_ellipsoid_params (PJ *P, double a, double es); char *pj_chomp (char *c); char *pj_shrink (char *c); size_t pj_trim_argc (char *args); char **pj_trim_argv (size_t argc, char *args); char *pj_make_args 
(size_t argc, char **argv); /* Lowest level: Minimum support for fileapi */ void proj_fileapi_set (PJ *P, void *fileapi); const char * const *proj_get_searchpath(void); int proj_get_path_count(void); #ifdef __cplusplus } #endif #endif /* ndef PROJ_INTERNAL_H */
33.859375
88
0.689432
[ "3d" ]
bd2c2d66b9653f7daa9f71985df875c5f7cd0dc4
51,544
c
C
openair1/SIMULATION/LTE_PHY/pdcchsim.c
manishksingh89/openairinterface5g
699b44f2201b17d3e5ecafde16d6f0c9dc76b7c7
[ "Apache-2.0" ]
6
2019-12-27T00:55:47.000Z
2021-11-16T11:36:20.000Z
openair1/SIMULATION/LTE_PHY/pdcchsim.c
manishksingh89/openairinterface5g
699b44f2201b17d3e5ecafde16d6f0c9dc76b7c7
[ "Apache-2.0" ]
2
2021-06-17T05:01:55.000Z
2021-11-24T14:23:54.000Z
openair1/SIMULATION/LTE_PHY/pdcchsim.c
manishksingh89/openairinterface5g
699b44f2201b17d3e5ecafde16d6f0c9dc76b7c7
[ "Apache-2.0" ]
15
2019-12-27T00:55:51.000Z
2022-03-28T02:13:45.000Z
/* * Licensed to the OpenAirInterface (OAI) Software Alliance under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The OpenAirInterface Software Alliance licenses this file to You under * the OAI Public License, Version 1.1 (the "License"); you may not use this file * except in compliance with the License. * You may obtain a copy of the License at * * http://www.openairinterface.org/?page_id=698 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. *------------------------------------------------------------------------------- * For more information about the OpenAirInterface (OAI) Software Alliance: * contact@openairinterface.org */ #include <string.h> #include <math.h> #include <unistd.h> #include "SIMULATION/TOOLS/defs.h" #include "SIMULATION/RF/defs.h" #include "PHY/types.h" #include "PHY/defs.h" #include "PHY/vars.h" #include "SCHED/defs.h" #include "SCHED/vars.h" #include "LAYER2/MAC/vars.h" #include "OCG_vars.h" #ifdef XFORMS #include "PHY/TOOLS/lte_phy_scope.h" #endif #include "unitary_defs.h" #define N_TRIALS 100 PHY_VARS_eNB *eNB,*eNB1,*eNB2; PHY_VARS_UE *UE; #define UL_RB_ALLOC 0x1ff; #define CCCH_RB_ALLOC computeRIV(eNB->frame_parms.N_RB_UL,0,2) #define DLSCH_RB_ALLOC ((uint16_t)0x1fbf) // igore DC component,RB13 double cpuf; DCI_PDU DCI_pdu; DCI_PDU *get_dci(LTE_DL_FRAME_PARMS *lte_frame_parms,uint8_t log2L, uint8_t log2Lcommon, DCI_format_t format_selector[MAX_NUM_DCI], uint8_t num_dci, uint32_t rnti) { uint32_t BCCH_alloc_pdu[2]; uint32_t DLSCH_alloc_pdu[2]; uint32_t UL_alloc_pdu[2]; int ind; int dci_length_bytes=0,dci_length=0; int BCCH_pdu_size_bits=0, BCCH_pdu_size_bytes=0; int 
UL_pdu_size_bits=0, UL_pdu_size_bytes=0; int mcs = 3; DCI_pdu.Num_dci = 0; if (lte_frame_parms->frame_type == TDD) { switch (lte_frame_parms->N_RB_DL) { case 6: dci_length = sizeof_DCI1_1_5MHz_TDD_t; dci_length_bytes = sizeof(DCI1_1_5MHz_TDD_t); ((DCI1_1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->rah = 0; ((DCI1_1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI1_1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->mcs = mcs; ((DCI1_1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->harq_pid = 0; ((DCI1_1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->ndi = 1; ((DCI1_1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->rv = 0; ((DCI1_1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->TPC = 0; ((DCI1_1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->dai = 0; ((DCI1A_1_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->type = 1; ((DCI1A_1_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->vrb_type = 0; ((DCI1A_1_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->rballoc = computeRIV(lte_frame_parms->N_RB_DL, 0, 4); ((DCI1A_1_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->ndi = 1; ((DCI1A_1_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->rv = 0; ((DCI1A_1_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->mcs = 2; ((DCI1A_1_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->harq_pid = 0; ((DCI1A_1_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->TPC = 1; BCCH_pdu_size_bits = sizeof_DCI1A_1_5MHz_TDD_1_6_t; BCCH_pdu_size_bytes = sizeof(DCI1A_1_5MHz_TDD_1_6_t); ((DCI0_1_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->type = 0; ((DCI0_1_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->hopping = 0; ((DCI0_1_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI0_1_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->mcs = mcs; ((DCI0_1_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->ndi = 1; ((DCI0_1_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->TPC = 2; ((DCI0_1_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->cshift = 3; ((DCI0_1_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->dai = 1; ((DCI0_1_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->cqi_req = 1; UL_pdu_size_bits = sizeof_DCI0_1_5MHz_TDD_1_6_t; UL_pdu_size_bytes = sizeof(DCI0_1_5MHz_TDD_1_6_t); break; case 25: dci_length = sizeof_DCI1_5MHz_TDD_t; dci_length_bytes = 
sizeof(DCI1_5MHz_TDD_t); ((DCI1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->rah = 0; ((DCI1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->mcs = mcs; ((DCI1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->harq_pid = 0; ((DCI1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->ndi = 1; ((DCI1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->rv = 0; ((DCI1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->TPC = 0; ((DCI1_5MHz_TDD_t *)&DLSCH_alloc_pdu[0])->dai = 0; ((DCI1A_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->type = 1; ((DCI1A_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->vrb_type = 0; ((DCI1A_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->rballoc = computeRIV(lte_frame_parms->N_RB_DL, 18, 4); ((DCI1A_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->ndi = 1; ((DCI1A_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->rv = 0; ((DCI1A_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->mcs = 2; ((DCI1A_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->harq_pid = 0; ((DCI1A_5MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->TPC = 1; BCCH_pdu_size_bits = sizeof_DCI1A_5MHz_TDD_1_6_t; BCCH_pdu_size_bytes = sizeof(DCI1A_5MHz_TDD_1_6_t); ((DCI0_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->type = 0; ((DCI0_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->hopping = 0; ((DCI0_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI0_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->mcs = mcs; ((DCI0_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->ndi = 1; ((DCI0_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->TPC = 2; ((DCI0_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->cshift = 3; ((DCI0_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->dai = 1; ((DCI0_5MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->cqi_req = 1; UL_pdu_size_bits = sizeof_DCI0_5MHz_TDD_1_6_t; UL_pdu_size_bytes = sizeof(DCI0_5MHz_TDD_1_6_t); break; case 50: dci_length = sizeof_DCI1_10MHz_TDD_t; dci_length_bytes = sizeof(DCI1_10MHz_TDD_t); ((DCI1_10MHz_TDD_t *)&DLSCH_alloc_pdu[0])->rah = 0; ((DCI1_10MHz_TDD_t *)&DLSCH_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI1_10MHz_TDD_t *)&DLSCH_alloc_pdu[0])->mcs = mcs; ((DCI1_10MHz_TDD_t *)&DLSCH_alloc_pdu[0])->harq_pid = 0; ((DCI1_10MHz_TDD_t 
*)&DLSCH_alloc_pdu[0])->ndi = 1; ((DCI1_10MHz_TDD_t *)&DLSCH_alloc_pdu[0])->rv = 0; ((DCI1_10MHz_TDD_t *)&DLSCH_alloc_pdu[0])->TPC = 0; ((DCI1_10MHz_TDD_t *)&DLSCH_alloc_pdu[0])->dai = 0; ((DCI1A_10MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->type = 1; ((DCI1A_10MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->vrb_type = 0; ((DCI1A_10MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->rballoc = computeRIV(lte_frame_parms->N_RB_DL, 30, 4); ((DCI1A_10MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->ndi = 1; ((DCI1A_10MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->rv = 0; ((DCI1A_10MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->mcs = 2; ((DCI1A_10MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->harq_pid = 0; ((DCI1A_10MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->TPC = 1; BCCH_pdu_size_bits = sizeof_DCI1A_10MHz_TDD_1_6_t; BCCH_pdu_size_bytes = sizeof(DCI1A_10MHz_TDD_1_6_t); ((DCI0_10MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->type = 0; ((DCI0_10MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->hopping = 0; ((DCI0_10MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI0_10MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->mcs = mcs; ((DCI0_10MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->ndi = 1; ((DCI0_10MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->TPC = 2; ((DCI0_10MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->cshift = 3; ((DCI0_10MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->dai = 1; ((DCI0_10MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->cqi_req = 1; UL_pdu_size_bits = sizeof_DCI0_10MHz_TDD_1_6_t; UL_pdu_size_bytes = sizeof(DCI0_10MHz_TDD_1_6_t); break; case 100: dci_length = sizeof_DCI1_20MHz_TDD_t; dci_length_bytes = sizeof(DCI1_20MHz_TDD_t); ((DCI1_20MHz_TDD_t *)&DLSCH_alloc_pdu[0])->rah = 0; ((DCI1_20MHz_TDD_t *)&DLSCH_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI1_20MHz_TDD_t *)&DLSCH_alloc_pdu[0])->mcs = mcs; ((DCI1_20MHz_TDD_t *)&DLSCH_alloc_pdu[0])->harq_pid = 0; ((DCI1_20MHz_TDD_t *)&DLSCH_alloc_pdu[0])->ndi = 1; ((DCI1_20MHz_TDD_t *)&DLSCH_alloc_pdu[0])->rv = 0; ((DCI1_20MHz_TDD_t *)&DLSCH_alloc_pdu[0])->TPC = 0; ((DCI1_20MHz_TDD_t *)&DLSCH_alloc_pdu[0])->dai = 0; ((DCI1A_20MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->type = 1; 
((DCI1A_20MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->vrb_type = 0; ((DCI1A_20MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->rballoc = computeRIV(lte_frame_parms->N_RB_DL, 70, 4); ((DCI1A_20MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->ndi = 1; ((DCI1A_20MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->rv = 0; ((DCI1A_20MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->mcs = 2; ((DCI1A_20MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->harq_pid = 0; ((DCI1A_20MHz_TDD_1_6_t *)&BCCH_alloc_pdu[0])->TPC = 1; BCCH_pdu_size_bits = sizeof_DCI1A_20MHz_TDD_1_6_t; BCCH_pdu_size_bytes = sizeof(DCI1A_20MHz_TDD_1_6_t); ((DCI0_20MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->type = 0; ((DCI0_20MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->hopping = 0; ((DCI0_20MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI0_20MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->mcs = mcs; ((DCI0_20MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->ndi = 1; ((DCI0_20MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->TPC = 2; ((DCI0_20MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->cshift = 3; ((DCI0_20MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->dai = 1; ((DCI0_20MHz_TDD_1_6_t *)&UL_alloc_pdu[0])->cqi_req = 1; UL_pdu_size_bits = sizeof_DCI0_20MHz_TDD_1_6_t; UL_pdu_size_bytes = sizeof(DCI0_20MHz_TDD_1_6_t); break; } } else { //FDD switch (lte_frame_parms->N_RB_DL) { case 6: dci_length = sizeof_DCI1_1_5MHz_FDD_t; dci_length_bytes = sizeof(DCI1_1_5MHz_FDD_t); ((DCI1_1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->rah = 0; ((DCI1_1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI1_1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->mcs = mcs; ((DCI1_1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->harq_pid = 0; ((DCI1_1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->ndi = 1; ((DCI1_1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->rv = 0; ((DCI1_1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->TPC = 0; ((DCI1A_1_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->type = 1; ((DCI1A_1_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->vrb_type = 0; ((DCI1A_1_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->rballoc = computeRIV(lte_frame_parms->N_RB_DL, 0, 4); ((DCI1A_1_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->ndi = 1; ((DCI1A_1_5MHz_FDD_t 
*)&BCCH_alloc_pdu[0])->rv = 0; ((DCI1A_1_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->mcs = 2; ((DCI1A_1_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->harq_pid = 0; ((DCI1A_1_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->TPC = 1; BCCH_pdu_size_bits = sizeof_DCI1A_1_5MHz_FDD_t; BCCH_pdu_size_bytes = sizeof(DCI1A_1_5MHz_FDD_t); ((DCI0_1_5MHz_FDD_t *)&UL_alloc_pdu[0])->type = 0; ((DCI0_1_5MHz_FDD_t *)&UL_alloc_pdu[0])->hopping = 0; ((DCI0_1_5MHz_FDD_t *)&UL_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI0_1_5MHz_FDD_t *)&UL_alloc_pdu[0])->mcs = mcs; ((DCI0_1_5MHz_FDD_t *)&UL_alloc_pdu[0])->ndi = 1; ((DCI0_1_5MHz_FDD_t *)&UL_alloc_pdu[0])->TPC = 2; ((DCI0_1_5MHz_FDD_t *)&UL_alloc_pdu[0])->cshift = 3; ((DCI0_1_5MHz_FDD_t *)&UL_alloc_pdu[0])->cqi_req = 1; UL_pdu_size_bits = sizeof_DCI0_1_5MHz_FDD_t; UL_pdu_size_bytes = sizeof(DCI0_1_5MHz_FDD_t); break; case 25: dci_length = sizeof_DCI1_5MHz_FDD_t; dci_length_bytes = sizeof(DCI1_5MHz_FDD_t); ((DCI1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->rah = 0; ((DCI1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->mcs = mcs; ((DCI1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->harq_pid = 0; ((DCI1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->ndi = 1; ((DCI1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->rv = 0; ((DCI1_5MHz_FDD_t *)&DLSCH_alloc_pdu[0])->TPC = 0; ((DCI1A_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->type = 1; ((DCI1A_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->vrb_type = 0; ((DCI1A_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->rballoc = computeRIV(lte_frame_parms->N_RB_DL, 18, 4); ((DCI1A_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->ndi = 1; ((DCI1A_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->rv = 0; ((DCI1A_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->mcs = 2; ((DCI1A_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->harq_pid = 0; ((DCI1A_5MHz_FDD_t *)&BCCH_alloc_pdu[0])->TPC = 1; BCCH_pdu_size_bits = sizeof_DCI1A_5MHz_FDD_t; BCCH_pdu_size_bytes = sizeof(DCI1A_5MHz_FDD_t); ((DCI0_5MHz_FDD_t *)&UL_alloc_pdu[0])->type = 0; ((DCI0_5MHz_FDD_t *)&UL_alloc_pdu[0])->hopping = 0; ((DCI0_5MHz_FDD_t *)&UL_alloc_pdu[0])->rballoc 
= DLSCH_RB_ALLOC; ((DCI0_5MHz_FDD_t *)&UL_alloc_pdu[0])->mcs = mcs; ((DCI0_5MHz_FDD_t *)&UL_alloc_pdu[0])->ndi = 1; ((DCI0_5MHz_FDD_t *)&UL_alloc_pdu[0])->TPC = 2; ((DCI0_5MHz_FDD_t *)&UL_alloc_pdu[0])->cshift = 3; ((DCI0_5MHz_FDD_t *)&UL_alloc_pdu[0])->cqi_req = 1; UL_pdu_size_bits = sizeof_DCI0_5MHz_FDD_t; UL_pdu_size_bytes = sizeof(DCI0_5MHz_FDD_t); break; case 50: dci_length = sizeof_DCI1_10MHz_FDD_t; dci_length_bytes = sizeof(DCI1_10MHz_FDD_t); ((DCI1_10MHz_FDD_t *)&DLSCH_alloc_pdu[0])->rah = 0; ((DCI1_10MHz_FDD_t *)&DLSCH_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI1_10MHz_FDD_t *)&DLSCH_alloc_pdu[0])->mcs = mcs; ((DCI1_10MHz_FDD_t *)&DLSCH_alloc_pdu[0])->harq_pid = 0; ((DCI1_10MHz_FDD_t *)&DLSCH_alloc_pdu[0])->ndi = 1; ((DCI1_10MHz_FDD_t *)&DLSCH_alloc_pdu[0])->rv = 0; ((DCI1_10MHz_FDD_t *)&DLSCH_alloc_pdu[0])->TPC = 0; ((DCI1A_10MHz_FDD_t *)&BCCH_alloc_pdu[0])->type = 1; ((DCI1A_10MHz_FDD_t *)&BCCH_alloc_pdu[0])->vrb_type = 0; ((DCI1A_10MHz_FDD_t *)&BCCH_alloc_pdu[0])->rballoc = computeRIV(lte_frame_parms->N_RB_DL, 30, 4); ((DCI1A_10MHz_FDD_t *)&BCCH_alloc_pdu[0])->ndi = 1; ((DCI1A_10MHz_FDD_t *)&BCCH_alloc_pdu[0])->rv = 0; ((DCI1A_10MHz_FDD_t *)&BCCH_alloc_pdu[0])->mcs = 2; ((DCI1A_10MHz_FDD_t *)&BCCH_alloc_pdu[0])->harq_pid = 0; ((DCI1A_10MHz_FDD_t *)&BCCH_alloc_pdu[0])->TPC = 1; BCCH_pdu_size_bits = sizeof_DCI1A_10MHz_FDD_t; BCCH_pdu_size_bytes = sizeof(DCI1A_10MHz_FDD_t); ((DCI0_10MHz_FDD_t *)&UL_alloc_pdu[0])->type = 0; ((DCI0_10MHz_FDD_t *)&UL_alloc_pdu[0])->hopping = 0; ((DCI0_10MHz_FDD_t *)&UL_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI0_10MHz_FDD_t *)&UL_alloc_pdu[0])->mcs = mcs; ((DCI0_10MHz_FDD_t *)&UL_alloc_pdu[0])->ndi = 1; ((DCI0_10MHz_FDD_t *)&UL_alloc_pdu[0])->TPC = 2; ((DCI0_10MHz_FDD_t *)&UL_alloc_pdu[0])->cshift = 3; ((DCI0_10MHz_FDD_t *)&UL_alloc_pdu[0])->cqi_req = 1; UL_pdu_size_bits = sizeof_DCI0_10MHz_FDD_t; UL_pdu_size_bytes = sizeof(DCI0_10MHz_FDD_t); break; case 100: dci_length = sizeof_DCI1_20MHz_FDD_t; dci_length_bytes = 
sizeof(DCI1_20MHz_FDD_t); ((DCI1_20MHz_FDD_t *)&DLSCH_alloc_pdu[0])->rah = 0; ((DCI1_20MHz_FDD_t *)&DLSCH_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI1_20MHz_FDD_t *)&DLSCH_alloc_pdu[0])->mcs = mcs; ((DCI1_20MHz_FDD_t *)&DLSCH_alloc_pdu[0])->harq_pid = 0; ((DCI1_20MHz_FDD_t *)&DLSCH_alloc_pdu[0])->ndi = 1; ((DCI1_20MHz_FDD_t *)&DLSCH_alloc_pdu[0])->rv = 0; ((DCI1_20MHz_FDD_t *)&DLSCH_alloc_pdu[0])->TPC = 0; ((DCI1A_20MHz_FDD_t *)&BCCH_alloc_pdu[0])->type = 1; ((DCI1A_20MHz_FDD_t *)&BCCH_alloc_pdu[0])->vrb_type = 0; ((DCI1A_20MHz_FDD_t *)&BCCH_alloc_pdu[0])->rballoc = computeRIV(lte_frame_parms->N_RB_DL, 70, 4); ((DCI1A_20MHz_FDD_t *)&BCCH_alloc_pdu[0])->ndi = 1; ((DCI1A_20MHz_FDD_t *)&BCCH_alloc_pdu[0])->rv = 0; ((DCI1A_20MHz_FDD_t *)&BCCH_alloc_pdu[0])->mcs = 2; ((DCI1A_20MHz_FDD_t *)&BCCH_alloc_pdu[0])->harq_pid = 0; ((DCI1A_20MHz_FDD_t *)&BCCH_alloc_pdu[0])->TPC = 1; BCCH_pdu_size_bits = sizeof_DCI1A_20MHz_FDD_t; BCCH_pdu_size_bytes = sizeof(DCI1A_20MHz_FDD_t); ((DCI0_20MHz_FDD_t *)&UL_alloc_pdu[0])->type = 0; ((DCI0_20MHz_FDD_t *)&UL_alloc_pdu[0])->hopping = 0; ((DCI0_20MHz_FDD_t *)&UL_alloc_pdu[0])->rballoc = DLSCH_RB_ALLOC; ((DCI0_20MHz_FDD_t *)&UL_alloc_pdu[0])->mcs = mcs; ((DCI0_20MHz_FDD_t *)&UL_alloc_pdu[0])->ndi = 1; ((DCI0_20MHz_FDD_t *)&UL_alloc_pdu[0])->TPC = 2; ((DCI0_20MHz_FDD_t *)&UL_alloc_pdu[0])->cshift = 3; ((DCI0_20MHz_FDD_t *)&UL_alloc_pdu[0])->cqi_req = 1; UL_pdu_size_bits = sizeof_DCI0_20MHz_FDD_t; UL_pdu_size_bytes = sizeof(DCI0_20MHz_FDD_t); break; } } for (ind = 0; ind<num_dci; ind++) { if (format_selector[ind]==format1A) { // add common dci DCI_pdu.dci_alloc[ind].dci_length = BCCH_pdu_size_bits; DCI_pdu.dci_alloc[ind].L = log2Lcommon; DCI_pdu.dci_alloc[ind].rnti = SI_RNTI; DCI_pdu.dci_alloc[ind].format = format1A; DCI_pdu.dci_alloc[ind].ra_flag = 0; DCI_pdu.dci_alloc[ind].search_space = DCI_COMMON_SPACE; memcpy((void *)&DCI_pdu.dci_alloc[ind].dci_pdu[0], &BCCH_alloc_pdu[0], BCCH_pdu_size_bytes); DCI_pdu.Num_dci++; printf("Added common 
dci (%d) for rnti %x\n",ind,SI_RNTI); } if (format_selector[ind]==format1) { DCI_pdu.dci_alloc[ind].dci_length = dci_length; DCI_pdu.dci_alloc[ind].L = log2L; DCI_pdu.dci_alloc[ind].rnti = rnti; DCI_pdu.dci_alloc[ind].format = format1; DCI_pdu.dci_alloc[ind].ra_flag = 0; DCI_pdu.dci_alloc[ind].search_space = DCI_UE_SPACE; memcpy((void *)&DCI_pdu.dci_alloc[ind].dci_pdu[0], &DLSCH_alloc_pdu[0], dci_length_bytes); DCI_pdu.Num_dci++; } if (format_selector[ind]==format0) { DCI_pdu.dci_alloc[ind].dci_length = UL_pdu_size_bits; DCI_pdu.dci_alloc[ind].L = log2L; DCI_pdu.dci_alloc[ind].rnti = rnti; DCI_pdu.dci_alloc[ind].format = format0; DCI_pdu.dci_alloc[ind].ra_flag = 0; DCI_pdu.dci_alloc[ind].search_space = DCI_UE_SPACE; memcpy((void *)&DCI_pdu.dci_alloc[ind].dci_pdu[0], &UL_alloc_pdu[0], UL_pdu_size_bytes); DCI_pdu.Num_dci++; } } return(&DCI_pdu); } extern int QPSK[4],QPSK2[4]; int main(int argc, char **argv) { char c; int i,l,aa; double sigma2, sigma2_dB=0,SNR,snr0=-2.0,snr1; int **txdata; double s_re[2][30720*2],s_im[2][30720*2],r_re[2][30720*2],r_im[2][30720*2]; double iqim=0.0; // int subframe_offset; uint8_t subframe=0; #ifdef XFORMS FD_lte_phy_scope_ue *form_ue; char title[255]; #endif int trial, n_errors_common=0,n_errors_ul=0,n_errors_dl=0,n_errors_cfi=0,n_errors_hi=0; unsigned char eNb_id = 0; uint8_t awgn_flag=0; int n_frames=1; channel_desc_t *eNB2UE; uint32_t nsymb,tx_lev,tx_lev_dB=0,num_pdcch_symbols=3; uint8_t extended_prefix_flag=0,transmission_mode=1,n_tx=1,n_rx=1; uint16_t Nid_cell=0; // int8_t interf1=-128,interf2=-128; uint8_t dci_cnt=0; LTE_DL_FRAME_PARMS *frame_parms; uint8_t log2L=2, log2Lcommon=2; DCI_format_t format_selector[MAX_NUM_DCI]; uint8_t num_dci=0; uint8_t numCCE,common_active=0,ul_active=0,dl_active=0; uint32_t n_trials_common=0,n_trials_ul=0,n_trials_dl=0,false_detection_cnt=0; uint8_t common_rx,ul_rx,dl_rx; uint8_t tdd_config=3; FILE *input_fd=NULL; char input_val_str[50],input_val_str2[50]; uint16_t n_rnti=0x1234; uint8_t 
osf=1,N_RB_DL=25; SCM_t channel_model=Rayleigh1_anticorr; DCI_ALLOC_t dci_alloc_rx[8]; int ret; uint8_t harq_pid; uint8_t phich_ACK; uint8_t num_phich_interf = 0; lte_frame_type_t frame_type=TDD; // int re_offset; // uint32_t *txptr; int aarx; int k; uint32_t perfect_ce = 0; int CCE_table[800]; number_of_cards = 1; cpuf = get_cpu_freq_GHz(); logInit(); while ((c = getopt (argc, argv, "hapFg:R:c:n:s:x:y:z:L:M:N:I:f:i:S:P:Y")) != -1) { switch (c) { case 'a': printf("Running AWGN simulation\n"); awgn_flag = 1; break; case 'R': N_RB_DL = atoi(optarg); break; case 'F': frame_type = FDD; break; case 'c': tdd_config=atoi(optarg); if (tdd_config>6) { printf("Illegal tdd_config %d (should be 0-6)\n",tdd_config); exit(-1); } break; case 'g': switch((char)*optarg) { case 'A': channel_model=SCM_A; break; case 'B': channel_model=SCM_B; break; case 'C': channel_model=SCM_C; break; case 'D': channel_model=SCM_D; break; case 'E': channel_model=EPA; break; case 'F': channel_model=EVA; break; case 'G': channel_model=ETU; break; default: printf("Unsupported channel model!\n"); exit(-1); } break; /* case 'i': interf1=atoi(optarg); break; case 'j': interf2=atoi(optarg); break; */ case 'n': n_frames = atoi(optarg); break; case 's': snr0 = atoi(optarg); break; case 'p': extended_prefix_flag=1; break; case 'x': transmission_mode=atoi(optarg); if ((transmission_mode!=1) && (transmission_mode!=2) && (transmission_mode!=6)) { printf("Unsupported transmission mode %d\n",transmission_mode); exit(-1); } break; case 'y': n_tx=atoi(optarg); if ((n_tx==0) || (n_tx>2)) { printf("Unsupported number of tx antennas %d\n",n_tx); exit(-1); } break; case 'z': n_rx=atoi(optarg); if ((n_rx==0) || (n_rx>2)) { printf("Unsupported number of rx antennas %d\n",n_rx); exit(-1); } break; case 'S': subframe=atoi(optarg); break; case 'L': log2L=atoi(optarg); if ((log2L!=0)&& (log2L!=1)&& (log2L!=2)&& (log2L!=3)) { printf("Unsupported DCI aggregation level %d (should be 0,1,2,3)\n",log2L); exit(-1); } break; case 
'M': log2Lcommon=atoi(optarg); if ((log2Lcommon!=2)&& (log2Lcommon!=3)) { printf("Unsupported Common DCI aggregation level %d (should be 2 or 3)\n",log2Lcommon); exit(-1); } break; case 'N': format_selector[num_dci] = (DCI_format_t) atoi(optarg); if ((format_selector[num_dci]<format0) || (format_selector[num_dci] > format1A)) { printf("only formats 0, 1, and 1A supported for the moment\n"); exit(-1); } if (format_selector[num_dci]==format0) ul_active=1; if (format_selector[num_dci]==format1A) common_active=1; if (format_selector[num_dci]==format1) dl_active=1; num_dci++; break; case 'O': osf = atoi(optarg); break; case 'I': Nid_cell = atoi(optarg); break; case 'f': input_fd = fopen(optarg,"r"); if (input_fd==NULL) { printf("Problem with filename %s\n",optarg); exit(-1); } break; case 'i': n_rnti=atoi(optarg); break; case 'P': num_phich_interf=atoi(optarg); break; case 'Y': perfect_ce = 1; break; case 'h': printf("%s -h(elp) -a(wgn on) -c tdd_config -n n_frames -r RiceanFactor -s snr0 -t Delayspread -x transmission mode (1,2,6) -y TXant -z RXant -L AggregLevelUEspec -M AggregLevelCommonDCI -N DCIFormat\n\n", argv[0]); printf("-h This message\n"); printf("-a Use AWGN channel and not multipath\n"); printf("-c TDD config\n"); printf("-S Subframe number (0..9)\n"); printf("-R N_RB_DL\n"); printf("-F use FDD frame\n"); printf("-p Use extended prefix mode\n"); printf("-n Number of frames to simulate\n"); printf("-r Ricean factor (dB, 0 means Rayleigh, 100 is almost AWGN\n"); printf("-s Starting SNR, runs from SNR to SNR + 5 dB. 
If n_frames is 1 then just SNR is simulated\n"); printf("-t Delay spread for multipath channel\n"); printf("-x Transmission mode (1,2,6 for the moment)\n"); printf("-y Number of TX antennas used in eNB\n"); printf("-z Number of RX antennas used in UE\n"); printf("-P Number of interfering PHICH\n"); printf("-L log2 of Aggregation level for UE Specific DCI (0,1,2,3)\n"); printf("-M log2 Aggregation level for Common DCI (4,8)\n"); printf("-N Format for UE Spec DCI (0 - format0,\n"); printf(" 1 - format1,\n"); printf(" 2 - format1A,\n"); printf(" 3 - format1B_2A,\n"); printf(" 4 - format1B_4A,\n"); printf(" 5 - format1C,\n"); printf(" 6 - format1D_2A,\n"); printf(" 7 - format1D_4A,\n"); printf(" 8 - format2A_2A_L10PRB,\n"); printf(" 9 - format2A_2A_M10PRB,\n"); printf(" 10 - format2A_4A_L10PRB,\n"); printf(" 11 - format2A_4A_M10PRB,\n"); printf(" 12 - format2_2A_L10PRB,\n"); printf(" 13 - format2_2A_M10PRB,\n"); printf(" 14 - format2_4A_L10PRB,\n"); printf(" 15 - format2_4A_M10PRB\n"); printf(" 16 - format2_2D_M10PRB\n"); printf(" 17 - format2_2D_L10PRB\n"); printf(" can be called multiple times to add more than one DCI\n"); printf("-O Oversampling factor\n"); printf("-I Cell Id\n"); printf("-F Input sample stream\n"); exit(1); break; } } if ((transmission_mode>1) && (n_tx==1)) n_tx=2; lte_param_init(n_tx, n_tx, n_rx, transmission_mode, extended_prefix_flag, frame_type, Nid_cell, tdd_config, N_RB_DL, 0, osf, perfect_ce); #ifdef XFORMS fl_initialize (&argc, argv, NULL, 0, 0); form_ue = create_lte_phy_scope_ue(); sprintf (title, "LTE PHY SCOPE UE"); fl_show_form (form_ue->lte_phy_scope_ue, FL_PLACE_HOTSPOT, FL_FULLBORDER, title); #endif mac_xface->computeRIV = computeRIV; mac_xface->frame_parms = &eNB->frame_parms; // init_transport_channels(transmission_mode); if (n_frames==1) snr1 = snr0+.1; else snr1 = snr0+8.0; printf("SNR0 %f, SNR1 %f\n",snr0,snr1); frame_parms = &eNB->frame_parms; printf("Getting %d dcis\n",num_dci); get_dci(frame_parms, log2L, log2Lcommon, 
format_selector, num_dci, n_rnti); txdata = eNB->common_vars.txdata[eNb_id]; nsymb = (eNB->frame_parms.Ncp == 0) ? 14 : 12; printf("Subframe %d, FFT Size %d, Extended Prefix %d, Samples per subframe %d, Symbols per subframe %d\n", subframe,NUMBER_OF_OFDM_CARRIERS, eNB->frame_parms.Ncp,eNB->frame_parms.samples_per_tti,nsymb); eNB2UE = new_channel_desc_scm(eNB->frame_parms.nb_antennas_tx, UE->frame_parms.nb_antennas_rx, channel_model, N_RB2sampling_rate(eNB->frame_parms.N_RB_DL), N_RB2channel_bandwidth(eNB->frame_parms.N_RB_DL), 0, 0, 0, 0); L1_rxtx_proc_t *proc_rxtx = (subframe == 0)? &eNB->proc.L1_proc: &eNB->proc.L1_proc_tx; eNB->ulsch[0] = new_eNB_ulsch(MAX_TURBO_ITERATIONS,N_RB_DL,0); UE->ulsch[0] = new_ue_ulsch(N_RB_DL,0); proc_rxtx->frame_tx = 0; proc_rxtx->subframe_tx = subframe; if (input_fd==NULL) { printf("No input file, so starting TX\n"); } else { i=0; while (!feof(input_fd)) { ret=fscanf(input_fd,"%49s %49s",input_val_str,input_val_str2);//&input_val1,&input_val2); if (ret != 2) { printf("%s:%d:%s: fscanf error, exiting\n", __FILE__, __LINE__, __FUNCTION__); exit(1); } if ((i%4)==0) { ((short *)txdata[0])[i/2] = (short)((1<<15)*strtod(input_val_str,NULL)); ((short *)txdata[0])[(i/2)+1] = (short)((1<<15)*strtod(input_val_str2,NULL)); if ((i/4)<100) printf("sample %d => %e + j%e (%d +j%d)\n",i/4,strtod(input_val_str,NULL),strtod(input_val_str2,NULL),((short *)txdata[0])[i/4],((short *)txdata[0])[(i/4)+1]); //1,input_val2,); } i++; if (i>(4*FRAME_LENGTH_SAMPLES)) break; } printf("Read in %d samples\n",i/4); LOG_M("txsig0.m","txs0", txdata[0],FRAME_LENGTH_COMPLEX_SAMPLES,1,1); // LOG_M("txsig1.m","txs1", txdata[1],FRAME_LENGTH_COMPLEX_SAMPLES,1,1); tx_lev = signal_energy(&txdata[0][0], OFDM_SYMBOL_SIZE_COMPLEX_SAMPLES); tx_lev_dB = (unsigned int) dB_fixed(tx_lev); } UE->UE_mode[0] = PUSCH; // nCCE_max = get_nCCE(3,&eNB->frame_parms,get_mi(&eNB->frame_parms,0)); //printf("nCCE_max %d\n",nCCE_max); //printf("num_phich interferers %d\n",num_phich_interf); for 
(SNR=snr0; SNR<snr1; SNR+=0.2) { n_errors_common = 0; n_errors_ul = 0; n_errors_dl = 0; n_errors_cfi = 0; n_errors_hi = 0; n_trials_common=0; n_trials_ul=0; n_trials_dl=0; for (trial=0; trial<n_frames; trial++) { // printf("DCI (SF %d): txdataF %p (0 %p)\n",subframe,&eNB->common_vars.txdataF[eNb_id][aa][512*14*subframe],&eNB->common_vars.txdataF[eNb_id][aa][0]); for (aa=0; aa<eNB->frame_parms.nb_antennas_tx; aa++) { memset(&eNB->common_vars.txdataF[eNb_id][aa][0],0,FRAME_LENGTH_COMPLEX_SAMPLES_NO_PREFIX*sizeof(int32_t)); } generate_pilots_slot(eNB, eNB->common_vars.txdataF[eNb_id], AMP, //1024, (subframe*2), 0); generate_pilots_slot(eNB, eNB->common_vars.txdataF[eNb_id], AMP, //1024, (subframe*2)+1, 0); if (input_fd == NULL) { numCCE=0; n_trials_common++; common_active = 1; if (eNB->frame_parms.N_RB_DL >= 50) { if (ul_active==1) { n_trials_ul++; } } if (eNB->frame_parms.N_RB_DL >= 25) { if (dl_active==1) { n_trials_dl++; } } num_pdcch_symbols = get_num_pdcch_symbols(DCI_pdu.Num_dci, DCI_pdu.dci_alloc, frame_parms, subframe); numCCE = get_nCCE(num_pdcch_symbols,&eNB->frame_parms,get_mi(&eNB->frame_parms,subframe)); if (n_frames==1) { printf("num_dci %d, num_pddch_symbols %d, nCCE %d\n", DCI_pdu.Num_dci, num_pdcch_symbols,numCCE); } // apply RNTI-based nCCE allocation memset(CCE_table,0,800*sizeof(int)); for (i = 0; i < DCI_pdu.Num_dci; i++) { // SI RNTI if (DCI_pdu.dci_alloc[i].rnti == SI_RNTI) { DCI_pdu.dci_alloc[i].firstCCE = get_nCCE_offset_l1(CCE_table, 1<<DCI_pdu.dci_alloc[i].L, numCCE, 1, SI_RNTI, subframe); } // RA RNTI else if (DCI_pdu.dci_alloc[i].ra_flag == 1) { DCI_pdu.dci_alloc[i].firstCCE = get_nCCE_offset_l1(CCE_table, 1<<DCI_pdu.dci_alloc[i].L, numCCE, 1, DCI_pdu.dci_alloc[i].rnti, subframe); } // C RNTI else { DCI_pdu.dci_alloc[i].firstCCE = get_nCCE_offset_l1(CCE_table, 1<<DCI_pdu.dci_alloc[i].L, numCCE, 0, DCI_pdu.dci_alloc[i].rnti, subframe); } if (n_frames==1) printf("dci %d: rnti 0x%x, format %d, L %d (aggreg %d), nCCE %d/%d dci_length 
%d\n",i,DCI_pdu.dci_alloc[i].rnti, DCI_pdu.dci_alloc[i].format, DCI_pdu.dci_alloc[i].L, 1<<DCI_pdu.dci_alloc[i].L, DCI_pdu.dci_alloc[i].firstCCE, numCCE, DCI_pdu.dci_alloc[i].dci_length); if (DCI_pdu.dci_alloc[i].firstCCE==-1) exit(-1); } num_pdcch_symbols = generate_dci_top(DCI_pdu.Num_dci, DCI_pdu.dci_alloc, 0, AMP, &eNB->frame_parms, eNB->common_vars.txdataF[eNb_id], subframe); if (n_frames==1) printf("num_pdcch_symbols at TX %d\n",num_pdcch_symbols); if (is_phich_subframe(&eNB->frame_parms,subframe)) { if (n_frames==1) printf("generating PHICH\n"); harq_pid = phich_subframe_to_harq_pid(&eNB->frame_parms, proc_rxtx->frame_tx, subframe); phich_ACK = taus()&1; eNB->ulsch[0]->harq_processes[harq_pid]->phich_active = 1; eNB->ulsch[0]->harq_processes[harq_pid]->first_rb = 0; eNB->ulsch[0]->harq_processes[harq_pid]->n_DMRS = 0; eNB->ulsch[0]->harq_processes[harq_pid]->phich_ACK = phich_ACK; eNB->ulsch[0]->harq_processes[harq_pid]->dci_alloc = 1; UE->ulsch[0]->harq_processes[harq_pid]->first_rb = 0; UE->ulsch[0]->harq_processes[harq_pid]->n_DMRS = 0; generate_phich_top(eNB,proc_rxtx,AMP,0); // generate 3 interfering PHICH if (num_phich_interf>0) { eNB->ulsch[0]->harq_processes[harq_pid]->first_rb = 4; generate_phich_top(eNB,proc_rxtx,1024,0); } if (num_phich_interf>1) { eNB->ulsch[0]->harq_processes[harq_pid]->first_rb = 8; eNB->ulsch[0]->harq_processes[harq_pid]->n_DMRS = 1; generate_phich_top(eNB,proc_rxtx,1024,0); } if (num_phich_interf>2) { eNB->ulsch[0]->harq_processes[harq_pid]->first_rb = 12; eNB->ulsch[0]->harq_processes[harq_pid]->n_DMRS = 1; generate_phich_top(eNB,proc_rxtx,1024,0); } eNB->ulsch[0]->harq_processes[harq_pid]->first_rb = 0; } // LOG_M("pilotsF.m","rsF",txdataF[0],lte_eNB->frame_parms.ofdm_symbol_size,1,1); if (n_frames==1) { LOG_M("txsigF0.m","txsF0", eNB->common_vars.txdataF[eNb_id][0],4*nsymb*OFDM_SYMBOL_SIZE_COMPLEX_SAMPLES_NO_PREFIX,1,1); if (eNB->frame_parms.nb_antenna_ports_eNB > 1) LOG_M("txsigF1.m","txsF1", 
eNB->common_vars.txdataF[eNb_id][1],4*nsymb*OFDM_SYMBOL_SIZE_COMPLEX_SAMPLES_NO_PREFIX,1,1); } tx_lev = 0; for (aa=0; aa<eNB->frame_parms.nb_antenna_ports_eNB; aa++) { if (eNB->frame_parms.Ncp == 1) PHY_ofdm_mod(&eNB->common_vars.txdataF[eNb_id][aa][subframe*nsymb*eNB->frame_parms.ofdm_symbol_size], // input, &txdata[aa][subframe*eNB->frame_parms.samples_per_tti], // output eNB->frame_parms.ofdm_symbol_size, 2*nsymb, // number of symbols eNB->frame_parms.nb_prefix_samples, // number of prefix samples CYCLIC_PREFIX); else { normal_prefix_mod(&eNB->common_vars.txdataF[eNb_id][aa][subframe*nsymb*eNB->frame_parms.ofdm_symbol_size], &txdata[aa][subframe*eNB->frame_parms.samples_per_tti], 2*nsymb, frame_parms); } tx_lev += signal_energy(&txdata[aa][subframe*eNB->frame_parms.samples_per_tti], eNB->frame_parms.ofdm_symbol_size); } tx_lev_dB = (unsigned int) dB_fixed(tx_lev); } for (i=0; i<2*nsymb*OFDM_SYMBOL_SIZE_COMPLEX_SAMPLES; i++) { for (aa=0; aa<eNB->frame_parms.nb_antenna_ports_eNB; aa++) { if (awgn_flag == 0) { s_re[aa][i] = ((double)(((short *)txdata[aa]))[(2*subframe*UE->frame_parms.samples_per_tti) + (i<<1)]); s_im[aa][i] = ((double)(((short *)txdata[aa]))[(2*subframe*UE->frame_parms.samples_per_tti) + (i<<1)+1]); } else { for (aarx=0; aarx<UE->frame_parms.nb_antennas_rx; aarx++) { if (aa==0) { r_re[aarx][i] = ((double)(((short *)txdata[aa]))[(2*subframe*UE->frame_parms.samples_per_tti) + (i<<1)]); r_im[aarx][i] = ((double)(((short *)txdata[aa]))[(2*subframe*UE->frame_parms.samples_per_tti) + (i<<1)+1]); } else { r_re[aarx][i] += ((double)(((short *)txdata[aa]))[(2*subframe*UE->frame_parms.samples_per_tti) + (i<<1)]); r_im[aarx][i] += ((double)(((short *)txdata[aa]))[(2*subframe*UE->frame_parms.samples_per_tti) + (i<<1)+1]); } } } } } if (awgn_flag == 0) { multipath_channel(eNB2UE,s_re,s_im,r_re,r_im, 2*nsymb*OFDM_SYMBOL_SIZE_COMPLEX_SAMPLES,0); } //LOG_M("channel0.m","chan0",ch[0],channel_length,1,8); // scale by path_loss = NOW - P_noise //sigma2 = 
pow(10,sigma2_dB/10); //N0W = -95.87; sigma2_dB = (double)tx_lev_dB +10*log10((double)eNB->frame_parms.ofdm_symbol_size/(double)(12*eNB->frame_parms.N_RB_DL)) - SNR; if (n_frames==1) printf("sigma2_dB %f (SNR %f dB) tx_lev_dB %d\n",sigma2_dB,SNR,tx_lev_dB); //AWGN sigma2 = pow(10,sigma2_dB/10); // printf("Sigma2 %f (sigma2_dB %f)\n",sigma2,sigma2_dB); for (i=0; i<2*nsymb*OFDM_SYMBOL_SIZE_COMPLEX_SAMPLES; i++) { for (aa=0; aa<UE->frame_parms.nb_antennas_rx; aa++) { ((short *) UE->common_vars.rxdata[aa])[(2*subframe*UE->frame_parms.samples_per_tti) + 2*i] = (short) (.667*(r_re[aa][i] + sqrt(sigma2/2)*gaussdouble(0.0,1.0))); ((short *) UE->common_vars.rxdata[aa])[(2*subframe*UE->frame_parms.samples_per_tti) + 2*i+1] = (short) (.667*(r_im[aa][i] + (iqim*r_re[aa][i]) + sqrt(sigma2/2)*gaussdouble( 0.0,1.0))); /* ((short*)UE->common_vars.rxdata[aa])[(2*subframe*UE->frame_parms.samples_per_tti) + 2*i] = ((short*)txdata[aa])[(2*subframe*UE->frame_parms.samples_per_tti) + 2*i]; ((short*)UE->common_vars.rxdata[aa])[(2*subframe*UE->frame_parms.samples_per_tti) + 2*i+1] = ((short*)txdata[aa])[(2*subframe*UE->frame_parms.samples_per_tti) + 2*i+1]; */ } } // UE receiver for (l=0; l<eNB->frame_parms.symbols_per_tti; l++) { // subframe_offset = (l/eNB->frame_parms.symbols_per_tti)*eNB->frame_parms.samples_per_tti; // printf("subframe_offset = %d\n",subframe_offset); slot_fep(UE, l%(eNB->frame_parms.symbols_per_tti/2), (2*subframe)+(l/(eNB->frame_parms.symbols_per_tti/2)), 0, 0, 0); if (UE->perfect_ce == 1) { if (awgn_flag==0) { // fill in perfect channel estimates freq_channel(eNB2UE,UE->frame_parms.N_RB_DL,12*UE->frame_parms.N_RB_DL + 1); //LOG_M("channel.m","ch",desc1->ch[0],desc1->channel_length,1,8); //LOG_M("channelF.m","chF",desc1->chF[0],nb_samples,1,8); for(k=0; k<NUMBER_OF_eNB_MAX; k++) { for(aa=0; aa<frame_parms->nb_antennas_tx; aa++) { for (aarx=0; aarx<frame_parms->nb_antennas_rx; aarx++) { for (i=0; i<frame_parms->N_RB_DL*12; i++) { ((int16_t *) 
UE->common_vars.common_vars_rx_data_per_thread[subframe&0x1].dl_ch_estimates[k][(aa<<1)+aarx])[2*i+(l*frame_parms->ofdm_symbol_size+LTE_CE_FILTER_LENGTH)*2]=(int16_t)( eNB2UE->chF[aarx+(aa*frame_parms->nb_antennas_rx)][i].x*AMP); ((int16_t *) UE->common_vars.common_vars_rx_data_per_thread[subframe&0x1].dl_ch_estimates[k][(aa<<1)+aarx])[2*i+1+(l*frame_parms->ofdm_symbol_size+LTE_CE_FILTER_LENGTH)*2]=(int16_t)( eNB2UE->chF[aarx+(aa*frame_parms->nb_antennas_rx)][i].y*AMP); } } } } } else { for(aa=0; aa<frame_parms->nb_antenna_ports_eNB; aa++) { for (aarx=0; aarx<frame_parms->nb_antennas_rx; aarx++) { for (i=0; i<frame_parms->N_RB_DL*12; i++) { ((int16_t *) UE->common_vars.common_vars_rx_data_per_thread[subframe&0x1].dl_ch_estimates[0][(aa<<1)+aarx])[2*i+(l*frame_parms->ofdm_symbol_size+LTE_CE_FILTER_LENGTH)*2]=(short)(AMP); ((int16_t *) UE->common_vars.common_vars_rx_data_per_thread[subframe&0x1].dl_ch_estimates[0][(aa<<1)+aarx])[2*i+1+(l*frame_parms->ofdm_symbol_size+LTE_CE_FILTER_LENGTH)*2]=0/2; } } } } } if (l==((eNB->frame_parms.Ncp==0)?4:3)) { // LOG_M("H00.m","h00",&(UE->common_vars.dl_ch_estimates[0][0][0]),((frame_parms->Ncp==0)?7:6)*(eNB->frame_parms.ofdm_symbol_size),1,1); // do PDCCH procedures here UE->pdcch_vars[0][0]->crnti = n_rnti; // printf("Doing RX : num_pdcch_symbols at TX %d\n",num_pdcch_symbols); rx_pdcch(UE, trial, subframe, 0, (UE->frame_parms.mode1_flag == 1) ? 
SISO : ALAMOUTI, UE->high_speed_flag, UE->is_secondary_ue); if (is_phich_subframe(&UE->frame_parms,subframe)) { UE->ulsch[0]->harq_processes[phich_subframe_to_harq_pid(&UE->frame_parms,0,subframe)]->status = ACTIVE; //UE->ulsch[0]->harq_processes[phich_subframe_to_harq_pid(&UE->frame_parms,0,subframe)]->Ndi = 1; rx_phich(UE, &UE->proc.proc_rxtx[subframe&1], subframe, 0); } // if (UE->pdcch_vars[0]->num_pdcch_symbols != num_pdcch_symbols) // break; dci_cnt = dci_decoding_procedure(UE, dci_alloc_rx,1, 0,subframe); common_rx=0; ul_rx=0; dl_rx=0; if (n_frames==1) { numCCE = get_nCCE(UE->pdcch_vars[0][0]->num_pdcch_symbols, &UE->frame_parms, get_mi(&UE->frame_parms,subframe)); for (i = 0; i < dci_cnt; i++) printf("dci %d: rnti 0x%x, format %d, L %d, nCCE %d/%d dci_length %d\n",i, dci_alloc_rx[i].rnti, dci_alloc_rx[i].format, dci_alloc_rx[i].L, dci_alloc_rx[i].firstCCE, numCCE, dci_alloc_rx[i].dci_length); } for (i=0; i<dci_cnt; i++) { if (dci_alloc_rx[i].rnti == SI_RNTI) { if (n_frames==1) dump_dci(&UE->frame_parms, &dci_alloc_rx[i]); common_rx=1; } if ((dci_alloc_rx[i].rnti == n_rnti) && (dci_alloc_rx[i].format == format0)) { if (n_frames==1) dump_dci(&UE->frame_parms, &dci_alloc_rx[i]); ul_rx=1; } if ((dci_alloc_rx[i].rnti == n_rnti) && ((dci_alloc_rx[i].format == format1))) { if (n_frames==1) dump_dci(&UE->frame_parms, &dci_alloc_rx[i]); dl_rx=1; } if ((dci_alloc_rx[i].rnti != n_rnti) && (dci_alloc_rx[i].rnti != SI_RNTI)) false_detection_cnt++; } if (n_frames==1) printf("RX DCI Num %d (Common DCI %d, DL DCI %d, UL DCI %d)\n", dci_cnt, common_rx, dl_rx, ul_rx); if ((common_rx==0)&&(common_active==1)) n_errors_common++; if ((ul_rx==0)&&(ul_active==1)) { n_errors_ul++; // exit(-1); } if ((dl_rx==0)&&(dl_active==1)) { n_errors_dl++; // exit(-1); } if (UE->pdcch_vars[0][0]->num_pdcch_symbols != num_pdcch_symbols) n_errors_cfi++; /* if (is_phich_subframe(&UE->frame_parms,subframe)) if (UE->ulsch[0]->harq_processes[phich_subframe_to_harq_pid(&UE->frame_parms, UE->frame, 
subframe)]->Ndi != phich_ACK) n_errors_hi++; */ if (n_errors_cfi > 10) break; } } // symbol loop if (n_errors_cfi > 100) break; if ((n_errors_ul>1000) && (n_errors_dl>1000) && (n_errors_common>1000)) break; #ifdef XFORMS phy_scope_UE(form_ue, UE, eNb_id,0,subframe); #endif } //trials if (common_active) printf("SNR %f : n_errors_common = %d/%d (%e)\n", SNR,n_errors_common,n_trials_common,(double)n_errors_common/n_trials_common); if (ul_active==1) printf("SNR %f : n_errors_ul = %d/%d (%e)\n", SNR,n_errors_ul,n_trials_ul,(double)n_errors_ul/n_trials_ul); if (dl_active==1) printf("SNR %f : n_errors_dl = %d/%d (%e)\n", SNR,n_errors_dl,n_trials_dl,(double)n_errors_dl/n_trials_dl); printf("SNR %f : n_errors_cfi = %d/%d (%e)\n", SNR,n_errors_cfi,trial,(double)n_errors_cfi/trial); printf("SNR %f : n_errors_hi = %d/%d (%e)\n", SNR,n_errors_hi,trial,(double)n_errors_hi/trial); } // SNR if (n_frames==1) { LOG_M("txsig0.m","txs0", txdata[0],FRAME_LENGTH_COMPLEX_SAMPLES,1,1); if (n_tx>1) LOG_M("txsig1.m","txs1", txdata[1],FRAME_LENGTH_COMPLEX_SAMPLES,1,1); LOG_M("rxsig0.m","rxs0", UE->common_vars.rxdata[0],10*frame_parms->samples_per_tti,1,1); LOG_M("rxsigF0.m","rxsF0", UE->common_vars.common_vars_rx_data_per_thread[subframe&0x1].rxdataF[0],NUMBER_OF_OFDM_CARRIERS*2*((frame_parms->Ncp==0)?14:12),2,1); if (n_rx>1) { LOG_M("rxsig1.m","rxs1", UE->common_vars.rxdata[1],10*frame_parms->samples_per_tti,1,1); LOG_M("rxsigF1.m","rxsF1", UE->common_vars.common_vars_rx_data_per_thread[subframe&0x1].rxdataF[1],NUMBER_OF_OFDM_CARRIERS*2*((frame_parms->Ncp==0)?14:12),2,1); } LOG_M("H00.m","h00",&(UE->common_vars.common_vars_rx_data_per_thread[subframe&0x1].dl_ch_estimates[0][0][0]),((frame_parms->Ncp==0)?7:6)*(eNB->frame_parms.ofdm_symbol_size),1,1); if (n_tx==2) LOG_M("H10.m","h10",&(UE->common_vars.common_vars_rx_data_per_thread[subframe&0x1].dl_ch_estimates[0][2][0]),((frame_parms->Ncp==0)?7:6)*(eNB->frame_parms.ofdm_symbol_size),1,1); 
LOG_M("pdcch_rxF_ext0.m","pdcch_rxF_ext0",UE->pdcch_vars[0][eNb_id]->rxdataF_ext[0],3*12*UE->frame_parms.N_RB_DL,1,1); LOG_M("pdcch_rxF_comp0.m","pdcch0_rxF_comp0",UE->pdcch_vars[0][eNb_id]->rxdataF_comp[0],4*12*UE->frame_parms.N_RB_DL,1,1); LOG_M("pdcch_rxF_llr.m","pdcch_llr",UE->pdcch_vars[0][eNb_id]->llr,2400,1,4); } lte_sync_time_free(); if ( input_fd != NULL) fclose(input_fd); return(n_errors_ul); } /* for (i=1;i<4;i++) memcpy((void *)&PHY_vars->tx_vars[0].TX_DMA_BUFFER[i*12*OFDM_SYMBOL_SIZE_COMPLEX_SAMPLES_NO_PREFIX*2], (void *)&PHY_vars->tx_vars[0].TX_DMA_BUFFER[0], 12*OFDM_SYMBOL_SIZE_SAMPLES_NO_PREFIX*2); */
43.497046
213
0.555681
[ "model" ]
bd2e09de022a8aedb7c6d5c1a80d0c67ab9bd930
2,323
h
C
atlas_npu/ascendbase/src/Base/Log/Log.h
wangshankun/Tengine_Atlas
b5485039e72b4a624c795ff95d73eb6d719c7706
[ "Apache-2.0" ]
null
null
null
atlas_npu/ascendbase/src/Base/Log/Log.h
wangshankun/Tengine_Atlas
b5485039e72b4a624c795ff95d73eb6d719c7706
[ "Apache-2.0" ]
null
null
null
atlas_npu/ascendbase/src/Base/Log/Log.h
wangshankun/Tengine_Atlas
b5485039e72b4a624c795ff95d73eb6d719c7706
[ "Apache-2.0" ]
null
null
null
/* * Copyright (c) 2020.Huawei Technologies Co., Ltd. All rights reserved. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef LOG_H #define LOG_H #include <mutex> #include <sstream> #include <string> #include <vector> namespace AtlasAscendLog { // log level enum LogLevels { LOG_LEVEL_DEBUG = 0, LOG_LEVEL_INFO = 1, LOG_LEVEL_WARN = 2, LOG_LEVEL_ERROR = 3, LOG_LEVEL_FATAL = 4, LOG_LEVEL_NONE }; class Log { public: Log(std::string file, std::string function, int line, uint32_t level); ~Log(); std::ostringstream &Stream(); // log switch, turn on and off both screen and file log of special level. 
static void LogDebugOn(); static void LogInfoOn(); static void LogWarnOn(); static void LogErrorOn(); static void LogFatalOn(); static void LogAllOn(); static void LogAllOff(); private: std::ostringstream ss_; uint32_t myLevel_; std::string date_; static uint32_t logLevel; static std::vector<std::string> levelString; static std::mutex mutex; static std::string logFile; }; } // namespace AtlasAscendLog #define LogDebug AtlasAscendLog::Log(__FILE__, __FUNCTION__, __LINE__, AtlasAscendLog::LOG_LEVEL_DEBUG).Stream() #define LogInfo AtlasAscendLog::Log(__FILE__, __FUNCTION__, __LINE__, AtlasAscendLog::LOG_LEVEL_INFO).Stream() #define LogWarn AtlasAscendLog::Log(__FILE__, __FUNCTION__, __LINE__, AtlasAscendLog::LOG_LEVEL_WARN).Stream() #define LogError AtlasAscendLog::Log(__FILE__, __FUNCTION__, __LINE__, AtlasAscendLog::LOG_LEVEL_ERROR).Stream() #define LogFatal AtlasAscendLog::Log(__FILE__, __FUNCTION__, __LINE__, AtlasAscendLog::LOG_LEVEL_FATAL).Stream() #define LOG(security) AtlasAscendLog::LOG_##security.Stream() #endif
33.666667
113
0.714163
[ "vector" ]
bd34550c779aa2421f5b2e83c2228f94a1f790a2
483
h
C
include/europa/gc/header.h
leoagomes/europa
1c588c3f90fd98e9c60304c3926a7074d5e9cfbd
[ "MIT" ]
1
2020-07-30T19:33:53.000Z
2020-07-30T19:33:53.000Z
include/europa/gc/header.h
leoagomes/europa
1c588c3f90fd98e9c60304c3926a7074d5e9cfbd
[ "MIT" ]
null
null
null
include/europa/gc/header.h
leoagomes/europa
1c588c3f90fd98e9c60304c3926a7074d5e9cfbd
[ "MIT" ]
null
null
null
#ifndef __EUROPA_GC_HEADER_H__ #define __EUROPA_GC_HEADER_H__ #include "europa/int.h" #include "europa/common.h" struct europa_object; #define EU_OBJECT_GC_HEADER \ struct europa_object *_previous; /*!< previous heap object list item */\ struct europa_object *_next; /*!< next heap object list item */\ unsigned int _reference_count; /*!< number of references to the object */\ unsigned char _color; /*!< m&s object color */\ unsigned char _type; /*!< object type */ #endif
28.411765
75
0.73706
[ "object" ]
bd35eb9211be9275c1ae8fbc351d48bc088f6ec7
781
h
C
src/swarm.h
hemulens/Particle-Explosion
901287054b4f4db6c4df602b2fa5d9dd5fa28f36
[ "MIT" ]
1
2021-07-28T08:15:37.000Z
2021-07-28T08:15:37.000Z
src/swarm.h
hemulens/Particle-Explosion
901287054b4f4db6c4df602b2fa5d9dd5fa28f36
[ "MIT" ]
null
null
null
src/swarm.h
hemulens/Particle-Explosion
901287054b4f4db6c4df602b2fa5d9dd5fa28f36
[ "MIT" ]
1
2021-01-04T20:17:33.000Z
2021-01-04T20:17:33.000Z
#ifndef SWARM_H_ #define SWARM_H_ #include "particle.h" namespace simulation { class Swarm { public: Swarm(); virtual ~Swarm(); // virtual destructor is needed to prevent undefined behavior when deleting a derived class object using a pointer of base class type; more: https://www.geeksforgeeks.org/virtual-destructor/ const static int NPARTICLES{5000}; const Particle *const getParticles(); // const pointer - the pointer can't be pointed anywhere else; the particles can't be changed either using the pointer returned void update(int elapsed); private: Particle *const _particles; // const pointer to a particle (which itself isn't a const) int _lastUpdateTime; }; } /* namespace simulation */ #endif /* SWARM_H_ */
33.956522
215
0.709347
[ "object" ]
bd3610101c8db48ef845fb94184a99f955574991
19,798
c
C
src/util/bsendutil2.c
RWTH-OS/MP-MPICH
f2ae296477bb9d812fda587221b3419c09f85b4a
[ "mpich2" ]
null
null
null
src/util/bsendutil2.c
RWTH-OS/MP-MPICH
f2ae296477bb9d812fda587221b3419c09f85b4a
[ "mpich2" ]
null
null
null
src/util/bsendutil2.c
RWTH-OS/MP-MPICH
f2ae296477bb9d812fda587221b3419c09f85b4a
[ "mpich2" ]
1
2021-01-23T11:01:01.000Z
2021-01-23T11:01:01.000Z
/* * $Id$ * * (C) 1993, 1996 by Argonne National Laboratory and * Mississipi State University. * See COPYRIGHT in top-level directory. * * The handling of nonblocking bsend operations needs some work. Currently, * There is a single request for a nonblocking bsend operation, and this can * cause problems when we try to complete a nonblocking bsend operation, becase * both we and the user may have a copy of the same request. * * The solution to this is a little complicated. Note that the MPI standard * requires that you can free an active request (just like the other MPI * objects, freeing an object just decrements its reference count; anything * that makes an object "active" increments its reference count). * So, one solution is to implement this reference count, and then make * use of it here (so that MPI_TEST will execute the Free and set the * pointer to NULL, but the actual free won't happen until the ref count is * set to zero). * * But to really do this, we need have some way to complete a nonblocking * operation even though the user will never again call it with a WAIT * or TEST call. * * As a short term fix, we ONLY call MPI_TEST in this code for blocking * BSENDs; this is safe, because the ONLY copy of the request is here. * Thus, the test on whether to check a request includes a check on the * blocking nature. Note also that the routine called to free a request * calls a special routine (MPIR_BufferFreeReq), so we can keep the * information here properly updated. * * Another approach, which I discussed with Hubertus, would be to alloc a * new request, have the buffer point at that, and copy all of the relavent * details into the given buffer. * * The "best" thing to do depends on how you interpret the various flavors * of buffered send: * Method 1. Bsend, Ibsend, and Bsend_init/Start all copy the data * into a buffer; when the data is copied, the routines return. 
In this * case, both Ibsend and Bsend_init/Start should indicate that the * send has completed, since the data INPUT to these routines has * been copied and my now be re-used. (There is, thank goodness, no * Ibs(ync)send). Note that in this case, the user's request and the * internal request are VERY different. * * Method 2. Ibsend and Bsend_init would not complete coping data into * the buffer until a later time. This may be intended for systems with * special move engines that operate asynchronously; some mechanism * would be required to determine completion. * * My choice is to copy the request and mark the "users" request as completed * when the data has been moved. */ #include "mpiimpl.h" /* #define DEBUG_BSEND */ #ifdef DEBUG_BSEND /* #DEBUG_BSEND_START# */ static int DebugBsend = 0; #define DEBUG_PRINT(str) PRINTF( "%s\n", str ); #else #define DEBUG_PRINT(str) #endif /* #DEBUG_BSEND_END# */ #include "reqalloc.h" #ifndef MEMCPY #define MEMCPY(a,b,n) memcpy(a,b,n) #endif /* This file contains the code for managing the "Buffered" sends (with a user-provided buffer). This uses the simple buffer scheme described in the MPI standard. Because the data in this list is sensitive, and because we could easily overwrite the data if we are not careful, I've added "Cookies" around the data. */ #define BSEND_HEAD_COOKIE 0xfea7600d #define BSEND_TAIL_COOKIE 0xcadd5ac9 typedef struct _bsenddata { long HeadCookie; struct _bsenddata *next, *prev; MPI_Request req; /* This is the actual request that is used to send the message; note that this is a POINTER to the appropriate structure. It is ALSO not the user's request, in the case that a nonblocking buffered send is used. */ /* area to use */ int len; void *buf; long TailCookie; } BSendData; /* If "req" is null, the block is not in use */ static BSendData *Bsend = 0; static int BsendSize = 0; BSendData *MPIR_MergeBlock ANSI_ARGS(( BSendData *)); /* * The algorithm and the routines. * The basic operation is started by MPI_Ibsend. 
A MPI_Bsend just does * MPI_Ibsend and MPI_Wait. These call * * MPIR_BsendInitBuffer( ) - to initialize bsend buffer * MPIR_BsendRelease( ) - to release bsend buffer (first completing * all communication). * MPIR_BsendAlloc( ) - to allocate space for the bsend buffer for a * Ibsend/Bsend_init, as well as the request that will * be used internally. * MPIR_BsendFree( ) - to release space * MPIR_BsendStart( ) - to begin a send (copy data and start send) * * Internal routines for buffer management are * MPIR_TestBufferPtr - Tests that bsend arena pointer is ok * MPIR_BsendBufferPrint - prints out the state of the buffer * MPIR_BsendCopyData - Copies data from user area into previously * allocated bsend area. */ /* MPIR_SetBuffer - Set the buffer area for the buffered sends, and initialize the internal data structures */ int MPIR_BsendInitBuffer( void *bufp, int size ) { BSendData *p; DEBUG_PRINT("Starting MPIR_BsendInitBuffer"); if (size < sizeof(BSendData)) return MPIR_Err_setmsg( MPI_ERR_OTHER, MPIR_ERR_BUFFER_TOO_SMALL, (char *)0, (char *)0, (char *)0, sizeof(BSendData) ); if (Bsend) return MPIR_ERRCLASS_TO_CODE(MPI_ERR_BUFFER,MPIR_ERR_BUFFER_EXISTS); p = (BSendData *)bufp; #ifdef DEBUG_BSEND /* #DEBUG_BSEND_START# */ if (DebugBsend) FPRINTF( stderr, "Initializing buffer to %d bytes at %lx\n", size, (long) p ); #endif /* #DEBUG_BSEND_END# */ p->next = 0; p->prev = 0; p->req = 0; p->len = size - sizeof(BSendData); p->HeadCookie = BSEND_HEAD_COOKIE; p->TailCookie = BSEND_TAIL_COOKIE; BsendSize = size; Bsend = p; DEBUG_PRINT("Exiting MPIR_BsendInitBuffer" ); return MPI_SUCCESS; } /* Tests that a buffer area has not been corrupted by checking sentinals at the head and tail of a buffer area. */ #define MPIR_TestBufferPtr( b ) \ (((b)->HeadCookie != BSEND_HEAD_COOKIE || \ (b)->TailCookie != BSEND_TAIL_COOKIE)) /* Free a buffer (MPI_BUFFER_DETACH). Note that this will wait to complete any pending operations. 
This routine is called by MPI_Finalize to make sure than any pending operations are completed. When called, it returns the current buffer and size in its arguments (both are output). */ int MPIR_BsendRelease( void **buf, int *size ) { BSendData *p = Bsend; MPI_Status status; int mpi_errno; DEBUG_PRINT("Entering MPIR_BsendRelease"); /* If we are using the buffer, we must first wait on all pending messages */ while (p) { if (MPIR_TestBufferPtr(p)) { /* Error in pointer */ mpi_errno = MPIR_Err_setmsg( MPI_ERR_INTERN, MPIR_ERR_BSEND_CORRUPT, (char *)0, (char *)0, (char *)0, "FreeBuffer" ); return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, (char *)0 ); } if (p->req) { #ifdef DEBUG_BSEND /* #DEBUG_BSEND_START# */ if (DebugBsend) FPRINTF( stderr, "Waiting for release of buffer at %lx with request %lx\n", (long) p, (long)p->req ); #endif /* #DEBUG_BSEND_END# */ MPI_Wait( &p->req, &status ); } p = p->next; } /* Note that this works even when the buffer does not exist */ *buf = (void *)Bsend; *size = BsendSize; Bsend = 0; BsendSize = 0; DEBUG_PRINT("Exiting MPIR_BsendRelease"); return MPI_SUCCESS; } /* This is an internal routine for merging bsend buffer blocks. Merge b with any previous or next empty blocks. 
Return the block to use next */ BSendData *MPIR_MergeBlock( BSendData *b ) { BSendData *tp, *nextb; int mpi_errno; DEBUG_PRINT("Entering MPIR_MergeBlock" ); nextb = b; tp = b->prev; if (tp && MPIR_TestBufferPtr(tp)) { /* Error in pointer */ mpi_errno = MPIR_Err_setmsg( MPI_ERR_INTERN, MPIR_ERR_BSEND_CORRUPT, (char *)0, (char *)0, (char *)0, "MergeBlock" ); (void)MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, (char *)0 ); return 0; } if (tp && tp->req == MPI_REQUEST_NULL) { /* Merge with previous block */ #ifdef DEBUG_BSEND /* #DEBUG_BSEND_START# */ if (DebugBsend) FPRINTF( stderr, "Merging block at %lx with next block\n", (long)tp ); #endif /* #DEBUG_BSEND_END# */ tp->next = b->next; if (b->next) b->next->prev = tp; tp->len += b->len + sizeof(BSendData); b = tp; nextb = b; } tp = b->next; if (tp && MPIR_TestBufferPtr(tp)) { /* Error in pointer */ mpi_errno = MPIR_Err_setmsg( MPI_ERR_INTERN, MPIR_ERR_BSEND_CORRUPT, (char *)0, (char *)0, (char *)0, "MergeBlock" ); (void)MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, (char *)0 ); return 0; } if (tp && tp->req == MPI_REQUEST_NULL) { /* Merge with next block */ #ifdef DEBUG_BSEND /* #DEBUG_BSEND_START# */ if (DebugBsend) FPRINTF( stderr, "Merging block at %lx with previous block at %lx\n", (long)tp, (long)b ); #endif /* #DEBUG_BSEND_END# */ b->next = tp->next; if (tp->next) tp->next->prev = b->prev; b->len += tp->len + sizeof(BSendData); } DEBUG_PRINT("Exiting MPIR_MergeBlock"); return nextb; } /* The input to this routine is a size (in bytes) and an already created MPI_Request; the output is a pointer to the allocated buffer space. It also holds all of the information needed to pack the data, in the event that this is a persistent, non-blocking, buffered send (!). Note that this must be called ONLY after all other fields in the incoming request are set. This routine will modify the request by marking it as completed. 
*/ int MPIR_BsendAlloc( int size, MPI_Request rq, void **bufp ) { BSendData *b, *new; int flag; MPI_Status status; int mpi_errno; DEBUG_PRINT("Entering MPIR_BsendAlloc"); /* Round size to a multiple of 8 */ if (size & 0x7) size += (8 - (size & 0x7)); do { b = Bsend; while (b) { if (MPIR_TestBufferPtr(b)) { /* Error in pointer */ mpi_errno = MPIR_Err_setmsg( MPI_ERR_INTERN, MPIR_ERR_BSEND_CORRUPT, (char *)0, (char *)0, (char *)0, "BsendAlloc" ); return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, (char *)0 ); } /* Note that since the request in the bsend data is private, we can always execute this test */ if (b->req != MPI_REQUEST_NULL/* && !b->req->shandle.is_complete*/) { /* Test for completion; merge if necessary. If the request is not active, we don't do the test. */ #ifdef DEBUG_BSEND /* #DEBUG_BSEND_START# */ if (DebugBsend) FPRINTF( stderr, "Testing for completion of block at %lx\n", (long)b ); #endif /* #DEBUG_BSEND_END# */ MPI_Test( &b->req, &flag, &status ); /* If completed and not persistant, remove */ if (flag && !b->req) { #ifdef DEBUG_BSEND /* #DEBUG_BSEND_START# */ if (DebugBsend) FPRINTF( stderr, "Found completed bsend\n" ); #endif /* #DEBUG_BSEND_END# */ /* Done; merge the blocks and test again */ b = MPIR_MergeBlock( b ); continue; } } if (b->req == MPI_REQUEST_NULL) { /* Try to merge with surrounding blocks */ b = MPIR_MergeBlock( b ); } if (b->req == MPI_REQUEST_NULL && b->len >= size) { MPIR_SHANDLE *shandle; /* Split the block if there is enough room */ if (b->len > size + (int)sizeof(BSendData) + 8) { #ifdef DEBUG_BSEND /* #DEBUG_BSEND_START# */ if (DebugBsend) FPRINTF( stderr, "Found large block of size %d (need %d) at %lx\n", b->len, size, (long)b ); #endif /* #DEBUG_BSEND_END# */ new = (BSendData *)(((char *)b) + sizeof(BSendData) + size); new->next = b->next; if (b->next) b->next->prev = new; new->prev = b; b->next = new; new->len = b->len - size - sizeof(BSendData); new->req = MPI_REQUEST_NULL; new->HeadCookie = BSEND_HEAD_COOKIE; 
new->TailCookie = BSEND_TAIL_COOKIE; b->len = size; } #ifdef DEBUG_BSEND /* #DEBUG_BSEND_START# */ if (DebugBsend) FPRINTF( stderr, "Creating bsend block at %lx of size %d\n", (long)b, size ); #endif /* #DEBUG_BSEND_END# */ *bufp = (void *)(b+1); /* Create a local request to use */ /* BUG - This should be allocated in place */ MPID_Send_alloc(shandle); if (!shandle) return MPI_ERR_EXHAUSTED; b->req = (MPI_Request)shandle; MPID_Request_init( (MPI_Request)shandle, MPIR_SEND ); /* MEMCPY( b->req, rq, sizeof(MPIR_SHANDLE) ); */ /* Save the buffer address */ b->buf = *bufp; /* Mark in the request (user's) where the corresponding bsend area is */ rq->shandle.bsend = (void *)b; /* Also remember in the bsend request */ b->req->shandle.bsend = (void *)b; DEBUG_PRINT("Exiting MPIR_BsendAlloc"); return MPI_SUCCESS; } b = b->next; } } while (MPID_DeviceCheck( MPID_NOTBLOCKING ) != -1); /* Formally, we don't need the DeviceCheck here; it is the user's responsibility to provide enough buffering. However, doing this gives us a better chance that user's program will run anyway, and since the program is erroneous if we get here, the behavior is up to the implementation. We try to be nice to the user. */ #ifdef DEBUG_BSEND /* #DEBUG_BSEND_START# */ FPRINTF( stdout, "Could not find %d bytes in buffer\n", size ); MPIR_BsendBufferPrint(); #endif /* #DEBUG_BSEND_END# */ DEBUG_PRINT("Exiting MPIR_BsendAlloc"); return MPIR_ERRCLASS_TO_CODE(MPI_ERR_BUFFER,MPIR_ERR_USER_BUFFER_EXHAUSTED); } /* This routine actually transfers the data from the users buffer to the internal buffer. A bsend area must already exist for it, and be marked by bine set in the rq->bsend field (see the MPIR_SHANDLE structure). 
*/ void MPIR_BsendCopyData( MPIR_SHANDLE *shandle, struct MPIR_COMMUNICATOR *comm_ptr, void *buf, int count, struct MPIR_DATATYPE *dtype_ptr, void **bsend_buf, int *bsend_len ) { BSendData *b; int outcount, position = 0; int mpi_errno; /* MPIR_SHANDLE *brq; */ DEBUG_PRINT("Entering MPIR_BsendCopyData"); b = (BSendData *)(shandle->bsend); if (!b) { mpi_errno = MPIR_Err_setmsg( MPI_ERR_INTERN, MPIR_ERR_BSEND_DATA, (char *)0, "Error in BSEND data", (char *)0 ); MPIR_ERROR( comm_ptr, mpi_errno, (char *)0 ); return; } if (MPIR_TestBufferPtr(b)) { /* Error in pointer */ mpi_errno = MPIR_Err_setmsg( MPI_ERR_INTERN, MPIR_ERR_BSEND_CORRUPT, (char *)0, (char *)0, (char *)0, "BsendCopyData" ); MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, (char *)0 ); return; } #ifdef FOO /* This really should be the same as rq now... */ brq = (MPIR_SHANDLE *)b->req; if (shandle != brq) { MPIR _ ERROR( MPIR_COMM_WORLD, MPI_ERR_INTERN, "Error in BSEND data; requests do not match" ); } #endif outcount = b->len; MPI_Pack( buf, count, dtype_ptr->self, b->buf, outcount, &position, comm_ptr->self ); *bsend_buf = b->buf; /* The number of bytes actually taken is returned in position */ *bsend_len = position; /* Consistency tests */ if (MPIR_TestBufferPtr(b)) { /* Error in pointer after we've packed into it */ mpi_errno = MPIR_Err_setmsg( MPI_ERR_INTERN, MPIR_ERR_BSEND_PREPARE, (char *)0, "Error in BSEND data, corruption detected at end of PrepareBuffer", (char *)0 ); MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, (char *)0 ); } if (b->next && MPIR_TestBufferPtr(b->next)) { /* Error in pointer after we've packed into it */ mpi_errno = MPIR_Err_setmsg( MPI_ERR_INTERN, MPIR_ERR_BSEND_PREPAREDATA, (char *)0, "Error in BSEND data, corruption detected at data end of PrepareBuffer", (char *)0 ); MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, (char *)0 ); } DEBUG_PRINT("Exiting MPIR_PrepareBuffer"); } /* Set the persistant flag for a request */ void MPIR_BsendPersistent( MPI_Request request, int flag) { BSendData *b; b = 
(BSendData *)request->shandle.bsend; if (flag) b->req->handle_type = MPIR_PERSISTENT_SEND; else b->req->handle_type = MPIR_SEND; } /* Mark a request as free in the Buffer code This is called only in MPI_Request_free (commreq_free.c) Note that we never want a USER call to free an INTERNAL buffer request. We do this by marking the request used by these routines as a regular send (MPIR_SEND) which it is. We may actually not need this routine, since we handle the case internally in the get/merge code. */ void MPIR_BsendFreeReq( MPIR_SHANDLE *rq) { BSendData *b; int mpi_errno; DEBUG_PRINT("Entering MPIR_BsendFreeReq"); #ifdef DEBUG_BSEND /* #DEBUG_BSEND_START# */ if (DebugBsend) FPRINTF( stderr, "Nulling Bsend request at %lx\n", (long) b ); #endif /* #DEBUG_BSEND_END# */ if (!rq->bsend) return; b = (BSendData *)(rq->bsend); if (MPIR_TestBufferPtr(b)) { /* Error in pointer */ mpi_errno = MPIR_Err_setmsg( MPI_ERR_INTERN, MPIR_ERR_BSEND_CORRUPT, (char *)0, (char *)0, (char *)0, "FreeBuffer" ); MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, (char *)0 ); return; } b->req = MPI_REQUEST_NULL; DEBUG_PRINT("Exiting MPIR_BsendFreeReq"); } #ifdef DEBUG_BSEND /* #DEBUG_BSEND_START# */ /* * This is a debugging routine */ int MPIR_BsendBufferPrint( ) { BSendData *b; int mpi_errno; FPRINTF( stdout, "Printing buffer arena\n" ); b = Bsend; while (b) { if (MPIR_TestBufferPtr(b)) { /* Error in pointer */ mpi_errno = MPIR_Err_setmsg( MPI_ERR_INTERN, MPIR_ERR_BSEND_CORRUPT, (char *)0, (char *)0, (char *)0, "PrintBuffer" ); return MPIR_ERROR( MPIR_COMM_WORLD, mpi_errno, (char *)0 ); } FPRINTF( stdout, "%lx : len = %d, req = %lx\n", (long)b, b->len, (long)(b->req) ); b = b->next; } FPRINTF( stdout, "End of printing buffer arena\n" ); return 0; } #endif /* #DEBUG_BSEND_END# */ /* This routine is called by MPI_Start to start an persistent bsend. 
The incoming requests is the USERS request */ void MPIR_IbsendDatatype( struct MPIR_COMMUNICATOR *comm_ptr, void *buf, int count, struct MPIR_DATATYPE *dtype_ptr, int src_lrank, int tag, int context_id, int dest_grank, MPI_Request request, int *error_code, int dummy ) { MPI_Request bsend_request; int bsend_len; void *bsend_buf; int mpi_errno = MPI_SUCCESS; /* Trivial case first */ if (dest_grank == MPI_PROC_NULL) { (request)->shandle.is_complete = 1; *error_code = MPI_SUCCESS; return; } /* init request */ bsend_request = ((BSendData *)(request->shandle.bsend))->req; MPID_Request_init( (MPI_Request)(&(bsend_request)->shandle), MPIR_SEND ); /* Pack data as necessary into buffer */ MPIR_BsendCopyData( &request->shandle, comm_ptr, buf, count, dtype_ptr, &bsend_buf, &bsend_len ); /* use ISendContig to send the message */ MPID_IsendDatatype( comm_ptr, bsend_buf, bsend_len, MPIR_PACKED_PTR, src_lrank, tag, context_id, dest_grank, bsend_request, &mpi_errno, 1 ); if (mpi_errno) { *error_code = MPIR_ERROR( comm_ptr, mpi_errno, (char *)0 ); } request->shandle.is_complete = 1; }
33.051753
80
0.645974
[ "object" ]
bd3938eb13a37e9b1a04049079da065cf28fd0ce
5,301
h
C
hw/ip/otbn/dv/model/otbn_model.h
KiranaGowda/https-github.com-lowRISC-opentitan
c0b2ef5041e8d34f264b2ddaccc8d5cfdb97b413
[ "Apache-2.0" ]
null
null
null
hw/ip/otbn/dv/model/otbn_model.h
KiranaGowda/https-github.com-lowRISC-opentitan
c0b2ef5041e8d34f264b2ddaccc8d5cfdb97b413
[ "Apache-2.0" ]
null
null
null
hw/ip/otbn/dv/model/otbn_model.h
KiranaGowda/https-github.com-lowRISC-opentitan
c0b2ef5041e8d34f264b2ddaccc8d5cfdb97b413
[ "Apache-2.0" ]
null
null
null
// Copyright lowRISC contributors. // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 #ifndef OPENTITAN_HW_IP_OTBN_DV_MODEL_OTBN_MODEL_H_ #define OPENTITAN_HW_IP_OTBN_DV_MODEL_OTBN_MODEL_H_ #include <cstdint> #include <stdexcept> #include <string> #include <svdpi.h> #include <vector> #include "otbn_memutil.h" struct ISSWrapper; class OtbnModel { public: OtbnModel(const std::string &mem_scope, const std::string &design_scope); ~OtbnModel(); // Replace any current loop warps with those from memutil. Returns 0 // on success or -1 on failure. In the latter case, a message will // already have been written to stderr. int take_loop_warps(const OtbnMemUtil &memutil); // True if this model is running in a simulation that has an RTL // implementation too (which needs checking). bool has_rtl() const { return !design_scope_.empty(); } // Start a new run with the model, writing IMEM/DMEM and jumping to address // zero. Returns 0 on success; -1 on failure. int start(); // Flush EDN data from model because of edn_rst_n void edn_flush(); // EDN Step sends ISS the RND data when ACK signal is high. void edn_rnd_step(svLogicVecVal *edn_rnd_data /* logic [31:0] */); void set_keymgr_value(svLogicVecVal *key0 /* logic [383:0] */, svLogicVecVal *key1 /* logic [383:0] */, unsigned char valid); // EDN Step sends ISS the URND related EDN data when ACK signal is high. void edn_urnd_step(svLogicVecVal *edn_urnd_data /* logic [31:0] */); // Signals that RTL is finished processing RND data from EDN void edn_rnd_cdc_done(); // Signals that RTL is finished processing data from EDN for URND void edn_urnd_cdc_done(); // Step once in the model. Returns 1 if the model has finished, 0 if not and // -1 on failure. If gen_trace is true, pass trace entries to the trace // checker. If the model has finished, writes otbn.ERR_BITS to *err_bits. 
int step(svBitVecVal *status /* bit [7:0] */, svBitVecVal *insn_cnt /* bit [31:0] */, svBitVecVal *rnd_req /* bit [0:0] */, svBitVecVal *err_bits /* bit [31:0] */, svBitVecVal *stop_pc /* bit [31:0] */); // Check model against RTL (if there is any) when a run has finished. Prints // messages to stderr on failure or mismatch. Returns 1 for a match, 0 for a // mismatch, -1 for some other failure. int check() const; // Grab contents of dmem from the model and load it back into the RTL // simulation. This is used when there's no RTL model of the design. Returns // 0 on success; -1 on failure. int load_dmem(); // Mark all of IMEM as invalid so that any fetch causes an integrity // error. Returns 0 on success; -1 on failure. int invalidate_imem(); // Mark all of DMEM as invalid so that any load causes an integrity // error. Returns 0 on success; -1 on failure. int invalidate_dmem(); // Step CRC by consuming 48 bits of data. // // This doesn't actually update any internal state: we're just using the // otbn_model framework as a convenient connection between SystemVerilog and // Python. Returns 0 on success; -1 on failure. int step_crc(const svBitVecVal *item /* bit [47:0] */, svBitVecVal *state /* bit [31:0] */); // Flush any information in the model void reset(); // React to a lifecycle controller escalation signal. Returns 0 on // success; -1 on failure. int send_lc_escalation(); private: // Constructs an ISS wrapper if necessary. If something goes wrong, this // function prints a message and then returns null. If ensure is true, it // will never return null without printing a message, so error handling at // the callsite can silently return a failure code. ISSWrapper *ensure_wrapper(); // Read the contents of the ISS's memory Ecc32MemArea::EccWords get_sim_memory(bool is_imem) const; // Set the contents of the ISS's memory void set_sim_memory(bool is_imem, const std::vector<uint8_t> &data); // Grab contents of dmem from the model and compare them with the RTL. 
Prints // messages to stderr on failure or mismatch. Returns true on success; false // on mismatch. Throws a std::runtime_error on failure. bool check_dmem(ISSWrapper &iss) const; // Compare contents of ISS registers with those from the design. Prints // messages to stderr on failure or mismatch. Returns true on success; false // on mismatch. Throws a std::runtime_error on failure. bool check_regs(ISSWrapper &iss) const; // Compare contents of ISS call stack with those from the design. Prints // messages to stderr on failure or mismatch. Returns true on success; false // on mismatch. Throws a std::runtime_error on failure. bool check_call_stack(ISSWrapper &iss) const; // We want to create the model in an initial block in the SystemVerilog // simulation, but might not actually want to spawn the ISS. To handle that // in a non-racy way, the most convenient thing is to spawn the ISS the first // time it's actually needed. Use ensure_iss() to create as needed. std::unique_ptr<ISSWrapper> iss_; OtbnMemUtil mem_util_; std::string design_scope_; }; #endif // OPENTITAN_HW_IP_OTBN_DV_MODEL_OTBN_MODEL_H_
39.559701
79
0.711752
[ "vector", "model" ]
bd413f8452f8b660b5c454d5ac7bb8ba38bb27a2
3,093
h
C
include/flamegpu/pop/MemoryVector.h
Robadob/FLAMEGPU2_dev
3e4106078f754ee90957519fa196a863fd257d1c
[ "MIT" ]
null
null
null
include/flamegpu/pop/MemoryVector.h
Robadob/FLAMEGPU2_dev
3e4106078f754ee90957519fa196a863fd257d1c
[ "MIT" ]
null
null
null
include/flamegpu/pop/MemoryVector.h
Robadob/FLAMEGPU2_dev
3e4106078f754ee90957519fa196a863fd257d1c
[ "MIT" ]
null
null
null
/* * MemoryVector.h * */ #ifndef INCLUDE_FLAMEGPU_POP_MEMORYVECTOR_H_ #define INCLUDE_FLAMEGPU_POP_MEMORYVECTOR_H_ #include <vector> #include <ostream> #include <typeinfo> #include <map> #include <memory> #include <utility> #include <string> #include "flamegpu/exception/FGPUException.h" class GenericMemoryVector{ public: virtual ~GenericMemoryVector() { ; } virtual const std::type_info& getType() = 0; virtual void* getDataPtr() = 0; virtual const void* getReadOnlyDataPtr() const = 0; virtual void* getVectorPtr() = 0; virtual GenericMemoryVector* clone() const = 0; virtual void resize(unsigned int) = 0; template <typename T> std::vector<T>& getVector(); template <typename T> std::vector<T> getVectorIteratorAt(unsigned int i); }; template <typename T> class MemoryVector : public GenericMemoryVector { public: MemoryVector() : GenericMemoryVector(), type(typeid(T)) { default_value = T(); } virtual ~MemoryVector() { ; } virtual const std::type_info& getType() { return type; } virtual void* getDataPtr() { if (vec.empty()) return nullptr; else return &(vec.front()); } virtual const void* getReadOnlyDataPtr() const { if (vec.empty()) return nullptr; else return &(vec.front()); } virtual void* getVectorPtr() { return static_cast<void*>(&vec); } virtual MemoryVector<T>* clone() const { return (new MemoryVector<T>()); } virtual void resize(unsigned int s) { vec.resize(s); } protected: std::vector<T> vec; T default_value; const std::type_info& type; }; template <typename T> std::vector<T>& GenericMemoryVector::getVector() { if (getType() != typeid(T)) { THROW InvalidVarType("Wrong variable type passed to GenericMemoryVector::getVector(). 
" "This agent data vector expects '%s', but '%s' was requested.", getType().name(), typeid(T).name()); } // must cast the vector as the correct type std::vector<T> *t_v = static_cast<std::vector<T>*>(getVectorPtr()); // return reference return *t_v; } template <typename T> std::vector<T> GenericMemoryVector::getVectorIteratorAt(unsigned int i) { // return an iterator at correct position std::vector<T>& v = getVector<T>(); return (v.begin() + i); } // use this to store default values for a population, must be here to register the correct types at compile time /*! Create a map with std::strings for keys (indexes) and GenericAgentMemoryVector object. A smart pointer has been used to automaticaly manage the object*/ typedef std::map<const std::string, std::unique_ptr<GenericMemoryVector>> StateMemoryMap; /*! Create a pair with std::strings for keys (indexes) and GenericAgentMemoryVector object. A smart pointer has been used to automaticaly manage the object*/ typedef std::pair<const std::string, std::unique_ptr<GenericMemoryVector>> StateMemoryMapPair; #endif // INCLUDE_FLAMEGPU_POP_MEMORYVECTOR_H_
27.864865
158
0.667313
[ "object", "vector" ]
bd441093fe62caf4632ccb90a99e7253c0144524
2,454
h
C
chrome/browser/spellchecker/spell_check_host_impl.h
metux/chromium-deb
3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
chrome/browser/spellchecker/spell_check_host_impl.h
metux/chromium-deb
3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
chrome/browser/spellchecker/spell_check_host_impl.h
metux/chromium-deb
3c08e9b89a1b6f95f103a61ff4f528dbcd57fc42
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_SPELLCHECKER_SPELL_CHECK_HOST_IMPL_H_ #define CHROME_BROWSER_SPELLCHECKER_SPELL_CHECK_HOST_IMPL_H_ #include "base/macros.h" #include "components/spellcheck/browser/spelling_service_client.h" #include "components/spellcheck/common/spellcheck.mojom.h" #include "components/spellcheck/spellcheck_build_features.h" #if !BUILDFLAG(ENABLE_SPELLCHECK) #error "Spellcheck should be enabled." #endif class SpellcheckCustomDictionary; class SpellcheckService; struct SpellCheckResult; class SpellCheckHostImpl : public spellcheck::mojom::SpellCheckHost { public: explicit SpellCheckHostImpl(int render_process_id); ~SpellCheckHostImpl() override; static void Create(int render_process_id, spellcheck::mojom::SpellCheckHostRequest request); private: friend class TestSpellCheckHostImpl; // spellcheck::mojom::SpellCheckHost: void RequestDictionary() override; void NotifyChecked(const base::string16& word, bool misspelled) override; void CallSpellingService(const base::string16& text, CallSpellingServiceCallback callback) override; #if !BUILDFLAG(USE_BROWSER_SPELLCHECKER) // Invoked when the remote Spelling service has finished checking the // text of a CallSpellingService request. void CallSpellingServiceDone( CallSpellingServiceCallback callback, bool success, const base::string16& text, const std::vector<SpellCheckResult>& service_results) const; // Filter out spelling corrections of custom dictionary words from the // Spelling service results. static std::vector<SpellCheckResult> FilterCustomWordResults( const std::string& text, const SpellcheckCustomDictionary& custom_dictionary, const std::vector<SpellCheckResult>& service_results); #endif // Returns the SpellcheckService of our |render_process_id_|. The return // is null if the render process is being shut down. 
virtual SpellcheckService* GetSpellcheckService() const; // The process ID of our render process host. const int render_process_id_; // A JSON-RPC client that calls the remote Spelling service. SpellingServiceClient client_; DISALLOW_COPY_AND_ASSIGN(SpellCheckHostImpl); }; #endif // CHROME_BROWSER_SPELLCHECKER_SPELL_CHECK_HOST_IMPL_H_
35.057143
75
0.781174
[ "render", "vector" ]
bd451ae5190afb2a552dbe75bd3005eb9bf1aa6c
8,661
h
C
extensions/gen/mojo/public/js/test/module_b_1.test-mojom-blink.h
blockspacer/chromium_base_conan
b4749433cf34f54d2edff52e2f0465fec8cb9bad
[ "Apache-2.0", "BSD-3-Clause" ]
6
2020-12-22T05:48:31.000Z
2022-02-08T19:49:49.000Z
extensions/gen/mojo/public/js/test/module_b_1.test-mojom-blink.h
blockspacer/chromium_base_conan
b4749433cf34f54d2edff52e2f0465fec8cb9bad
[ "Apache-2.0", "BSD-3-Clause" ]
4
2020-05-22T18:36:43.000Z
2021-05-19T10:20:23.000Z
extensions/gen/mojo/public/js/test/module_b_1.test-mojom-blink.h
blockspacer/chromium_base_conan
b4749433cf34f54d2edff52e2f0465fec8cb9bad
[ "Apache-2.0", "BSD-3-Clause" ]
2
2019-12-06T11:48:16.000Z
2021-09-16T04:44:47.000Z
// mojo/public/js/test/module_b_1.test-mojom-blink.h is auto generated by mojom_bindings_generator.py, do not edit // Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef MOJO_PUBLIC_JS_TEST_MODULE_B_1_TEST_MOJOM_BLINK_H_ #define MOJO_PUBLIC_JS_TEST_MODULE_B_1_TEST_MOJOM_BLINK_H_ #include <stdint.h> #include <limits> #include <type_traits> #include <utility> #include "base/callback.h" #include "base/macros.h" #include "base/optional.h" #include "mojo/public/cpp/bindings/clone_traits.h" #include "mojo/public/cpp/bindings/equals_traits.h" #include "mojo/public/cpp/bindings/lib/serialization.h" #include "mojo/public/cpp/bindings/struct_ptr.h" #include "mojo/public/cpp/bindings/struct_traits.h" #include "mojo/public/cpp/bindings/union_traits.h" #include "third_party/perfetto/include/perfetto/tracing/traced_value_forward.h" #include "mojo/public/js/test/module_b_1.test-mojom-shared.h" #include "mojo/public/js/test/module_b_1.test-mojom-blink-forward.h" #include "mojo/public/cpp/bindings/lib/wtf_clone_equals_util.h" #include "mojo/public/cpp/bindings/lib/wtf_hash_util.h" #include "third_party/blink/renderer/platform/wtf/hash_functions.h" #include "third_party/blink/renderer/platform/wtf/text/wtf_string.h" #ifdef KYTHE_IS_RUNNING #pragma kythe_inline_metadata "Metadata comment" #endif namespace module_b { namespace blink { // @generated_from: module_b.TestStructB1 class TestStructB1 { public: template <typename T> using EnableIfSame = std::enable_if_t<std::is_same<TestStructB1, T>::value>; using DataView = TestStructB1DataView; using Data_ = internal::TestStructB1_Data; template <typename... Args> static TestStructB1Ptr New(Args&&... 
args) { return TestStructB1Ptr( base::in_place, std::forward<Args>(args)...); } template <typename U> static TestStructB1Ptr From(const U& u) { return mojo::TypeConverter<TestStructB1Ptr, U>::Convert(u); } template <typename U> U To() const { return mojo::TypeConverter<U, TestStructB1>::Convert(*this); } TestStructB1(); TestStructB1( int32_t x, int32_t y); ~TestStructB1(); // Clone() is a template so it is only instantiated if it is used. Thus, the // bindings generator does not need to know whether Clone() or copy // constructor/assignment are available for members. template <typename StructPtrType = TestStructB1Ptr> TestStructB1Ptr Clone() const; // Equals() is a template so it is only instantiated if it is used. Thus, the // bindings generator does not need to know whether Equals() or == operator // are available for members. template <typename T, TestStructB1::EnableIfSame<T>* = nullptr> bool Equals(const T& other) const; template <typename T, TestStructB1::EnableIfSame<T>* = nullptr> bool operator==(const T& rhs) const { return Equals(rhs); } size_t Hash(size_t seed) const; template <typename UserType> static WTF::Vector<uint8_t> Serialize(UserType* input) { return mojo::internal::SerializeImpl< TestStructB1::DataView, WTF::Vector<uint8_t>>(input); } template <typename UserType> static mojo::Message SerializeAsMessage(UserType* input) { return mojo::internal::SerializeAsMessageImpl< TestStructB1::DataView>(input); } // The returned Message is serialized only if the message is moved // cross-process or cross-language. Otherwise if the message is Deserialized // as the same UserType |input| will just be moved to |output| in // DeserializeFromMessage. 
template <typename UserType> static mojo::Message WrapAsMessage(UserType input) { return mojo::Message(std::make_unique< internal::TestStructB1_UnserializedMessageContext< UserType, TestStructB1::DataView>>(0, 0, std::move(input)), MOJO_CREATE_MESSAGE_FLAG_NONE); } template <typename UserType> static bool Deserialize(const void* data, size_t data_num_bytes, UserType* output) { mojo::Message message; return mojo::internal::DeserializeImpl<TestStructB1::DataView>( message, data, data_num_bytes, output, Validate); } template <typename UserType> static bool Deserialize(const WTF::Vector<uint8_t>& input, UserType* output) { return TestStructB1::Deserialize( input.size() == 0 ? nullptr : &input.front(), input.size(), output); } template <typename UserType> static bool DeserializeFromMessage(mojo::Message input, UserType* output) { auto context = input.TakeUnserializedContext< internal::TestStructB1_UnserializedMessageContext< UserType, TestStructB1::DataView>>(); if (context) { *output = std::move(context->TakeData()); return true; } input.SerializeIfNecessary(); return mojo::internal::DeserializeImpl<TestStructB1::DataView>( input, input.payload(), input.payload_num_bytes(), output, Validate); } // @generated_from: module_b.TestStructB1.x int32_t x; // @generated_from: module_b.TestStructB1.y int32_t y; // Serialise this struct into a trace. void WriteIntoTracedValue(perfetto::TracedValue context) const; private: static bool Validate(const void* data, mojo::internal::ValidationContext* validation_context); }; // The comparison operators are templates, so they are only instantiated if they // are used. Thus, the bindings generator does not need to know whether // comparison operators are available for members. 
template <typename T, TestStructB1::EnableIfSame<T>* = nullptr> bool operator<(const T& lhs, const T& rhs); template <typename T, TestStructB1::EnableIfSame<T>* = nullptr> bool operator<=(const T& lhs, const T& rhs) { return !(rhs < lhs); } template <typename T, TestStructB1::EnableIfSame<T>* = nullptr> bool operator>(const T& lhs, const T& rhs) { return rhs < lhs; } template <typename T, TestStructB1::EnableIfSame<T>* = nullptr> bool operator>=(const T& lhs, const T& rhs) { return !(lhs < rhs); } template <typename StructPtrType> TestStructB1Ptr TestStructB1::Clone() const { return New( mojo::Clone(x), mojo::Clone(y) ); } template <typename T, TestStructB1::EnableIfSame<T>*> bool TestStructB1::Equals(const T& other_struct) const { if (!mojo::Equals(this->x, other_struct.x)) return false; if (!mojo::Equals(this->y, other_struct.y)) return false; return true; } template <typename T, TestStructB1::EnableIfSame<T>*> bool operator<(const T& lhs, const T& rhs) { if (lhs.x < rhs.x) return true; if (rhs.x < lhs.x) return false; if (lhs.y < rhs.y) return true; if (rhs.y < lhs.y) return false; return false; } } // namespace blink } // namespace module_b namespace mojo { template <> struct StructTraits<::module_b::blink::TestStructB1::DataView, ::module_b::blink::TestStructB1Ptr> { static bool IsNull(const ::module_b::blink::TestStructB1Ptr& input) { return !input; } static void SetToNull(::module_b::blink::TestStructB1Ptr* output) { output->reset(); } static decltype(::module_b::blink::TestStructB1::x) x( const ::module_b::blink::TestStructB1Ptr& input) { return input->x; } static decltype(::module_b::blink::TestStructB1::y) y( const ::module_b::blink::TestStructB1Ptr& input) { return input->y; } static bool Read(::module_b::blink::TestStructB1::DataView input, ::module_b::blink::TestStructB1Ptr* output); }; } // namespace mojo #endif // MOJO_PUBLIC_JS_TEST_MODULE_B_1_TEST_MOJOM_BLINK_H_ /* Metadata comment 
eyJtZXRhIjogW3siZW5kIjogMTUzNSwgImJlZ2luIjogMTUyMywgImVkZ2UiOiAiJS9reXRoZS9l ZGdlL2dlbmVyYXRlcyIsICJ0eXBlIjogImFuY2hvcl9kZWZpbmVzIiwgInZuYW1lIjogeyJjb3Jw dXMiOiAiY2hyb21pdW0uZ29vZ2xlc291cmNlLmNvbS9jaHJvbWl1bS9zcmMiLCAibGFuZ3VhZ2Ui OiAibW9qb20iLCAic2lnbmF0dXJlIjogIm1vZHVsZV9iLlRlc3RTdHJ1Y3RCMSJ9fSwgeyJlbmQi OiA1MTUyLCAiYmVnaW4iOiA1MTUxLCAiZWRnZSI6ICIlL2t5dGhlL2VkZ2UvZ2VuZXJhdGVzIiwg InR5cGUiOiAiYW5jaG9yX2RlZmluZXMiLCAidm5hbWUiOiB7ImNvcnB1cyI6ICJjaHJvbWl1bS5n b29nbGVzb3VyY2UuY29tL2Nocm9taXVtL3NyYyIsICJsYW5ndWFnZSI6ICJtb2pvbSIsICJzaWdu YXR1cmUiOiAibW9kdWxlX2IuVGVzdFN0cnVjdEIxLngifX0sIHsiZW5kIjogNTIxMiwgImJlZ2lu IjogNTIxMSwgImVkZ2UiOiAiJS9reXRoZS9lZGdlL2dlbmVyYXRlcyIsICJ0eXBlIjogImFuY2hv cl9kZWZpbmVzIiwgInZuYW1lIjogeyJjb3JwdXMiOiAiY2hyb21pdW0uZ29vZ2xlc291cmNlLmNv bS9jaHJvbWl1bS9zcmMiLCAibGFuZ3VhZ2UiOiAibW9qb20iLCAic2lnbmF0dXJlIjogIm1vZHVs ZV9iLlRlc3RTdHJ1Y3RCMS55In19XSwgInR5cGUiOiAia3l0aGUwIn0= */
32.56015
114
0.735596
[ "vector" ]
bd459c10ab5241ad14499a129dad267729113592
376
h
C
doc/prototypes/load_test.h
BackupTheBerlios/rubyk
a885b079633073da259941c6dc05ad0c419d3f72
[ "MIT" ]
1
2020-11-14T20:36:21.000Z
2020-11-14T20:36:21.000Z
doc/prototypes/load_test.h
BackupTheBerlios/rubyk
a885b079633073da259941c6dc05ad0c419d3f72
[ "MIT" ]
null
null
null
doc/prototypes/load_test.h
BackupTheBerlios/rubyk
a885b079633073da259941c6dc05ad0c419d3f72
[ "MIT" ]
null
null
null
#ifndef _LOAD_TEST_H_ #define _LOAD_TEST_H_ #include <string> #include <vector> #include <iostream> struct Dummy { void declare (char * name) { mList.push_back(std::string(name)); } void print() { int i; for(i = 0;i<mList.size();i++) { std::cout << i << " : " << mList[i] << std::endl; } } std::vector<std::string> mList; }; #endif
14.461538
55
0.569149
[ "vector" ]
bd48f563423d0f283293cb42a6fa67ec746cba3e
10,576
c
C
software/libdvi/dvi.c
lowfatcode/PicoDVI
0af9473d46e44e0dd1d0138fa51bc4d86ef33e4b
[ "BSD-3-Clause" ]
1
2022-01-08T01:15:43.000Z
2022-01-08T01:15:43.000Z
software/libdvi/dvi.c
lowfatcode/PicoDVI
0af9473d46e44e0dd1d0138fa51bc4d86ef33e4b
[ "BSD-3-Clause" ]
null
null
null
software/libdvi/dvi.c
lowfatcode/PicoDVI
0af9473d46e44e0dd1d0138fa51bc4d86ef33e4b
[ "BSD-3-Clause" ]
1
2021-03-26T23:38:49.000Z
2021-03-26T23:38:49.000Z
#include <stdlib.h> #include "hardware/dma.h" #include "hardware/irq.h" #include "dvi.h" #include "dvi_timing.h" #include "dvi_serialiser.h" #include "tmds_encode.h" // Time-critical functions pulled into RAM but each in a unique section to // allow garbage collection #define __dvi_func(f) __not_in_flash_func(f) #define __dvi_func_x(f) __scratch_x(__STRING(f)) f // We require exclusive use of a DMA IRQ line. (you wouldn't want to share // anyway). It's possible in theory to hook both IRQs and have two DVI outs. static struct dvi_inst *dma_irq_privdata[2]; static void dvi_dma0_irq(); static void dvi_dma1_irq(); void dvi_init(struct dvi_inst *inst, uint spinlock_tmds_queue, uint spinlock_colour_queue) { dvi_timing_state_init(&inst->timing_state); dvi_serialiser_init(&inst->ser_cfg); for (int i = 0; i < N_TMDS_LANES; ++i) { inst->dma_cfg[i].chan_ctrl = dma_claim_unused_channel(true); inst->dma_cfg[i].chan_data = dma_claim_unused_channel(true); inst->dma_cfg[i].tx_fifo = (void*)&inst->ser_cfg.pio->txf[inst->ser_cfg.sm_tmds[i]]; inst->dma_cfg[i].dreq = pio_get_dreq(inst->ser_cfg.pio, inst->ser_cfg.sm_tmds[i], true); } inst->late_scanline_ctr = 0; inst->tmds_buf_release_next = NULL; inst->tmds_buf_release = NULL; queue_init_with_spinlock(&inst->q_tmds_valid, sizeof(void*), 8, spinlock_tmds_queue); queue_init_with_spinlock(&inst->q_tmds_free, sizeof(void*), 8, spinlock_tmds_queue); queue_init_with_spinlock(&inst->q_colour_valid, sizeof(void*), 8, spinlock_colour_queue); queue_init_with_spinlock(&inst->q_colour_free, sizeof(void*), 8, spinlock_colour_queue); dvi_setup_scanline_for_vblank(inst->timing, inst->dma_cfg, true, &inst->dma_list_vblank_sync); dvi_setup_scanline_for_vblank(inst->timing, inst->dma_cfg, false, &inst->dma_list_vblank_nosync); dvi_setup_scanline_for_active(inst->timing, inst->dma_cfg, (void*)SRAM_BASE, &inst->dma_list_active); dvi_setup_scanline_for_active(inst->timing, inst->dma_cfg, NULL, &inst->dma_list_error); for (int i = 0; i < 
DVI_N_TMDS_BUFFERS; ++i) { void *tmdsbuf; #if DVI_MONOCHROME_TMDS tmdsbuf = malloc(inst->timing->h_active_pixels / DVI_SYMBOLS_PER_WORD * sizeof(uint32_t)); #else tmdsbuf = malloc(3 * inst->timing->h_active_pixels / DVI_SYMBOLS_PER_WORD * sizeof(uint32_t)); #endif if (!tmdsbuf) panic("TMDS buffer allocation failed"); queue_add_blocking_u32(&inst->q_tmds_free, &tmdsbuf); } } // The IRQs will run on whichever core calls this function (this is why it's // called separately from dvi_init) void dvi_register_irqs_this_core(struct dvi_inst *inst, uint irq_num) { uint32_t mask_sync_channel = 1u << inst->dma_cfg[TMDS_SYNC_LANE].chan_data; uint32_t mask_all_channels = 0; for (int i = 0; i < N_TMDS_LANES; ++i) mask_all_channels |= 1u << inst->dma_cfg[i].chan_ctrl | 1u << inst->dma_cfg[i].chan_data; dma_hw->ints0 = mask_sync_channel; if (irq_num == DMA_IRQ_0) { hw_write_masked(&dma_hw->inte0, mask_sync_channel, mask_all_channels); dma_irq_privdata[0] = inst; irq_set_exclusive_handler(DMA_IRQ_0, dvi_dma0_irq); } else { hw_write_masked(&dma_hw->inte1, mask_sync_channel, mask_all_channels); dma_irq_privdata[1] = inst; irq_set_exclusive_handler(DMA_IRQ_1, dvi_dma1_irq); } irq_set_enabled(irq_num, true); } // Set up control channels to make transfers to data channels' control // registers (but don't trigger the control channels -- this is done either by // data channel CHAIN_TO or an initial write to MULTI_CHAN_TRIGGER) static inline void __attribute__((always_inline)) _dvi_load_dma_op(const struct dvi_lane_dma_cfg dma_cfg[], struct dvi_scanline_dma_list *l) { for (int i = 0; i < N_TMDS_LANES; ++i) { dma_channel_config cfg = dma_channel_get_default_config(dma_cfg[i].chan_ctrl); channel_config_set_ring(&cfg, true, 4); // 16-byte write wrap channel_config_set_read_increment(&cfg, true); channel_config_set_write_increment(&cfg, true); dma_channel_configure( dma_cfg[i].chan_ctrl, &cfg, &dma_hw->ch[dma_cfg[i].chan_data], dvi_lane_from_list(l, i), 4, // Configure all 4 registers then 
halt until next CHAIN_TO false ); } } // Setup first set of control block lists, configure the control channels, and // trigger them. Control channels will subsequently be triggered only by DMA // CHAIN_TO on data channel completion. IRQ handler *must* be prepared before // calling this. (Hooked to DMA IRQ0) void dvi_start(struct dvi_inst *inst) { _dvi_load_dma_op(inst->dma_cfg, &inst->dma_list_vblank_nosync); dma_start_channel_mask( (1u << inst->dma_cfg[0].chan_ctrl) | (1u << inst->dma_cfg[1].chan_ctrl) | (1u << inst->dma_cfg[2].chan_ctrl)); // We really don't want the FIFOs to bottom out, so wait for full before // starting the shift-out. for (int i = 0; i < N_TMDS_LANES; ++i) while (!pio_sm_is_tx_fifo_full(inst->ser_cfg.pio, inst->ser_cfg.sm_tmds[i])) tight_loop_contents(); dvi_serialiser_enable(&inst->ser_cfg, true); } static inline void __dvi_func_x(_dvi_prepare_scanline_8bpp)(struct dvi_inst *inst, uint32_t *scanbuf) { uint32_t *tmdsbuf; queue_remove_blocking_u32(&inst->q_tmds_free, &tmdsbuf); uint pixwidth = inst->timing->h_active_pixels; uint words_per_channel = pixwidth / DVI_SYMBOLS_PER_WORD; // TODO maybe want to make this configurable one day // anyhoo we are abutting the buffers in TMDS channel order const uint red_msb = 7; const uint red_lsb = 5; const uint green_msb = 4; const uint green_lsb = 2; const uint blue_msb = 1; const uint blue_lsb = 0; // Scanline buffers are half-resolution; the functions take the number of *input* pixels as parameter. 
tmds_encode_data_channel_8bpp(scanbuf, tmdsbuf + 0 * words_per_channel, pixwidth / 2, blue_msb, blue_lsb); tmds_encode_data_channel_8bpp(scanbuf, tmdsbuf + 1 * words_per_channel, pixwidth / 2, green_msb, green_lsb); tmds_encode_data_channel_8bpp(scanbuf, tmdsbuf + 2 * words_per_channel, pixwidth / 2, red_msb, red_lsb); queue_add_blocking_u32(&inst->q_tmds_valid, &tmdsbuf); } static inline void __dvi_func_x(_dvi_prepare_scanline_16bpp)(struct dvi_inst *inst, uint32_t *scanbuf) { uint32_t *tmdsbuf; queue_remove_blocking_u32(&inst->q_tmds_free, &tmdsbuf); uint pixwidth = inst->timing->h_active_pixels; uint words_per_channel = pixwidth / DVI_SYMBOLS_PER_WORD; const uint red_msb = 15; const uint red_lsb = 11; const uint green_msb = 10; const uint green_lsb = 5; const uint blue_msb = 4; const uint blue_lsb = 0; tmds_encode_data_channel_16bpp(scanbuf, tmdsbuf + 0 * words_per_channel, pixwidth / 2, blue_msb, blue_lsb); tmds_encode_data_channel_16bpp(scanbuf, tmdsbuf + 1 * words_per_channel, pixwidth / 2, green_msb, green_lsb); tmds_encode_data_channel_16bpp(scanbuf, tmdsbuf + 2 * words_per_channel, pixwidth / 2, red_msb, red_lsb); queue_add_blocking_u32(&inst->q_tmds_valid, &tmdsbuf); } // "Worker threads" for TMDS encoding (core enters and never returns, but still handles IRQs) // Version where each record in q_colour_valid is one scanline: void __dvi_func(dvi_scanbuf_main_8bpp)(struct dvi_inst *inst) { uint y = 0; while (1) { uint32_t *scanbuf; queue_remove_blocking_u32(&inst->q_colour_valid, &scanbuf); _dvi_prepare_scanline_8bpp(inst, scanbuf); queue_add_blocking_u32(&inst->q_colour_free, &scanbuf); ++y; if (y == inst->timing->v_active_lines) { y = 0; } } __builtin_unreachable(); } // Ugh copy/paste but it lets us garbage collect the TMDS stuff that is not being used from .scratch_x void __dvi_func(dvi_scanbuf_main_16bpp)(struct dvi_inst *inst) { uint y = 0; while (1) { uint32_t *scanbuf; queue_remove_blocking_u32(&inst->q_colour_valid, &scanbuf); 
_dvi_prepare_scanline_16bpp(inst, scanbuf); queue_add_blocking_u32(&inst->q_colour_free, &scanbuf); ++y; if (y == inst->timing->v_active_lines) { y = 0; } } __builtin_unreachable(); } static void __dvi_func(dvi_dma_irq_handler)(struct dvi_inst *inst) { // Every fourth interrupt marks the start of the horizontal active region. We // now have until the end of this region to generate DMA blocklist for next // scanline. dvi_timing_state_advance(inst->timing, &inst->timing_state); if (inst->tmds_buf_release && !queue_try_add_u32(&inst->q_tmds_free, &inst->tmds_buf_release)) panic("TMDS free queue full in IRQ!"); inst->tmds_buf_release = inst->tmds_buf_release_next; inst->tmds_buf_release_next = NULL; // Make sure all three channels have definitely loaded their last block // (should be within a few cycles of one another) for (int i = 0; i < N_TMDS_LANES; ++i) { while (dma_debug_hw->ch[inst->dma_cfg[i].chan_data].tcr != inst->timing->h_active_pixels / DVI_SYMBOLS_PER_WORD) tight_loop_contents(); } uint32_t *tmdsbuf; while (inst->late_scanline_ctr > 0 && queue_try_remove_u32(&inst->q_tmds_valid, &tmdsbuf)) { // If we displayed this buffer then it would be in the wrong vertical // position on-screen. Just pass it back. 
queue_add_blocking_u32(&inst->q_tmds_free, &tmdsbuf); --inst->late_scanline_ctr; } if (inst->timing_state.v_state != DVI_STATE_ACTIVE) { // Don't care tmdsbuf = NULL; } else if (queue_try_peek_u32(&inst->q_tmds_valid, &tmdsbuf)) { if (inst->timing_state.v_ctr % DVI_VERTICAL_REPEAT == DVI_VERTICAL_REPEAT - 1) { queue_remove_blocking_u32(&inst->q_tmds_valid, &tmdsbuf); inst->tmds_buf_release_next = tmdsbuf; } } else { // No valid scanline was ready (generates solid red scanline) tmdsbuf = NULL; if (inst->timing_state.v_ctr % DVI_VERTICAL_REPEAT == DVI_VERTICAL_REPEAT - 1) ++inst->late_scanline_ctr; } switch (inst->timing_state.v_state) { case DVI_STATE_ACTIVE: if (tmdsbuf) { dvi_update_scanline_data_dma(inst->timing, tmdsbuf, &inst->dma_list_active); _dvi_load_dma_op(inst->dma_cfg, &inst->dma_list_active); } else { _dvi_load_dma_op(inst->dma_cfg, &inst->dma_list_error); } if (inst->scanline_callback && inst->timing_state.v_ctr % DVI_VERTICAL_REPEAT == DVI_VERTICAL_REPEAT - 1) { inst->scanline_callback(); } break; case DVI_STATE_SYNC: _dvi_load_dma_op(inst->dma_cfg, &inst->dma_list_vblank_sync); break; default: _dvi_load_dma_op(inst->dma_cfg, &inst->dma_list_vblank_nosync); break; } } static void __dvi_func(dvi_dma0_irq)() { struct dvi_inst *inst = dma_irq_privdata[0]; dma_hw->ints0 = 1u << inst->dma_cfg[TMDS_SYNC_LANE].chan_data; dvi_dma_irq_handler(inst); } static void __dvi_func(dvi_dma1_irq)() { struct dvi_inst *inst = dma_irq_privdata[1]; dma_hw->ints1 = 1u << inst->dma_cfg[TMDS_SYNC_LANE].chan_data; dvi_dma_irq_handler(inst); }
40.212928
142
0.749433
[ "solid" ]
bd4a24b8185493a74d073c866180c426a4fa5dfe
1,992
h
C
SimulationCore/Particle_2D_ME.h
MingAtUWA/SimpleMPM
46a0e48028b7d6258f452f9cbee6195bb7f6aa41
[ "MIT" ]
null
null
null
SimulationCore/Particle_2D_ME.h
MingAtUWA/SimpleMPM
46a0e48028b7d6258f452f9cbee6195bb7f6aa41
[ "MIT" ]
null
null
null
SimulationCore/Particle_2D_ME.h
MingAtUWA/SimpleMPM
46a0e48028b7d6258f452f9cbee6195bb7f6aa41
[ "MIT" ]
null
null
null
#ifndef _PARTICLE_2D_ME_H_ #define _PARTICLE_2D_ME_H_ #include "Object.h" #include "ConstitutiveModel.h" struct Object_Particle_2D_ME; struct Particle_2D_ME : public Particle { public: // to which object this particle belongs Object_Particle_2D_ME *object; union { double coords[2]; struct { double x, y; }; }; /* ------------------------------------------ * The two unions below are used to improve floating point * accuracy when ux << x uy << y. * -----------------------------------------*/ union { double coords_ori[2]; struct { double x_ori, y_ori; }; }; union { double displacement[2]; struct { double ux, uy; }; }; // mass double m; double density; // velocity union { double velocity[2]; struct { double vx, vy; }; }; union { double stress[6]; struct { double s11, s22, s33, s12, s23, s31; }; }; // total strain union { double strain[3]; struct { double e11, e22, e12; }; }; // elastic strain union { double estrain[3]; struct { double es11, es22, es12; }; }; // plastic strain union { double pstrain[3]; struct { double ps11, ps22, ps12; }; }; // Constitutive Model ConstitutiveModel *cm; // Calculation variables ParticleVar *var; public: void init(void) { x = 0.0; y = 0.0; x_ori = 0.0; y_ori = 0.0; ux = 0.0; uy = 0.0; m = 0.0; density = 0.0; vx = 0.0; vy = 0.0; s11 = 0.0; s22 = 0.0; s33 = 0.0; s12 = 0.0; s23 = 0.0; s31 = 0.0; e11 = 0.0; e22 = 0.0; e12 = 0.0; es11 = 0.0; es22 = 0.0; es12 = 0.0; ps11 = 0.0; ps22 = 0.0; ps12 = 0.0; cm = nullptr; var = nullptr; } public: // critical time step at particle inline double critical_time_step(double elem_char_len) { double Ec = (1.0 - niu) / ((1.0 + niu) * (1.0 - 2.0 * niu)) * E; return elem_char_len / sqrt(Ec / density); } public: // --------------- Obsoleted ----------------- // constitutive relation double E; // Elastic modulus double niu; // Poisson ratio }; #endif
16.46281
66
0.566265
[ "object", "model" ]
bd52c4f6e0c9c90851995c5170a7151dd60fbe4d
6,343
h
C
materias 4 periodo/engenharia de softaware I/sprints individuais/sprint 6/BCC322-Sprint6-Marcus/MyVensim/src/include/model.h
marcusv77/universidade
723f9a2ac7b7c14c11ada751c6daa1bd1cd0c4df
[ "MIT" ]
null
null
null
materias 4 periodo/engenharia de softaware I/sprints individuais/sprint 6/BCC322-Sprint6-Marcus/MyVensim/src/include/model.h
marcusv77/universidade
723f9a2ac7b7c14c11ada751c6daa1bd1cd0c4df
[ "MIT" ]
null
null
null
materias 4 periodo/engenharia de softaware I/sprints individuais/sprint 6/BCC322-Sprint6-Marcus/MyVensim/src/include/model.h
marcusv77/universidade
723f9a2ac7b7c14c11ada751c6daa1bd1cd0c4df
[ "MIT" ]
null
null
null
#ifndef MODEL_H #define MODEL_H #include <vector> #include "./flowImpl.h" using namespace std; //! Class Model /** * This Class represents a model in the General Systems Theory implemented in this code. */ class Model{ protected: /*! Adds a system's pointer to the systems vector. \param sys the system to be added. */ virtual void add(System* sys) = 0; /*! Adds a flow's pointer to the flows vector. \param flow the flow to be added. */ virtual void add(Flow* flow) = 0; public: friend class UnitModel; /*!< Class to support on the unit tests */ typedef vector<System*>::iterator systemIterator; typedef vector<Flow*>::iterator flowIterator; typedef vector<Model*>::iterator modelIterator; virtual systemIterator beginSystems() = 0; /*!< Returns the iterator to the beginning of systems attribute. */ virtual systemIterator endSystems() = 0; /*!< Returns the iterator to the end of systems attribute. */ virtual flowIterator beginFlows() = 0; /*!< Returns the iterator to the beginning of flows attribute. */ virtual flowIterator endFlows() = 0; /*!< Returns the iterator to the end of flows attribute. */ virtual modelIterator beginModels() = 0; /*!< Returns the iterator to the beginning of models attribute. */ virtual modelIterator endModels() = 0; /*!< Returns the iterator to the end of models attribute. */ /*! This is the default destructor for the Model Class. */ virtual ~Model(){} /*! Executes all the flows in the model. \param start the initial time. \param final the final time. \param increment represents the iteration step. */ virtual void execute(double start, double final, double increment) = 0; /*! Creates a model and returns the model pointer. \param name the model's name. \param time the initial time. */ static Model* createModel(string name="", double time=0); /*! Creates a flow and returns the flow pointer. \param name the flow's name. \param source a pointer to the source system. \param target a pointer to the target system. 
*/ template <typename F_IMPL> Flow* createFlow(string name = "", System* source = NULL, System* target = NULL){ Flow* flow = new FlowHandle<F_IMPL>(name, source, target); add(flow); return flow; } /*! Creates a system and returns the system pointer. \param name the system's name. \param value the value of the system. */ virtual System* createSystem(string name, double value) = 0; /*! Removes a system's pointer on the systems vector. \param sys which will be removed from the vector flows. */ virtual void remove(System* sys) = 0; /*! Removes a flow's pointer on the flows vector. \param flow which will be removed from the vector flows. */ virtual void remove(Flow* flow) = 0; /*! Sets the name attribute in the Model Class. \param modelName which will be set to the current model. */ virtual void setName(string modelName) = 0; /*! Returns the name attribute in the Model Class. \return string - the name attribute. */ virtual string getName() const = 0; /*! Sets the time attribute in the Model Class. \param currentTime which will be set to the current model. */ virtual void setTime(double currentTime) = 0; /*! Returns the time attribute in the Model Class. \return double - the time attribute. */ virtual double getTime() const = 0; /*! Sets the source system of a flow. \param flow the flow that will receive a source system. \param sourceSys a pointer to the source system. */ virtual void setSource(Flow* flow, System* sourceSys) = 0; /*! Returns the source system of a flow. \param flow the flow that will return his source system. \return System* - the pointer of the source system. */ virtual System* getSource(Flow* flow) = 0; /*! Sets the pointer of a flow's source system as NULL. \param flow the flow which will have his source system changed. */ virtual void clearSource(Flow* flow) = 0; /*! Sets the target system of a flow. \param flow the flow that will receive a target system. \param targetSys a pointer to the target system. 
*/ virtual void setTarget(Flow* flow, System* targetSys) = 0; /*! Returns the target system of a flow. \param flow the flow that will return his target system. \return System* - the pointer of the target system. */ virtual System* getTarget(Flow* flow) = 0; /*! Sets the pointer of a flow's target system as NULL. \param flow the flow which will have his target system changed. */ virtual void clearTarget(Flow* flow) = 0; /*! This method increments the time attribute in the Model Class. \param increment which will define by how much time should increment. */ virtual void incrementTime(double increment) = 0; /*! Returns a system in the index-th position of the systems attribute Model Class. \return System* - a system in the index-th position of the systems attribute. */ virtual System* getSystem(int index) = 0; /*! Returns a flow in the index-th position of the flows attribute Model Class. \return Flow* - a flow in the index-th position of the systems attribute. */ virtual Flow* getFlow(int index) = 0; }; #endif
35.634831
118
0.576226
[ "vector", "model" ]
bd56509b2c049f412730409544b5c1c4b2224bdb
2,486
h
C
Engine/Source/Runtime/Engine/Classes/Materials/MaterialExpressionFunctionOutput.h
PopCap/GameIdea
201e1df50b2bc99afc079ce326aa0a44b178a391
[ "BSD-2-Clause" ]
null
null
null
Engine/Source/Runtime/Engine/Classes/Materials/MaterialExpressionFunctionOutput.h
PopCap/GameIdea
201e1df50b2bc99afc079ce326aa0a44b178a391
[ "BSD-2-Clause" ]
2
2015-06-21T17:38:11.000Z
2015-06-22T20:54:42.000Z
Engine/Source/Runtime/Engine/Classes/Materials/MaterialExpressionFunctionOutput.h
PopCap/GameIdea
201e1df50b2bc99afc079ce326aa0a44b178a391
[ "BSD-2-Clause" ]
null
null
null
// Copyright 1998-2015 Epic Games, Inc. All Rights Reserved. #pragma once #include "Materials/MaterialExpression.h" #include "MaterialExpressionFunctionOutput.generated.h" UCLASS(hidecategories=object, MinimalAPI) class UMaterialExpressionFunctionOutput : public UMaterialExpression { GENERATED_UCLASS_BODY() /** The output's name, which will be drawn on the connector in function call expressions that use this function. */ UPROPERTY(EditAnywhere, Category=MaterialExpressionFunctionOutput) FString OutputName; /** The output's description, which will be used as a tooltip on the connector in function call expressions that use this function. */ UPROPERTY(EditAnywhere, Category=MaterialExpressionFunctionOutput, meta=(MultiLine=true)) FString Description; /** Controls where the output is displayed relative to the other outputs. */ UPROPERTY(EditAnywhere, Category=MaterialExpressionFunctionOutput) int32 SortPriority; /** Stores the expression in the material function connected to this output. */ UPROPERTY() FExpressionInput A; /** Whether this output was previewed the last time this function was edited. */ UPROPERTY() uint32 bLastPreviewed:1; /** Id of this input, used to maintain references through name changes. */ UPROPERTY() FGuid Id; // Begin UObject interface. virtual void PostLoad() override; virtual void PostDuplicate(bool bDuplicateForPIE) override; #if WITH_EDITOR virtual void PostEditImport() override; virtual void PreEditChange(UProperty* PropertyAboutToChange) override; virtual void PostEditChangeProperty(FPropertyChangedEvent& PropertyChangedEvent) override; #endif // WITH_EDITOR // End UObject interface. 
// Begin UMaterialExpression Interface virtual int32 Compile(class FMaterialCompiler* Compiler, int32 OutputIndex, int32 MultiplexIndex) override; virtual void GetCaption(TArray<FString>& OutCaptions) const override; virtual FString GetInputName(int32 InputIndex) const override { return TEXT(""); } #if WITH_EDITOR virtual void GetExpressionToolTip(TArray<FString>& OutToolTip) override; virtual uint32 GetInputType(int32 InputIndex) override; #endif virtual bool IsResultMaterialAttributes(int32 OutputIndex) override; // End UMaterialExpression Interface /** Generate the Id for this input. */ ENGINE_API void ConditionallyGenerateId(bool bForce); /** Validate OutputName. Must be called after OutputName is changed to prevent duplicate outputs. */ ENGINE_API void ValidateName(); };
34.054795
135
0.79646
[ "object" ]
bd60ab1670123dd8dbd6af1291bb96f8b8b44894
3,439
h
C
src/brookbox/wm2/WmlBoundingVolumeTree.h
darwin/inferno
e87017763abae0cfe09d47987f5f6ac37c4f073d
[ "Zlib" ]
2
2016-05-09T11:57:28.000Z
2021-07-28T16:46:08.000Z
src/brookbox/wm2/WmlBoundingVolumeTree.h
darwin/inferno
e87017763abae0cfe09d47987f5f6ac37c4f073d
[ "Zlib" ]
null
null
null
src/brookbox/wm2/WmlBoundingVolumeTree.h
darwin/inferno
e87017763abae0cfe09d47987f5f6ac37c4f073d
[ "Zlib" ]
null
null
null
// Magic Software, Inc. // http://www.magic-software.com // http://www.wild-magic.com // Copyright (c) 2003. All Rights Reserved // // The Wild Magic Library (WML) source code is supplied under the terms of // the license agreement http://www.magic-software.com/License/WildMagic.pdf // and may not be copied or disclosed except in accordance with the terms of // that agreement. #ifndef WMLBOUNDINGVOLUMETREE_H #define WMLBOUNDINGVOLUMETREE_H #include "WmlBoundingVolume.h" #include "WmlVector3.h" namespace Wml { class WML_ITEM BoundingVolumeTree { public: BoundingVolumeTree (BoundingVolume::Type eType, int iVertexCount, const Vector3f* akVertex, int iTriangleCount, const int* aiConnect, int iMaxTrisPerLeaf = 1, bool bStoreInteriorTris = false); ~BoundingVolumeTree (); // tree topology BoundingVolumeTree* GetLChild (); BoundingVolumeTree* GetRChild (); bool IsInteriorNode () const; bool IsLeafNode () const; // model space data const BoundingVolume* GetModelBound () const; int GetTriangleQuantity () const; int GetTriangle (int i) const; const int* GetTriangles () const; // world space data BoundingVolume* GetWorldBound (); void InvalidateWorldBounds (); protected: // support for recursive construction of tree BoundingVolumeTree (); void CreateTree (BoundingVolume::Type eType, int iVertexCount, const Vector3f* akVertex, int iTriangleCount, const int* aiConnect, int iMaxTrisPerLeaf, bool bStoreInteriorTris, const Vector3f* akCentroid, int i0, int i1, int* aiISplit, int* aiOSplit); static void SplitTriangles (const Vector3f* akCentroid, int i0, int i1, int* aiISplit, int& rj0, int& rj1, int* aiOSplit, const Vector3f& rkOrigin, const Vector3f& rkDirection); // for quick-sort of centroid projections on axes class WML_ITEM ProjectionInfo { public: int m_iTriangle; float m_fProjection; }; static int Compare (const void* pvElement0, const void* pvElement1); // bounds and child links BoundingVolume* m_pkModelBound; BoundingVolume* m_pkWorldBound; BoundingVolumeTree* m_pkLChild; 
BoundingVolumeTree* m_pkRChild; // If bStoreInteriorTris is set to 'false' in the constructor, the // interior nodes set the triangle quantity to zero and the array to null. // Leaf nodes set the quantity to the number of triangles at that node (1 // if iMaxTrianglesPerLeaf was set to 1) and allocate an array of // triangle indices that are relative to the input mesh of the top level // constructor. // // If bStoreInteriorTris is set to 'true', the interior nodes also save // the triangle quantity and array of triangle indices for the mesh that // the node represents. int m_iTriangleQuantity; int* m_aiTriangle; #ifdef _DEBUG_TEST // Checks to see if the vertices corresponding to the triangle mesh at // at each tree node are contained by the model space bounding volume. // The call is only made when _DEBUG_TEST has been defined *and* when // bStoreInteriorTris is set to 'true'. bool ContainsLeafData (const Vector3f* akVertex, const int* aiConnect, float fEpsilon) const; #endif }; #include "WmlBoundingVolumeTree.inl" } #endif
33.067308
79
0.690026
[ "mesh", "model" ]
bd66b79bfa222942daadd6e49dbdaf9667dabdca
30,123
h
C
src/pke/include/scheme/bfvrns/bfvrns.h
yamanalab/PALISADE
b476d46170da62d7235d55d9a20497778e96a724
[ "BSD-2-Clause" ]
null
null
null
src/pke/include/scheme/bfvrns/bfvrns.h
yamanalab/PALISADE
b476d46170da62d7235d55d9a20497778e96a724
[ "BSD-2-Clause" ]
null
null
null
src/pke/include/scheme/bfvrns/bfvrns.h
yamanalab/PALISADE
b476d46170da62d7235d55d9a20497778e96a724
[ "BSD-2-Clause" ]
null
null
null
/** * @file bfvrns.h -- Operations for the HPS RNS variant of the BFV cryptoscheme. * @author TPOC: contact@palisade-crypto.org * * @copyright Copyright (c) 2019, New Jersey Institute of Technology (NJIT) * All rights reserved. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. THIS SOFTWARE IS * PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO * EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ /* * * This code implements a RNS variant of the Brakerski-Fan-Vercauteren (BFV) *homomorphic encryption scheme. This scheme is also referred to as the FV *scheme. * * The BFV scheme is introduced in the following papers: * - Zvika Brakerski (2012). Fully Homomorphic Encryption without Modulus *Switching from Classical GapSVP. Cryptology ePrint Archive, Report 2012/078. *(https://eprint.iacr.org/2012/078) * - Junfeng Fan and Frederik Vercauteren (2012). 
Somewhat Practical Fully *Homomorphic Encryption. Cryptology ePrint Archive, Report 2012/144. *(https://eprint.iacr.org/2012/144.pdf) * * Our implementation builds from the designs here: * - Halevi S., Polyakov Y., and Shoup V. An Improved RNS Variant of the BFV *Homomorphic Encryption Scheme. Cryptology ePrint Archive, Report 2018/117. *(https://eprint.iacr.org/2018/117) * - Lepoint T., Naehrig M. (2014) A Comparison of the Homomorphic Encryption *Schemes FV and YASHE. In: Pointcheval D., Vergnaud D. (eds) Progress in *Cryptology – AFRICACRYPT 2014. AFRICACRYPT 2014. Lecture Notes in Computer *Science, vol 8469. Springer, Cham. (https://eprint.iacr.org/2014/062.pdf) * - Jean-Claude Bajard and Julien Eynard and Anwar Hasan and Vincent *Zucca (2016). A Full RNS Variant of FV like Somewhat Homomorphic Encryption *Schemes. Cryptology ePrint Archive, Report 2016/510. *(https://eprint.iacr.org/2016/510) * - Ahmad Al Badawi and Yuriy Polyakov and Khin Mi Mi Aung and Bharadwaj *Veeravalli and Kurt Rohloff (2018). Implementation and Performance Evaluation *of RNS Variants of the BFV Homomorphic Encryption Scheme. Cryptology ePrint *Archive, Report 2018/589. {https://eprint.iacr.org/2018/589} */ #ifndef LBCRYPTO_CRYPTO_BFVRNS_H #define LBCRYPTO_CRYPTO_BFVRNS_H #include "palisade.h" namespace lbcrypto { /** * @brief This is the parameters class for the BFVrns encryption scheme. This * scheme is also referred to as the FVrns scheme. * * @tparam Element a ring element type. */ template <class Element> class LPCryptoParametersBFVrns : public LPCryptoParametersRLWE<Element> { public: /** * Default constructor. */ LPCryptoParametersBFVrns(); /** * Copy constructor. * @param rhs - source */ LPCryptoParametersBFVrns(const LPCryptoParametersBFVrns& rhs); /** * Constructor that initializes values. Note that it is possible to set * parameters in a way that is overall infeasible for actual use. There are * fewer degrees of freedom than parameters provided. 
Typically one chooses * the basic noise, assurance and security parameters as the typical * community-accepted values, then chooses the plaintext modulus and depth as * needed. The element parameters should then be choosen to provide * correctness and security. In some cases we would need to operate over * already encrypted/provided ciphertext and the depth needs to be * pre-computed for initial settings. * * @param &params Element parameters. This will depend on the specific class * of element being used. * @param &plaintextModulus Plaintext modulus, typically denoted as p in most * publications. * @param distributionParameter Noise distribution parameter, typically * denoted as /sigma in most publications. Community standards typically call * for a value of 3 to 6. Lower values provide more room for computation while * larger values provide more security. * @param assuranceMeasure Assurance level, typically denoted as w in most * applications. This is oftern perceived as a fudge factor in the * literature, with a typical value of 9. * @param securityLevel Security level as Root Hermite Factor. We use the * Root Hermite Factor representation of the security level to better conform * with US ITAR and EAR export regulations. This is typically represented as * /delta in the literature. Typically a Root Hermite Factor of 1.006 or less * provides reasonable security for RLWE crypto schemes. * @param relinWindow The size of the relinearization window. This is * relevant when using this scheme for proxy re-encryption, and the value is * denoted as r in the literature. * @param mode optimization setting (RLWE vs OPTIMIZED) * @param depth is the depth of computation circuit supported for these * parameters (not used now; for future use). 
* @param maxDepth is the maximum homomorphic multiplication depth before * performing relinearization */ LPCryptoParametersBFVrns(shared_ptr<typename Element::Params> params, const PlaintextModulus& plaintextModulus, float distributionParameter, float assuranceMeasure, float securityLevel, usint relinWindow, MODE mode = RLWE, int depth = 1, int maxDepth = 2); /** * Constructor that initializes values. * * @param &params element parameters. * @param &encodingParams plaintext space parameters. * @param distributionParameter noise distribution parameter. * @param assuranceMeasure assurance level. = BigInteger::ZERO * @param securityLevel security level (root Hermite factor). * @param relinWindow the size of the relinearization window. * @param mode optimization setting (RLWE vs OPTIMIZED) * @param depth is the depth of computation circuit supported for these * parameters (not used now; for future use). * @param maxDepth is the maximum homomorphic multiplication depth before * performing relinearization */ LPCryptoParametersBFVrns(shared_ptr<typename Element::Params> params, EncodingParams encodingParams, float distributionParameter, float assuranceMeasure, float securityLevel, usint relinWindow, MODE mode = RLWE, int depth = 1, int maxDepth = 2); /** * Constructor that initializes values. * * @param &params element parameters. * @param &encodingParams plaintext space parameters. * @param distributionParameter noise distribution parameter. * @param assuranceMeasure assurance level. = BigInteger::ZERO * @param securityLevel standard security level * @param relinWindow the size of the relinearization window. * @param mode optimization setting (RLWE vs OPTIMIZED) * @param depth is the depth of computation circuit supported for these * parameters (not used now; for future use). 
* @param maxDepth is the maximum homomorphic multiplication depth before * performing relinearization */ LPCryptoParametersBFVrns(shared_ptr<typename Element::Params> params, EncodingParams encodingParams, float distributionParameter, float assuranceMeasure, SecurityLevel securityLevel, usint relinWindow, MODE mode = RLWE, int depth = 1, int maxDepth = 2); /** * Destructor */ virtual ~LPCryptoParametersBFVrns() {} /** * Computes all tables needed for decryption, homomorphic multiplication, and * key switching * @return true on success */ bool PrecomputeCRTTables(); /** * Gets Auxiliary CRT basis S=s1*s2*..sn used in homomorphic multiplication * * @return the precomputed CRT basis */ const shared_ptr<ILDCRTParams<BigInteger>> GetDCRTParamsS() const { return m_paramsS; } /** * Auxiliary expanded CRT basis Q*S = v1*v2*...*vn used in homomorphic * multiplication * * @return the precomputed CRT basis */ const shared_ptr<ILDCRTParams<BigInteger>> GetDCRTParamsQS() const { return m_paramsQS; } /** * Gets the Barrett modulo reduction precomputations for Q * * @return the precomputed table */ std::vector<DoubleNativeInt> const& GetDCRTParamsQModulimu() const { return m_qModulimu; } /** * Gets the Barrett modulo reduction precomputations for S * * @return the precomputed table */ std::vector<DoubleNativeInt> const& GetDCRTParamsSModulimu() const { return m_sModulimu; } /** * Gets the precomputed table of ((p*[(Q/qi)^{-1}]_qi)%qi)/qi; CRT modulus < * 45 bits * * @return the precomputed table */ const std::vector<double>& GetCRTDecryptionFloatTable() const { return m_CRTDecryptionFloatTable; } /** * Gets the precomputed table of ((p*[(Q/qi)^{-1}]_qi)%qi)/qi; CRT modulus is * betweeen 45 and 57 bits * * @return the precomputed table */ const std::vector<long double>& GetCRTDecryptionExtFloatTable() const { return m_CRTDecryptionExtFloatTable; } #ifndef NO_QUADMATH /** * Gets the precomputed table of ((p*[(Q/qi)^{-1}]_qi)%qi)/qi; CRT modulus * has 58..60 bits * * @return the 
precomputed table */ const std::vector<QuadFloat>& GetCRTDecryptionQuadFloatTable() const { return m_CRTDecryptionQuadFloatTable; } #endif /** * Gets the precomputed table of floor[(p*[(Q/qi)^{-1}]_qi)/qi]_p * * @return the precomputed table */ const std::vector<NativeInteger>& GetCRTDecryptionIntTable() const { return m_CRTDecryptionIntTable; } /** * Gets the NTL precomputation for the precomputed table of * floor[(p*[(Q/qi)^{-1}]_qi)/qi]_p * * @return the precomputed table */ const std::vector<NativeInteger>& GetCRTDecryptionIntPreconTable() const { return m_CRTDecryptionIntPreconTable; } /** * Gets the precomputed table of floor(Q/p) mod qi * * @return the precomputed table */ const std::vector<NativeInteger>& GetCRTDeltaTable() const { return m_CRTDeltaTable; } /** * Gets the precomputed table of (Q/qi)^{-1} mod qi * * @return the precomputed table */ const std::vector<NativeInteger>& GetCRTInverseTable() const { return m_CRTInverseTable; } /** * Gets the NTL precomputation for the precomputed table of (Q/qi)^{-1} mod qi * * @return the precomputed table */ const std::vector<NativeInteger>& GetCRTInversePreconTable() const { return m_CRTInversePreconTable; } /** * Gets the precomputed table of (Q/qi) mod si * * @return the precomputed table */ const std::vector<std::vector<NativeInteger>>& GetCRTqDivqiModsiTable() const { return m_CRTqDivqiModsiTable; } /** * Gets the precomputed table of Q mod si * * @return the precomputed table */ const std::vector<NativeInteger>& GetCRTqModsiTable() const { return m_CRTqModsiTable; } /** * Gets the precomputed table of [p*S*(Q*S/vi)^{-1}]_vi / vi * * @return the precomputed table */ const std::vector<long double>& GetCRTMultFloatTable() const { return m_CRTMultFloatTable; } /** * Gets the precomputed table of floor[p*S*[(Q*S/vi)^{-1}]_vi/vi] mod si * * @return the precomputed table */ const std::vector<std::vector<NativeInteger>>& GetCRTMultIntTable() const { return m_CRTMultIntTable; } /** * Gets the precomputed table of 
(S/si)^{-1} mod si * * @return the precomputed table */ const std::vector<NativeInteger>& GetCRTSInverseTable() const { return m_CRTSInverseTable; } /** * Gets the NTL precomputation for the precomputed table of (S/si)^{-1} mod si * * @return the precomputed table */ const std::vector<NativeInteger>& GetCRTSInversePreconTable() const { return m_CRTSInversePreconTable; } /** * Gets the precomputed table of (S/si) mod qi table * * @return the precomputed table */ const std::vector<std::vector<NativeInteger>>& GetCRTsDivsiModqiTable() const { return m_CRTsDivsiModqiTable; } /** * Gets the precomputed table of S mod qi table * * @return the precomputed table */ const std::vector<NativeInteger>& GetCRTsModqiTable() const { return m_CRTsModqiTable; } /** * == operator to compare to this instance of LPCryptoParametersBFVrns object. * * @param &rhs LPCryptoParameters to check equality against. */ bool operator==(const LPCryptoParameters<Element>& rhs) const { const LPCryptoParametersBFVrns<Element>* el = dynamic_cast<const LPCryptoParametersBFVrns<Element>*>(&rhs); if (el == 0) return false; return LPCryptoParametersRLWE<Element>::operator==(rhs); } void PrintParameters(std::ostream& os) const { LPCryptoParametersRLWE<Element>::PrintParameters(os); } // NOTE that we do not serialize any of the members declared in this class. 
// they are all cached computations, and get recomputed in any implementation // that does a deserialization template <class Archive> void save(Archive& ar, std::uint32_t const version) const { ar(::cereal::base_class<LPCryptoParametersRLWE<Element>>(this)); } template <class Archive> void load(Archive& ar, std::uint32_t const version) { if (version > SerializedVersion()) { PALISADE_THROW(deserialize_error, "serialized object version " + std::to_string(version) + " is from a later version of the library"); } ar(::cereal::base_class<LPCryptoParametersRLWE<Element>>(this)); PrecomputeCRTTables(); } std::string SerializedObjectName() const { return "BFVrnsSchemeParameters"; } static uint32_t SerializedVersion() { return 1; } private: // Auxiliary CRT basis S=s1*s2*..sn used in homomorphic multiplication shared_ptr<ILDCRTParams<BigInteger>> m_paramsS; // Auxiliary expanded CRT basis Q*S = v1*v2*...*vn used in homomorphic // multiplication shared_ptr<ILDCRTParams<BigInteger>> m_paramsQS; // Barrett modulo reduction precomputation std::vector<DoubleNativeInt> m_qModulimu; // Barrett modulo reduction precomputation std::vector<DoubleNativeInt> m_sModulimu; // when log2 qi <= 44 bits // Stores a precomputed table of ((p*[(Q/qi)^{-1}]_qi)%qi)/qi std::vector<double> m_CRTDecryptionFloatTable; // when 44 < log2 qi <= 57 bits // Stores a precomputed table of ((p*[(Q/qi)^{-1}]_qi)%qi)/qi std::vector<long double> m_CRTDecryptionExtFloatTable; #ifndef NO_QUADMATH // when log2 qi = 58..60 bits // Stores a precomputed table of ((p*[(Q/qi)^{-1}]_qi)%qi)/qi std::vector<QuadFloat> m_CRTDecryptionQuadFloatTable; #endif // Stores a precomputed table of floor[(p*[(Q/qi)^{-1}]_qi)/qi]_p std::vector<NativeInteger> m_CRTDecryptionIntTable; // Stores a precomputed table of floor(Q/p) mod qi std::vector<NativeInteger> m_CRTDeltaTable; // Stores a precomputed table of (Q/qi)^{-1} mod qi std::vector<NativeInteger> m_CRTInverseTable; // Stores an NTL precomputation for the precomputed table of 
(Q/qi)^{-1} mod // qi std::vector<NativeInteger> m_CRTInversePreconTable; // Stores a precomputed table of (Q/qi) mod si std::vector<std::vector<NativeInteger>> m_CRTqDivqiModsiTable; // Stores a precomputed table of Q mod si std::vector<NativeInteger> m_CRTqModsiTable; // Stores a precomputed table of floor[p*S*[(Q*S/vi)^{-1}]_vi/vi] mod si std::vector<std::vector<NativeInteger>> m_CRTMultIntTable; // Stores a precomputed table of [p*S*(Q*S/vi)^{-1}]_vi / vi std::vector<long double> m_CRTMultFloatTable; // Stores a precomputed table of (S/si)^{-1} mod si std::vector<NativeInteger> m_CRTSInverseTable; // Stores an NTL precomputation for the precomputed table of (S/si)^{-1} mod // si std::vector<NativeInteger> m_CRTSInversePreconTable; // Stores a precomputed table of (S/si) mod qi table std::vector<std::vector<NativeInteger>> m_CRTsDivsiModqiTable; // Stores a precomputed table of S mod qi table std::vector<NativeInteger> m_CRTsModqiTable; // Stores an NTL precomputation for the precomputed table of // floor[(p*[(Q/qi)^{-1}]_qi)/qi]_p std::vector<NativeInteger> m_CRTDecryptionIntPreconTable; }; /** * @brief Parameter generation for BFVrns. This scheme is also referred to as * the FV scheme. * * @tparam Element a ring element. */ template <class Element> class LPAlgorithmParamsGenBFVrns : public LPAlgorithmParamsGenBFV<Element> { public: /** * Default constructor */ LPAlgorithmParamsGenBFVrns() {} /** * Method for computing all derived parameters based on chosen primitive * parameters * * @param cryptoParams the crypto parameters object to be populated with * parameters. * @param evalAddCount number of EvalAdds assuming no EvalMult and KeySwitch * operations are performed. * @param evalMultCount number of EvalMults assuming no EvalAdd and KeySwitch * operations are performed. * @param keySwitchCount number of KeySwitch operations assuming no EvalAdd * and EvalMult operations are performed. 
* @param dcrtBits number of bits in each CRT modulus * @param n ring dimension in case the user wants to use a custom ring * dimension */ bool ParamsGen(shared_ptr<LPCryptoParameters<Element>> cryptoParams, int32_t evalAddCount = 0, int32_t evalMultCount = 0, int32_t keySwitchCount = 0, size_t dcrBits = 60, uint32_t n = 0) const; }; /** * @brief Encryption algorithm implementation for BFVrns for the basic public * key encrypt, decrypt and key generation methods for the BFVrns encryption * scheme. * * @tparam Element a ring element. */ template <class Element> class LPAlgorithmBFVrns : public LPAlgorithmBFV<Element> { public: /** * Default constructor */ LPAlgorithmBFVrns() {} /** * Method for encrypting plaintext using BFVrns. * * @param publicKey public key used for encryption. * @param &plaintext the plaintext input. * @param doEncryption encrypts if true, embeds (encodes) the plaintext into * cryptocontext if false * @return ciphertext which results from encryption. */ Ciphertext<Element> Encrypt(const LPPublicKey<Element> publicKey, Element plaintext) const; /** * Method for encrypting plaintext with private key using BFVrns. * * @param privateKey private key used for encryption. * @param plaintext the plaintext input. * @param doEncryption encrypts if true, embeds (encodes) the plaintext into * cryptocontext if false * @return ciphertext which results from encryption. */ Ciphertext<Element> Encrypt(const LPPrivateKey<Element> privateKey, Element plaintext) const; /** * Method for decrypting using BFVrns. See the class description for citations * on where the algorithms were taken from. * * @param privateKey private key used for decryption. * @param ciphertext ciphertext to be decrypted. * @param *plaintext the plaintext output. * @return the decrypted plaintext returned. */ DecryptResult Decrypt(const LPPrivateKey<Element> privateKey, ConstCiphertext<Element> ciphertext, NativePoly* plaintext) const; }; /** * @brief SHE algorithms implementation for BFVrns. 
* * @tparam Element a ring element. */ template <class Element> class LPAlgorithmSHEBFVrns : public LPAlgorithmSHEBFV<Element> { public: /** * Default constructor */ LPAlgorithmSHEBFVrns() {} /** * Function for homomorphic addition of ciphertext and plaintext. * * @param ct1 input ciphertext. * @param pt input ciphertext. * @return new ciphertext. */ Ciphertext<Element> EvalAdd(ConstCiphertext<Element> ct, ConstPlaintext pt) const; /** * Function for homomorphic subtraction of ciphertext ans plaintext. * * @param ct input ciphertext. * @param pt input ciphertext. * @return new ciphertext. */ Ciphertext<Element> EvalSub(ConstCiphertext<Element> ct1, ConstPlaintext pt) const; /** * Function for homomorphic evaluation of ciphertexts. * The multiplication is supported for a fixed level without keyswitching * requirement (default level=2). If the total depth of the ciphertexts * exceeds the supported level, it throws an error. * * @param ciphertext1 first input ciphertext. * @param ciphertext2 second input ciphertext. * @return resulting EvalMult ciphertext. */ Ciphertext<Element> EvalMult(ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct2) const; /** * Method for generating a KeySwitchHint using RLWE relinearization * * @param originalPrivateKey Original private key used for encryption. * @param newPrivateKey New private key to generate the keyswitch hint. * @return resulting keySwitchHint. */ LPEvalKey<Element> KeySwitchGen( const LPPrivateKey<Element> originalPrivateKey, const LPPrivateKey<Element> newPrivateKey) const; /** * Method for key switching based on a KeySwitchHint using RLWE * relinearization * * @param keySwitchHint Hint required to perform the ciphertext switching. * @param &cipherText Original ciphertext to perform switching on. 
* @return new ciphertext */ Ciphertext<Element> KeySwitch(const LPEvalKey<Element> keySwitchHint, ConstCiphertext<Element> cipherText) const; /** * Function for evaluating multiplication on ciphertext followed by * relinearization operation. Currently it assumes that the input arguments * have total depth smaller than the supported depth. Otherwise, it throws an * error. * * @param ct1 first input ciphertext. * @param ct2 second input ciphertext. * @param ek is the evaluation key to make the newCiphertext * decryptable by the same secret key as that of ciphertext1 and ciphertext2. * @return new ciphertext */ Ciphertext<Element> EvalMultAndRelinearize( ConstCiphertext<Element> ct1, ConstCiphertext<Element> ct, const vector<LPEvalKey<Element>>& ek) const; }; /** * @brief PRE algorithms implementation for BFVrns. * * @tparam Element a ring element. */ template <class Element> class LPAlgorithmPREBFVrns : public LPAlgorithmPREBFV<Element> { public: /** * Default constructor */ LPAlgorithmPREBFVrns() {} /** * The generation of re-encryption keys is based on the BG-PRE scheme * described in Polyakov, et. al., "Fast proxy re-encryption for * publish/subscribe systems". * * The above scheme was found to have a weakness in Cohen, "What about Bob? * The inadequacy of CPA Security for proxy re-encryption". Section 5.1 shows * an attack where given an original ciphertext c=(c0,c1) and a re-encrypted * ciphertext c'=(c'0, c'1), the subscriber (Bob) can compute the secret key * of the publisher (Alice). * * We fix this vulnerability by making re-encryption keys be encryptions of * the s*(2^{i*r}) terms, instead of simple addition as previously defined. * This makes retrieving the secret key using the above attack as hard as * breaking the RLWE assumption. * * Our modification makes the scheme CPA-secure, but does not achieve * HRA-security as it was defined in the Cohen paper above. 
Please look at the * ReEncrypt method for an explanation of the two security definitions and how * to achieve each in Palisade. * * @param newKey public key for the new private key. * @param origPrivateKey original private key used for decryption. * @return evalKey the evaluation key for switching the ciphertext to be * decryptable by new private key. */ LPEvalKey<Element> ReKeyGen(const LPPublicKey<Element> newKey, const LPPrivateKey<Element> origPrivateKey) const; /** * This method implements re-encryption using the evaluation key generated by * ReKeyGen. * * The PRE scheme used can achieve two different levels of security, based on * the value supplied in the publicKey argument: * * If publicKey is nullptr, the PRE scheme is CPA-secure. If the publicKey of * the recipient of the re-encrypted ciphertext is supplied, then the scheme * is HRA- secure. Please refer to Cohen, "What about Bob? The inadequacy of * CPA Security for proxy re-encryption", for more information on HRA * security. * * The tradeoff of going for HRA is twofold: (1) performance is a little worst * because we add one additional encryption and homomorphic addition to the * result, and (2) more noise is added to the result because of the additional * operations - in particular, the extra encryption draws noise from a * distribution whose standard deviation is scaled by K, the number of digits * in the PRE decomposition. * * @param evalKey the evaluation key. * @param ciphertext the input ciphertext. * @param publicKey the public key of the recipient of the re-encrypted * ciphertext. * @return resulting ciphertext after the re-encryption operation. */ Ciphertext<Element> ReEncrypt( const LPEvalKey<Element> EK, ConstCiphertext<Element> ciphertext, const LPPublicKey<Element> publicKey = nullptr) const; }; /** * @brief Concrete class for the FHE Multiparty algorithms on BFVrns. This * scheme is also referred to as the FV scheme. 
A version of this multiparty * scheme built on the BGV scheme is seen here: * - Asharov G., Jain A., López-Alt A., Tromer E., Vaikuntanathan V., Wichs D. * (2012) Multiparty Computation with Low Communication, Computation and * Interaction via Threshold FHE. In: Pointcheval D., Johansson T. (eds) * Advances in Cryptology – EUROCRYPT 2012. EUROCRYPT 2012. Lecture Notes in * Computer Science, vol 7237. Springer, Berlin, Heidelberg * * During offline key generation, this multiparty scheme relies on the clients * coordinating their public key generation. To do this, a single client * generates a public-secret key pair. This public key is shared with other keys * which use an element in the public key to generate their own public keys. The * clients generate a shared key pair using a scheme-specific approach, then * generate re-encryption keys. Re-encryption keys are uploaded to the server. * Clients encrypt data with their public keys and send the encrypted data * server. The data is re-encrypted. Computations are then run on the data. The * result is sent to each of the clients. One client runs a "Leader" multiparty * decryption operation with its own secret key. All other clients run a * regular "Main" multiparty decryption with their own secret key. The resulting * partially decrypted ciphertext are then fully decrypted with the decryption * fusion algorithms. * * @tparam Element a ring element. */ template <class Element> class LPAlgorithmMultipartyBFVrns : public LPAlgorithmMultipartyBFV<Element> { public: /** * Default constructor */ LPAlgorithmMultipartyBFVrns() {} /** * Method for fusing the partially decrypted ciphertext. * * @param &ciphertextVec ciphertext id decrypted. * @param *plaintext the plaintext output. * @return the decoding result. 
*/ DecryptResult MultipartyDecryptFusion( const vector<Ciphertext<Element>>& ciphertextVec, NativePoly* plaintext) const; template <class Archive> void save(Archive& ar) const { ar(cereal::base_class<LPAlgorithmMultipartyBFV<Element>>(this)); } template <class Archive> void load(Archive& ar) { ar(cereal::base_class<LPAlgorithmMultipartyBFV<Element>>(this)); } LPEvalKey<Element> MultiKeySwitchGen( const LPPrivateKey<Element> originalPrivateKey, const LPPrivateKey<Element> newPrivateKey, const LPEvalKey<Element> ek) const; std::string SerializedObjectName() const { return "BFVrnsMultiparty"; } }; /** * @brief Main public key encryption scheme for BFVrns implementation, * @tparam Element a ring element. */ template <class Element> class LPPublicKeyEncryptionSchemeBFVrns : public LPPublicKeyEncryptionScheme<Element> { public: LPPublicKeyEncryptionSchemeBFVrns(); bool operator==(const LPPublicKeyEncryptionScheme<Element>& sch) const { if (dynamic_cast<const LPPublicKeyEncryptionSchemeBFVrns<Element>*>(&sch) == 0) return false; return true; } void Enable(PKESchemeFeature feature); template <class Archive> void save(Archive& ar, std::uint32_t const version) const { ar(::cereal::base_class<LPPublicKeyEncryptionScheme<Element>>(this)); } template <class Archive> void load(Archive& ar, std::uint32_t const version) { ar(::cereal::base_class<LPPublicKeyEncryptionScheme<Element>>(this)); } std::string SerializedObjectName() const { return "BFVrnsScheme"; } }; } // namespace lbcrypto #endif
36.512727
80
0.709524
[ "object", "vector" ]
bd6ec49f766ecd4a1d3f44afe1529e6eb4989e2b
3,708
h
C
Airship/AirshipCore/Source/Internal/UAAttributeRegistrar+Internal.h
andreinagy/ios-library
77770031f0a5d83539ea5468780126eda8d167a9
[ "Apache-2.0" ]
null
null
null
Airship/AirshipCore/Source/Internal/UAAttributeRegistrar+Internal.h
andreinagy/ios-library
77770031f0a5d83539ea5468780126eda8d167a9
[ "Apache-2.0" ]
null
null
null
Airship/AirshipCore/Source/Internal/UAAttributeRegistrar+Internal.h
andreinagy/ios-library
77770031f0a5d83539ea5468780126eda8d167a9
[ "Apache-2.0" ]
null
null
null
/* Copyright Airship and Contributors */ #import <Foundation/Foundation.h> #import "UAPersistentQueue+Internal.h" #import "UAAttributePendingMutations.h" #import "UARuntimeConfig+Internal.h" #import "UAPreferenceDataStore.h" #import "UAAttributeAPIClient+Internal.h" NS_ASSUME_NONNULL_BEGIN /** * Attribute upload results. */ typedef NS_ENUM(NSUInteger, UAAttributeUploadResult) { /** * Attribute either uploaded successfully or failed with an unrecoverable error code. */ UAAttributeUploadResultFinished, /** * Attribute already up to date.. */ UAAttributeUploadResultUpToDate, /** * Attribute uploads failed and should retry. */ UAAttributeUploadResultFailed, }; /** * Delegate protocol for registrar callbacks. */ @protocol UAAttributeRegistrarDelegate <NSObject> @required /** * Called when mutations have been succesfully uploaded. * * @param mutations The mutations. * @param identifier The identifier associated with the mutations. */ - (void)uploadedAttributeMutations:(UAAttributePendingMutations *)mutations identifier:(NSString *)identifier; @end /** The registrar responsible for routing requests to the attributes APIs. */ @interface UAAttributeRegistrar : NSObject ///--------------------------------------------------------------------------------------- /// @name Attribute Registrar Internal Methods ///--------------------------------------------------------------------------------------- /** * Factory method to create the channel attribute registrar. * @param config The Airship config. * @return A new attribute registrar instance. */ + (instancetype)channelRegistrarWithConfig:(UARuntimeConfig *)config dataStore:(UAPreferenceDataStore *)dateStore; /** * Factory method to create the named user attribute registrar. * @param config The Airship config. * @return A new attribute registrar instance. 
*/ + (instancetype)namedUserRegistrarWithConfig:(UARuntimeConfig *)config dataStore:(UAPreferenceDataStore *)dateStore; /** * Factory method to create an attribute registrar for testing. * @param APIClient The attributes API client. * @param persistentQueue The queue. * @param application The application. * @return A new attributes registrar instance. */ + (instancetype)registrarWithAPIClient:(UAAttributeAPIClient *)APIClient persistentQueue:(UAPersistentQueue *)persistentQueue application:(UIApplication *)application; /** Method to save pending mutations for asynchronous upload. @param mutations The channel attribute mutations to save. */ - (void)savePendingMutations:(UAAttributePendingMutations *)mutations; /** * Clears pending mutations. */ - (void)clearPendingMutations; /** * Sets the currently associated identifier. * * @param identifier The identifier. * @param clearPendingOnChange Whether pending mutations should be cleared if the identifier has changed. */ - (void)setIdentifier:(nullable NSString *)identifier clearPendingOnChange:(BOOL)clearPendingOnChange; /** * Update attributes * * @param completionHandler The completion handler. * @return UADisposable object */ - (UADisposable *)updateAttributesWithCompletionHandler:(void(^)(UAAttributeUploadResult result))completionHandler; /** * The current identifier associated with this registrar. */ @property (atomic, readonly, nullable) NSString *identifier; /** * Pending mutations. */ @property (nonatomic, readonly) UAAttributePendingMutations *pendingMutations; /** * The delegate to receive registrar callbacks. */ @property (nonatomic, weak) id<UAAttributeRegistrarDelegate> delegate; @end NS_ASSUME_NONNULL_END
28.744186
116
0.71521
[ "object" ]
bd79654e3d31e1ad1d2f15b31613052c9df37b69
4,792
h
C
src/MapleFE/shared/include/parser_rec.h
venshine/OpenArkCompiler
264cd4463834356658154f0d254672ef559f245f
[ "MulanPSL-1.0" ]
2
2019-09-06T07:02:41.000Z
2019-09-09T12:24:46.000Z
src/MapleFE/shared/include/parser_rec.h
venshine/OpenArkCompiler
264cd4463834356658154f0d254672ef559f245f
[ "MulanPSL-1.0" ]
null
null
null
src/MapleFE/shared/include/parser_rec.h
venshine/OpenArkCompiler
264cd4463834356658154f0d254672ef559f245f
[ "MulanPSL-1.0" ]
null
null
null
/* * Copyright (C) [2020] Futurewei Technologies, Inc. All rights reverved. * * OpenArkFE is licensed under the Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. * You may obtain a copy of Mulan PSL v2 at: * * http://license.coscl.org.cn/MulanPSL2 * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR * FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. */ //////////////////////////////////////////////////////////////////////////// // The data strcutures used during parsing left recursions are defined // in this file. //////////////////////////////////////////////////////////////////////////// #ifndef __PARSER_REC_H__ #define __PARSER_REC_H__ #include "ruletable.h" namespace maplefe { // After we traverse successfully on a LeadFronNode or a Circle, we need record // the path which reach from LeadNode to the successful LeadFronNode or FronNode // of a circle. class AppealNode; class RecPath { public: AppealNode *mLeadNode; bool mInCircle; // true : from FronNode of a circle. // false: from a LeadFronNode unsigned mCircleIdx; // Index of the Circle in the circle vector of LeadNode. SmallVector<AppealNode*> mPath; // This is the final path we concluded. // first element is the LeadNode. // last element is the successful matching one. public: RecPath(){}; ~RecPath() {mPath.Release();} }; enum RecTraInstance { InstanceFirst, // we are handling the first instance InstanceRest, // we are handling the rest instances. InstanceNA }; // The parsing is done as a Wavefront traversal. It takes a recursion group // as a unit for traversal. Other rule tables not belonging to any recursion // groups are treated as a standalone unit without iteration. // Wavefront traversal on the recursion group is done by iterations. 
Each // iteration it walks through all recursion nodes of the group, and also the // FronNodes. If it hit a LeadNode for the second time in one iteration, it // takes the result of the previous iteration, and build a edge between // this iteration and the preivous iteration. class RecursionTraversal { private: Parser *mParser; Recursion *mRec; RuleTable *mRuleTable; AppealNode *mSelf; AppealNode *mParent; unsigned mGroupId; // Id of recursion group RecTraInstance mInstance; // These are the 1st appearance of current instance. SmallVector<AppealNode*> mLeadNodes; // These are the 1st appearance of previous instance. Used when // connect the 2nd appearance of current instance to the previous. SmallVector<AppealNode*> mPrevLeadNodes; // Visited LeadNodes. This is a per-iteration data. // // In each iteration, the first time a LeadNode is visited, it will be saved // in this vector. The second time it's visited, it should go to connect // with the node in the previous instance or set as Failed2ndOf1st. SmallVector<RuleTable*> mVisitedLeadNodes; // Visited Recursion Node. This is a per-iteration data too. // This doesn't include lead node. // Visiting an un-visited recursion node will do the regular traversal. // Visiting a visited recursion node will take the result of TraversalRuleTablePre(). 
SmallVector<RuleTable*> mVisitedRecursionNodes; SmallVector<AppealNode*> mAppealPoints; // places to start appealing bool mTrace; unsigned mIndentation; private: bool mSucc; unsigned mStartToken; bool FindFirstInstance(); bool FindRestInstance(); bool FindInstances(); void FinalConnection(); public: bool IsSucc() {return mSucc;} unsigned GetStartToken() {return mStartToken;} RecTraInstance GetInstance() {return mInstance;} unsigned LongestMatch() {return mSelf->LongestMatch();} void AddAppealPoint(AppealNode *n) {mAppealPoints.PushBack(n);} RuleTable* GetRuleTable() {return mRuleTable;} void SetTrace(bool b){mTrace = b;} void SetIndentation(unsigned i) {mIndentation = i;} void DumpIndentation(); void AddVisitedLeadNode(RuleTable *rt) {mVisitedLeadNodes.PushBack(rt);} bool LeadNodeVisited(RuleTable *rt) {return mVisitedLeadNodes.Find(rt);} void AddVisitedRecursionNode(RuleTable *rt) {mVisitedRecursionNodes.PushBack(rt);} bool RecursionNodeVisited(RuleTable *rt) {return mVisitedRecursionNodes.Find(rt);} void AddLeadNode(AppealNode *n) {mLeadNodes.PushBack(n);} public: RecursionTraversal(AppealNode *sel, AppealNode *parent, Parser *parser); ~RecursionTraversal(); void Work(); bool ConnectPrevious(AppealNode*); }; } #endif
34.47482
87
0.702212
[ "vector" ]
bd7b604bc8038f9b348408c1b0f72d16baddc65a
2,393
h
C
AFFEvent/AFFEvent/AFFEventHandler/AFFEventHandler.h
jfuellert/AFFEvent
949dfd54aa3385c75b518c73902c76eac220b5d9
[ "MIT" ]
1
2020-08-20T14:41:50.000Z
2020-08-20T14:41:50.000Z
AFFEvent/AFFEvent/AFFEventHandler/AFFEventHandler.h
jfuellert/AFFEvent
949dfd54aa3385c75b518c73902c76eac220b5d9
[ "MIT" ]
null
null
null
AFFEvent/AFFEvent/AFFEventHandler/AFFEventHandler.h
jfuellert/AFFEvent
949dfd54aa3385c75b518c73902c76eac220b5d9
[ "MIT" ]
null
null
null
// // AFFEventHandler.h // AF Apps // // Created by Jeremy Fuellert on 2013-04-10. // Copyright (c) 2013 AF Apps. All rights reserved. // // Permission is hereby granted, free of charge, to any person // obtaining a copy of this software and associated documentation // files (the "Software"), to deal in the Software without // restriction, including without limitation the rights to use, // copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following // conditions: // // The above copyright notice and this permission notice shall be // included in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, // EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES // OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND // NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT // HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, // WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR // OTHER DEALINGS IN THE SOFTWARE. // #import "AFFEventStatics.h" @class AFFEvent; /** AFFEventHandler is a class used for sending an event from one object to another. */ @interface AFFEventHandler : NSObject /** The sender of the event. */ @property (nonatomic, assign) NSObject *sender; /** The observer of the event. */ @property (nonatomic, assign) NSObject *observer; /** The selector that the event will be sent to. */ @property (nonatomic, assign) SEL selector; /** Arguments that are passed to the selector. */ @property (nonatomic, retain) NSMutableArray *args; /** A bitewise mask of handler execution characteristics. */ @property (nonatomic, assign) AFFEventType type; /** A BOOL determining if the block is locked. If |isLocked| is 'YES' then it will not execute it's handler. 
*/ @property (nonatomic, assign, setter = setLocked:) BOOL isLocked; /** The name of the event. */ @property (nonatomic, retain) NSString *eventNameWithHash; /** Returns an AFFEventHandler. */ + (AFFEventHandler *)eventHandlerWithSender:(id)sender observer:(id)observer selector:(SEL)selector name:(NSString *)name args:(NSArray *)args; - (void)invokeWithEvent:(AFFEvent *)event; @end
37.984127
143
0.736732
[ "object" ]
bd7c8034e1b0f25840b41682a27cc15367b50605
5,177
h
C
aws-cpp-sdk-appintegrations/include/aws/appintegrations/model/DataIntegrationSummary.h
perfectrecall/aws-sdk-cpp
fb8cbebf2fd62720b65aeff841ad2950e73d8ebd
[ "Apache-2.0" ]
1
2022-02-10T08:06:54.000Z
2022-02-10T08:06:54.000Z
aws-cpp-sdk-appintegrations/include/aws/appintegrations/model/DataIntegrationSummary.h
perfectrecall/aws-sdk-cpp
fb8cbebf2fd62720b65aeff841ad2950e73d8ebd
[ "Apache-2.0" ]
1
2022-01-03T23:59:37.000Z
2022-01-03T23:59:37.000Z
aws-cpp-sdk-appintegrations/include/aws/appintegrations/model/DataIntegrationSummary.h
ravindra-wagh/aws-sdk-cpp
7d5ff01b3c3b872f31ca98fb4ce868cd01e97696
[ "Apache-2.0" ]
1
2021-12-30T04:25:33.000Z
2021-12-30T04:25:33.000Z
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include <aws/appintegrations/AppIntegrationsService_EXPORTS.h> #include <aws/core/utils/memory/stl/AWSString.h> #include <utility> namespace Aws { namespace Utils { namespace Json { class JsonValue; class JsonView; } // namespace Json } // namespace Utils namespace AppIntegrationsService { namespace Model { /** * <p>Summary information about the DataIntegration.</p><p><h3>See Also:</h3> <a * href="http://docs.aws.amazon.com/goto/WebAPI/appintegrations-2020-07-29/DataIntegrationSummary">AWS * API Reference</a></p> */ class AWS_APPINTEGRATIONSSERVICE_API DataIntegrationSummary { public: DataIntegrationSummary(); DataIntegrationSummary(Aws::Utils::Json::JsonView jsonValue); DataIntegrationSummary& operator=(Aws::Utils::Json::JsonView jsonValue); Aws::Utils::Json::JsonValue Jsonize() const; /** * <p>The Amazon Resource Name (ARN) of the DataIntegration.</p> */ inline const Aws::String& GetArn() const{ return m_arn; } /** * <p>The Amazon Resource Name (ARN) of the DataIntegration.</p> */ inline bool ArnHasBeenSet() const { return m_arnHasBeenSet; } /** * <p>The Amazon Resource Name (ARN) of the DataIntegration.</p> */ inline void SetArn(const Aws::String& value) { m_arnHasBeenSet = true; m_arn = value; } /** * <p>The Amazon Resource Name (ARN) of the DataIntegration.</p> */ inline void SetArn(Aws::String&& value) { m_arnHasBeenSet = true; m_arn = std::move(value); } /** * <p>The Amazon Resource Name (ARN) of the DataIntegration.</p> */ inline void SetArn(const char* value) { m_arnHasBeenSet = true; m_arn.assign(value); } /** * <p>The Amazon Resource Name (ARN) of the DataIntegration.</p> */ inline DataIntegrationSummary& WithArn(const Aws::String& value) { SetArn(value); return *this;} /** * <p>The Amazon Resource Name (ARN) of the DataIntegration.</p> */ inline DataIntegrationSummary& WithArn(Aws::String&& value) { SetArn(std::move(value)); 
return *this;} /** * <p>The Amazon Resource Name (ARN) of the DataIntegration.</p> */ inline DataIntegrationSummary& WithArn(const char* value) { SetArn(value); return *this;} /** * <p>The name of the DataIntegration.</p> */ inline const Aws::String& GetName() const{ return m_name; } /** * <p>The name of the DataIntegration.</p> */ inline bool NameHasBeenSet() const { return m_nameHasBeenSet; } /** * <p>The name of the DataIntegration.</p> */ inline void SetName(const Aws::String& value) { m_nameHasBeenSet = true; m_name = value; } /** * <p>The name of the DataIntegration.</p> */ inline void SetName(Aws::String&& value) { m_nameHasBeenSet = true; m_name = std::move(value); } /** * <p>The name of the DataIntegration.</p> */ inline void SetName(const char* value) { m_nameHasBeenSet = true; m_name.assign(value); } /** * <p>The name of the DataIntegration.</p> */ inline DataIntegrationSummary& WithName(const Aws::String& value) { SetName(value); return *this;} /** * <p>The name of the DataIntegration.</p> */ inline DataIntegrationSummary& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;} /** * <p>The name of the DataIntegration.</p> */ inline DataIntegrationSummary& WithName(const char* value) { SetName(value); return *this;} /** * <p>The URI of the data source.</p> */ inline const Aws::String& GetSourceURI() const{ return m_sourceURI; } /** * <p>The URI of the data source.</p> */ inline bool SourceURIHasBeenSet() const { return m_sourceURIHasBeenSet; } /** * <p>The URI of the data source.</p> */ inline void SetSourceURI(const Aws::String& value) { m_sourceURIHasBeenSet = true; m_sourceURI = value; } /** * <p>The URI of the data source.</p> */ inline void SetSourceURI(Aws::String&& value) { m_sourceURIHasBeenSet = true; m_sourceURI = std::move(value); } /** * <p>The URI of the data source.</p> */ inline void SetSourceURI(const char* value) { m_sourceURIHasBeenSet = true; m_sourceURI.assign(value); } /** * <p>The URI of the data source.</p> */ 
inline DataIntegrationSummary& WithSourceURI(const Aws::String& value) { SetSourceURI(value); return *this;} /** * <p>The URI of the data source.</p> */ inline DataIntegrationSummary& WithSourceURI(Aws::String&& value) { SetSourceURI(std::move(value)); return *this;} /** * <p>The URI of the data source.</p> */ inline DataIntegrationSummary& WithSourceURI(const char* value) { SetSourceURI(value); return *this;} private: Aws::String m_arn; bool m_arnHasBeenSet; Aws::String m_name; bool m_nameHasBeenSet; Aws::String m_sourceURI; bool m_sourceURIHasBeenSet; }; } // namespace Model } // namespace AppIntegrationsService } // namespace Aws
29.248588
118
0.644389
[ "model" ]
bd87e03f0c705ab174d2c37607a112085971e294
4,209
h
C
project2D/Agent.h
Krian0/ArtificialI
215aa409441af15dfaf1783dcb44392dc2178887
[ "MIT" ]
null
null
null
project2D/Agent.h
Krian0/ArtificialI
215aa409441af15dfaf1783dcb44392dc2178887
[ "MIT" ]
null
null
null
project2D/Agent.h
Krian0/ArtificialI
215aa409441af15dfaf1783dcb44392dc2178887
[ "MIT" ]
null
null
null
#pragma once #include "BehaviourEnum.h" #include "Renderer2D.h" #include "Texture.h" #include "Vector2.h" #include <vector> #include <stack> #include <map> using std::map; using std::stack; using std::vector; using aie::Texture; using aie::Renderer2D; class SteeringBehaviour; class SteeringForce; class StateMachine; class IBehaviour; class AStarGraph; class BoxObject; class Agent { public: Agent() {} ~Agent() {}; virtual void Update(float DeltaTime) = 0; virtual void Draw(Renderer2D* renderer) = 0; //Sets m_force to parameter. Takes a Vector2 as parameter void AddForce(Vector2 force); //Makes the Agent's sprite flicker to the hit-sprite and back several times upon being hit void OnHit(); //Sets m_wasAttacked to false void OnFlee(); //Handles collision through use of CollidePlus/CollideMinus. Takes two Vector2, a bool and a float as parameters void OnCollide(Vector2 Min, Vector2 Max, bool UsingAgent = false, float VelocityDegrade = 0.90f); //Handles collision through use of CollidePlus/CollideMinus. Takes a Vector2 as parameter void OnCollide(Vector2 OtherAgentPos); //Handles collision through for Objects specifically. Takes a BoxObject* as parameter void OnCollide(BoxObject* Object); //Sets m_pathfindingMode to given boolean value. Takes a bool as parameter void SetPathfindingMode(bool BooleanValue); //Set m_targets to given Agent vector. Takes an Agent* vector as parameter void SetTargetList(vector<Agent*> Targets); //Add given Agent to m_friends. Takes an Agent* as parameter void AddFriendToList(Agent* Friend); //Sets m_objects to given BoxObject vector. Takes a BoxObject* vector as parameter void AddObjects(vector<BoxObject*> Objects); //Set State at given Steering Enum in SteeringForce map to given SteeringForce. Takes a SteeringE (Enum) and SteeringForce* as parameters void AddSteering(SteeringE Steering_Enum, SteeringForce* Steering_Force); //Set State at given Steering Enum in SteeringForce map to NULL. 
Takes a SteeringE (Enum) as parameter void RemoveSteering(SteeringE Steering_Enum); //Returns Agent* vector m_targets vector<Agent*> GetTargets(); //Returns Agent* vector m_friends vector<Agent*> GetFriends(); //Returns BoxObject* vector m_objects vector<BoxObject*> GetObjects(); //Returns Vector2 m_position Vector2 GetPos(); //Returns Vector2 m_velocity Vector2 GetCurrentVelocity(); //Returns float m_radius float GetRadius(); //Returns bool m_wasAttacked. Used in AttackState bool WasAttacked(); //Returns bool m_isPlayer. Used in controlling States for the player (mainly in AttackState) bool IsAgentPlayer(); //Returns true if given Vector2 (with a radius of Agent's radius) intersects with Agent, otherwise returns false. Takes a Vector2 as parameter bool IsCollidingWithNode(Vector2 Point); //Returns true if parameter Agent is colliding with the Agent, otherwise returns false. Takes an Agent* as parameter bool IsColliding(Agent* The_Target); //Returns bool m_pathfindingMode bool PathfindingModeIsOn(); //Return a Vector2 stack through AStar with the closest Node to Agent (start) and a random target Node (end) stack<Vector2> GetPathfindingVectors(); Vector2 m_currentlySeeking; int m_sightRange; int m_attackRange; int m_velocityLimit; protected: //Move Agent so that they no longer collide, reverse velocity. Takes four float and a bool as parameters void CollidePlus(float Boundary, float DegradeValue, float &Pos, float &Vel, bool UsingAgent); //Move Agent so that they no longer collide, reverse velocity. 
Takes four float and a bool as parameters void CollideMinus(float Boundary, float DegradeValue, float &Pos, float &Vel, bool UsingAgent); //Body Texture* m_sprite; Texture* m_hitSprite; Vector2 m_position; Vector2 m_force; Vector2 m_velocity; float m_radius; //Brain map<BehaviourE, IBehaviour*> m_behaviours; StateMachine* m_stateMachine; Vector2 m_windowSize; vector<Agent*> m_targets; vector<Agent*> m_friends; vector<BoxObject*> m_objects; AStarGraph* m_pathfinding; float m_flickerTime; int m_flickerCounter; bool m_firstRound; bool m_wasAttacked; bool m_isPlayer; bool m_pathfindingMode; };
28.632653
143
0.770729
[ "object", "vector" ]
bd8d496c957f271255630c11a066c8bbf2b1e8ef
1,154
h
C
RemoteDataSync/RemoteDataSync/RDSDataStore.h
remizorrr/RemoteDataSync
dae98929a07fbb4eb3a7b2b44a66e1f957509c5e
[ "Apache-2.0" ]
null
null
null
RemoteDataSync/RemoteDataSync/RDSDataStore.h
remizorrr/RemoteDataSync
dae98929a07fbb4eb3a7b2b44a66e1f957509c5e
[ "Apache-2.0" ]
null
null
null
RemoteDataSync/RemoteDataSync/RDSDataStore.h
remizorrr/RemoteDataSync
dae98929a07fbb4eb3a7b2b44a66e1f957509c5e
[ "Apache-2.0" ]
1
2016-01-14T22:31:24.000Z
2016-01-14T22:31:24.000Z
// // RDSDataStore.h // Synchora // // Created by Anton Remizov on 3/25/15. // Copyright (c) 2015 synchora. All rights reserved. // #import <Foundation/Foundation.h> #import "RDSObjectCache.h" #import "RDSMappingProvider.h" @protocol RDSDataStore <NSObject> @property (nonatomic, strong) id<RDSObjectCache> objectCache; @property (nonatomic, strong) id<RDSMappingProvider> mappingProvider; - (id) createUniqueObjectOfType:(NSString*)type; - (id) createObjectOfType:(NSString*)type; - (void) deleteObject:(id)object; - (void) deleteObjectsOfType:(NSString*)object; - (NSArray*) objectsOfType:(NSString*)type; - (NSArray*) objectsOfType:(NSString*)type withValue:(id<NSCopying>)value forKey:(NSString*)key; - (NSArray*) objectsOfType:(NSString*)type withValue:(id<NSCopying>)value forKey:(NSString*)key create:(BOOL)create; - (NSArray*) objectsOfType:(NSString*)type forPredicate:(NSPredicate*) predicate; - (void) save; - (void) revert; - (void) wipeStorage; - (void) scheduleObjectDeletion:(id)object; - (NSArray*) objectsScheduledForDeletion; - (BOOL) object:(id)object hasProperty:(NSString*)property; - (BOOL) canStoreObject:(id)object; @end
32.971429
116
0.747834
[ "object" ]
bd91f722f2f10187121655bbfc058f1e76df134c
20,199
c
C
ThirdParty/oggtheora/vtkoggtheora/libtheora-1.1.1/lib/x86/mmxfdct.c
inviCRO/VTK
a2dc2e79d4ecb8f6da900535b32e1a2a702c7f48
[ "BSD-3-Clause" ]
3
2020-06-20T23:31:06.000Z
2021-01-11T02:17:16.000Z
Utilities/vtkoggtheora/libtheora-1.1.1/lib/x86/mmxfdct.c
Armand0s/homemade_vtk
6bc7b595a4a7f86e8fa969d067360450fa4e0a6a
[ "BSD-3-Clause" ]
14
2015-04-25T17:54:13.000Z
2017-01-13T15:30:39.000Z
Utilities/vtkoggtheora/libtheora-1.1.1/lib/x86/mmxfdct.c
Armand0s/homemade_vtk
6bc7b595a4a7f86e8fa969d067360450fa4e0a6a
[ "BSD-3-Clause" ]
5
2015-10-09T04:12:29.000Z
2021-12-15T16:57:11.000Z
/******************************************************************** * * * THIS FILE IS PART OF THE OggTheora SOFTWARE CODEC SOURCE CODE. * * USE, DISTRIBUTION AND REPRODUCTION OF THIS LIBRARY SOURCE IS * * GOVERNED BY A BSD-STYLE SOURCE LICENSE INCLUDED WITH THIS SOURCE * * IN 'COPYING'. PLEASE READ THESE TERMS BEFORE DISTRIBUTING. * * * * THE Theora SOURCE CODE IS COPYRIGHT (C) 1999-2006 * * by the Xiph.Org Foundation http://www.xiph.org/ * * * ********************************************************************/ /*MMX fDCT implementation for x86_32*/ /*Id*/ #include "x86enc.h" #if defined(OC_X86_ASM) # define OC_FDCT_STAGE1_8x4 \ "#OC_FDCT_STAGE1_8x4\n\t" \ /*Stage 1:*/ \ /*mm0=t7'=t0-t7*/ \ "psubw %%mm7,%%mm0\n\t" \ "paddw %%mm7,%%mm7\n\t" \ /*mm1=t6'=t1-t6*/ \ "psubw %%mm6,%%mm1\n\t" \ "paddw %%mm6,%%mm6\n\t" \ /*mm2=t5'=t2-t5*/ \ "psubw %%mm5,%%mm2\n\t" \ "paddw %%mm5,%%mm5\n\t" \ /*mm3=t4'=t3-t4*/ \ "psubw %%mm4,%%mm3\n\t" \ "paddw %%mm4,%%mm4\n\t" \ /*mm7=t0'=t0+t7*/ \ "paddw %%mm0,%%mm7\n\t" \ /*mm6=t1'=t1+t6*/ \ "paddw %%mm1,%%mm6\n\t" \ /*mm5=t2'=t2+t5*/ \ "paddw %%mm2,%%mm5\n\t" \ /*mm4=t3'=t3+t4*/ \ "paddw %%mm3,%%mm4\n\t" \ # define OC_FDCT8x4(_r0,_r1,_r2,_r3,_r4,_r5,_r6,_r7) \ "#OC_FDCT8x4\n\t" \ /*Stage 2:*/ \ /*mm7=t3''=t0'-t3'*/ \ "psubw %%mm4,%%mm7\n\t" \ "paddw %%mm4,%%mm4\n\t" \ /*mm6=t2''=t1'-t2'*/ \ "psubw %%mm5,%%mm6\n\t" \ "movq %%mm7,"_r6"(%[y])\n\t" \ "paddw %%mm5,%%mm5\n\t" \ /*mm1=t5''=t6'-t5'*/ \ "psubw %%mm2,%%mm1\n\t" \ "movq %%mm6,"_r2"(%[y])\n\t" \ /*mm4=t0''=t0'+t3'*/ \ "paddw %%mm7,%%mm4\n\t" \ "paddw %%mm2,%%mm2\n\t" \ /*mm5=t1''=t1'+t2'*/ \ "movq %%mm4,"_r0"(%[y])\n\t" \ "paddw %%mm6,%%mm5\n\t" \ /*mm2=t6''=t6'+t5'*/ \ "paddw %%mm1,%%mm2\n\t" \ "movq %%mm5,"_r4"(%[y])\n\t" \ /*mm0=t7', mm1=t5'', mm2=t6'', mm3=t4'.*/ \ /*mm4, mm5, mm6, mm7 are free.*/ \ /*Stage 3:*/ \ /*mm6={2}x4, mm7={27146,0xB500>>1}x2*/ \ "mov $0x5A806A0A,%[a]\n\t" \ "pcmpeqb %%mm6,%%mm6\n\t" \ "movd %[a],%%mm7\n\t" \ "psrlw $15,%%mm6\n\t" \ "punpckldq 
%%mm7,%%mm7\n\t" \ "paddw %%mm6,%%mm6\n\t" \ /*mm0=0, m2={-1}x4 \ mm5:mm4=t5''*27146+0xB500*/ \ "movq %%mm1,%%mm4\n\t" \ "movq %%mm1,%%mm5\n\t" \ "punpcklwd %%mm6,%%mm4\n\t" \ "movq %%mm2,"_r3"(%[y])\n\t" \ "pmaddwd %%mm7,%%mm4\n\t" \ "movq %%mm0,"_r7"(%[y])\n\t" \ "punpckhwd %%mm6,%%mm5\n\t" \ "pxor %%mm0,%%mm0\n\t" \ "pmaddwd %%mm7,%%mm5\n\t" \ "pcmpeqb %%mm2,%%mm2\n\t" \ /*mm2=t6'', mm1=t5''+(t5''!=0) \ mm4=(t5''*27146+0xB500>>16)*/ \ "pcmpeqw %%mm1,%%mm0\n\t" \ "psrad $16,%%mm4\n\t" \ "psubw %%mm2,%%mm0\n\t" \ "movq "_r3"(%[y]),%%mm2\n\t" \ "psrad $16,%%mm5\n\t" \ "paddw %%mm0,%%mm1\n\t" \ "packssdw %%mm5,%%mm4\n\t" \ /*mm4=s=(t5''*27146+0xB500>>16)+t5''+(t5''!=0)>>1*/ \ "paddw %%mm1,%%mm4\n\t" \ "movq "_r7"(%[y]),%%mm0\n\t" \ "psraw $1,%%mm4\n\t" \ "movq %%mm3,%%mm1\n\t" \ /*mm3=t4''=t4'+s*/ \ "paddw %%mm4,%%mm3\n\t" \ /*mm1=t5'''=t4'-s*/ \ "psubw %%mm4,%%mm1\n\t" \ /*mm1=0, mm3={-1}x4 \ mm5:mm4=t6''*27146+0xB500*/ \ "movq %%mm2,%%mm4\n\t" \ "movq %%mm2,%%mm5\n\t" \ "punpcklwd %%mm6,%%mm4\n\t" \ "movq %%mm1,"_r5"(%[y])\n\t" \ "pmaddwd %%mm7,%%mm4\n\t" \ "movq %%mm3,"_r1"(%[y])\n\t" \ "punpckhwd %%mm6,%%mm5\n\t" \ "pxor %%mm1,%%mm1\n\t" \ "pmaddwd %%mm7,%%mm5\n\t" \ "pcmpeqb %%mm3,%%mm3\n\t" \ /*mm2=t6''+(t6''!=0), mm4=(t6''*27146+0xB500>>16)*/ \ "psrad $16,%%mm4\n\t" \ "pcmpeqw %%mm2,%%mm1\n\t" \ "psrad $16,%%mm5\n\t" \ "psubw %%mm3,%%mm1\n\t" \ "packssdw %%mm5,%%mm4\n\t" \ "paddw %%mm1,%%mm2\n\t" \ /*mm1=t1'' \ mm4=s=(t6''*27146+0xB500>>16)+t6''+(t6''!=0)>>1*/ \ "paddw %%mm2,%%mm4\n\t" \ "movq "_r4"(%[y]),%%mm1\n\t" \ "psraw $1,%%mm4\n\t" \ "movq %%mm0,%%mm2\n\t" \ /*mm7={54491-0x7FFF,0x7FFF}x2 \ mm0=t7''=t7'+s*/ \ "paddw %%mm4,%%mm0\n\t" \ /*mm2=t6'''=t7'-s*/ \ "psubw %%mm4,%%mm2\n\t" \ /*Stage 4:*/ \ /*mm0=0, mm2=t0'' \ mm5:mm4=t1''*27146+0xB500*/ \ "movq %%mm1,%%mm4\n\t" \ "movq %%mm1,%%mm5\n\t" \ "punpcklwd %%mm6,%%mm4\n\t" \ "movq %%mm2,"_r3"(%[y])\n\t" \ "pmaddwd %%mm7,%%mm4\n\t" \ "movq "_r0"(%[y]),%%mm2\n\t" \ "punpckhwd %%mm6,%%mm5\n\t" \ "movq 
%%mm0,"_r7"(%[y])\n\t" \ "pmaddwd %%mm7,%%mm5\n\t" \ "pxor %%mm0,%%mm0\n\t" \ /*mm7={27146,0x4000>>1}x2 \ mm0=s=(t1''*27146+0xB500>>16)+t1''+(t1''!=0)*/ \ "psrad $16,%%mm4\n\t" \ "mov $0x20006A0A,%[a]\n\t" \ "pcmpeqw %%mm1,%%mm0\n\t" \ "movd %[a],%%mm7\n\t" \ "psrad $16,%%mm5\n\t" \ "psubw %%mm3,%%mm0\n\t" \ "packssdw %%mm5,%%mm4\n\t" \ "paddw %%mm1,%%mm0\n\t" \ "punpckldq %%mm7,%%mm7\n\t" \ "paddw %%mm4,%%mm0\n\t" \ /*mm6={0x00000E3D}x2 \ mm1=-(t0''==0), mm5:mm4=t0''*27146+0x4000*/ \ "movq %%mm2,%%mm4\n\t" \ "movq %%mm2,%%mm5\n\t" \ "punpcklwd %%mm6,%%mm4\n\t" \ "mov $0x0E3D,%[a]\n\t" \ "pmaddwd %%mm7,%%mm4\n\t" \ "punpckhwd %%mm6,%%mm5\n\t" \ "movd %[a],%%mm6\n\t" \ "pmaddwd %%mm7,%%mm5\n\t" \ "pxor %%mm1,%%mm1\n\t" \ "punpckldq %%mm6,%%mm6\n\t" \ "pcmpeqw %%mm2,%%mm1\n\t" \ /*mm4=r=(t0''*27146+0x4000>>16)+t0''+(t0''!=0)*/ \ "psrad $16,%%mm4\n\t" \ "psubw %%mm3,%%mm1\n\t" \ "psrad $16,%%mm5\n\t" \ "paddw %%mm1,%%mm2\n\t" \ "packssdw %%mm5,%%mm4\n\t" \ "movq "_r5"(%[y]),%%mm1\n\t" \ "paddw %%mm2,%%mm4\n\t" \ /*mm2=t6'', mm0=_y[0]=u=r+s>>1 \ The naive implementation could cause overflow, so we use \ u=(r&s)+((r^s)>>1).*/ \ "movq "_r3"(%[y]),%%mm2\n\t" \ "movq %%mm0,%%mm7\n\t" \ "pxor %%mm4,%%mm0\n\t" \ "pand %%mm4,%%mm7\n\t" \ "psraw $1,%%mm0\n\t" \ "mov $0x7FFF54DC,%[a]\n\t" \ "paddw %%mm7,%%mm0\n\t" \ "movd %[a],%%mm7\n\t" \ /*mm7={54491-0x7FFF,0x7FFF}x2 \ mm4=_y[4]=v=r-u*/ \ "psubw %%mm0,%%mm4\n\t" \ "punpckldq %%mm7,%%mm7\n\t" \ "movq %%mm4,"_r4"(%[y])\n\t" \ /*mm0=0, mm7={36410}x4 \ mm1=(t5'''!=0), mm5:mm4=54491*t5'''+0x0E3D*/ \ "movq %%mm1,%%mm4\n\t" \ "movq %%mm1,%%mm5\n\t" \ "punpcklwd %%mm1,%%mm4\n\t" \ "mov $0x8E3A8E3A,%[a]\n\t" \ "pmaddwd %%mm7,%%mm4\n\t" \ "movq %%mm0,"_r0"(%[y])\n\t" \ "punpckhwd %%mm1,%%mm5\n\t" \ "pxor %%mm0,%%mm0\n\t" \ "pmaddwd %%mm7,%%mm5\n\t" \ "pcmpeqw %%mm0,%%mm1\n\t" \ "movd %[a],%%mm7\n\t" \ "psubw %%mm3,%%mm1\n\t" \ "punpckldq %%mm7,%%mm7\n\t" \ "paddd %%mm6,%%mm4\n\t" \ "paddd %%mm6,%%mm5\n\t" \ /*mm0=0 \ 
mm3:mm1=36410*t6'''+((t5'''!=0)<<16)*/ \ "movq %%mm2,%%mm6\n\t" \ "movq %%mm2,%%mm3\n\t" \ "pmulhw %%mm7,%%mm6\n\t" \ "paddw %%mm2,%%mm1\n\t" \ "pmullw %%mm7,%%mm3\n\t" \ "pxor %%mm0,%%mm0\n\t" \ "paddw %%mm1,%%mm6\n\t" \ "movq %%mm3,%%mm1\n\t" \ "punpckhwd %%mm6,%%mm3\n\t" \ "punpcklwd %%mm6,%%mm1\n\t" \ /*mm3={-1}x4, mm6={1}x4 \ mm4=_y[5]=u=(54491*t5'''+36410*t6'''+0x0E3D>>16)+(t5'''!=0)*/ \ "paddd %%mm3,%%mm5\n\t" \ "paddd %%mm1,%%mm4\n\t" \ "psrad $16,%%mm5\n\t" \ "pxor %%mm6,%%mm6\n\t" \ "psrad $16,%%mm4\n\t" \ "pcmpeqb %%mm3,%%mm3\n\t" \ "packssdw %%mm5,%%mm4\n\t" \ "psubw %%mm3,%%mm6\n\t" \ /*mm1=t7'', mm7={26568,0x3400}x2 \ mm2=s=t6'''-(36410*u>>16)*/ \ "movq %%mm4,%%mm1\n\t" \ "mov $0x340067C8,%[a]\n\t" \ "pmulhw %%mm7,%%mm4\n\t" \ "movd %[a],%%mm7\n\t" \ "movq %%mm1,"_r5"(%[y])\n\t" \ "punpckldq %%mm7,%%mm7\n\t" \ "paddw %%mm1,%%mm4\n\t" \ "movq "_r7"(%[y]),%%mm1\n\t" \ "psubw %%mm4,%%mm2\n\t" \ /*mm6={0x00007B1B}x2 \ mm0=(s!=0), mm5:mm4=s*26568+0x3400*/ \ "movq %%mm2,%%mm4\n\t" \ "movq %%mm2,%%mm5\n\t" \ "punpcklwd %%mm6,%%mm4\n\t" \ "pcmpeqw %%mm2,%%mm0\n\t" \ "pmaddwd %%mm7,%%mm4\n\t" \ "mov $0x7B1B,%[a]\n\t" \ "punpckhwd %%mm6,%%mm5\n\t" \ "movd %[a],%%mm6\n\t" \ "pmaddwd %%mm7,%%mm5\n\t" \ "psubw %%mm3,%%mm0\n\t" \ "punpckldq %%mm6,%%mm6\n\t" \ /*mm7={64277-0x7FFF,0x7FFF}x2 \ mm2=_y[3]=v=(s*26568+0x3400>>17)+s+(s!=0)*/ \ "psrad $17,%%mm4\n\t" \ "paddw %%mm0,%%mm2\n\t" \ "psrad $17,%%mm5\n\t" \ "mov $0x7FFF7B16,%[a]\n\t" \ "packssdw %%mm5,%%mm4\n\t" \ "movd %[a],%%mm7\n\t" \ "paddw %%mm4,%%mm2\n\t" \ "punpckldq %%mm7,%%mm7\n\t" \ /*mm0=0, mm7={12785}x4 \ mm1=(t7''!=0), mm2=t4'', mm5:mm4=64277*t7''+0x7B1B*/ \ "movq %%mm1,%%mm4\n\t" \ "movq %%mm1,%%mm5\n\t" \ "movq %%mm2,"_r3"(%[y])\n\t" \ "punpcklwd %%mm1,%%mm4\n\t" \ "movq "_r1"(%[y]),%%mm2\n\t" \ "pmaddwd %%mm7,%%mm4\n\t" \ "mov $0x31F131F1,%[a]\n\t" \ "punpckhwd %%mm1,%%mm5\n\t" \ "pxor %%mm0,%%mm0\n\t" \ "pmaddwd %%mm7,%%mm5\n\t" \ "pcmpeqw %%mm0,%%mm1\n\t" \ "movd %[a],%%mm7\n\t" \ "psubw 
%%mm3,%%mm1\n\t" \ "punpckldq %%mm7,%%mm7\n\t" \ "paddd %%mm6,%%mm4\n\t" \ "paddd %%mm6,%%mm5\n\t" \ /*mm3:mm1=12785*t4'''+((t7''!=0)<<16)*/ \ "movq %%mm2,%%mm6\n\t" \ "movq %%mm2,%%mm3\n\t" \ "pmulhw %%mm7,%%mm6\n\t" \ "pmullw %%mm7,%%mm3\n\t" \ "paddw %%mm1,%%mm6\n\t" \ "movq %%mm3,%%mm1\n\t" \ "punpckhwd %%mm6,%%mm3\n\t" \ "punpcklwd %%mm6,%%mm1\n\t" \ /*mm3={-1}x4, mm6={1}x4 \ mm4=_y[1]=u=(12785*t4'''+64277*t7''+0x7B1B>>16)+(t7''!=0)*/ \ "paddd %%mm3,%%mm5\n\t" \ "paddd %%mm1,%%mm4\n\t" \ "psrad $16,%%mm5\n\t" \ "pxor %%mm6,%%mm6\n\t" \ "psrad $16,%%mm4\n\t" \ "pcmpeqb %%mm3,%%mm3\n\t" \ "packssdw %%mm5,%%mm4\n\t" \ "psubw %%mm3,%%mm6\n\t" \ /*mm1=t3'', mm7={20539,0x3000}x2 \ mm4=s=(12785*u>>16)-t4''*/ \ "movq %%mm4,"_r1"(%[y])\n\t" \ "pmulhw %%mm7,%%mm4\n\t" \ "mov $0x3000503B,%[a]\n\t" \ "movq "_r6"(%[y]),%%mm1\n\t" \ "movd %[a],%%mm7\n\t" \ "psubw %%mm2,%%mm4\n\t" \ "punpckldq %%mm7,%%mm7\n\t" \ /*mm6={0x00006CB7}x2 \ mm0=(s!=0), mm5:mm4=s*20539+0x3000*/ \ "movq %%mm4,%%mm5\n\t" \ "movq %%mm4,%%mm2\n\t" \ "punpcklwd %%mm6,%%mm4\n\t" \ "pcmpeqw %%mm2,%%mm0\n\t" \ "pmaddwd %%mm7,%%mm4\n\t" \ "mov $0x6CB7,%[a]\n\t" \ "punpckhwd %%mm6,%%mm5\n\t" \ "movd %[a],%%mm6\n\t" \ "pmaddwd %%mm7,%%mm5\n\t" \ "psubw %%mm3,%%mm0\n\t" \ "punpckldq %%mm6,%%mm6\n\t" \ /*mm7={60547-0x7FFF,0x7FFF}x2 \ mm2=_y[7]=v=(s*20539+0x3000>>20)+s+(s!=0)*/ \ "psrad $20,%%mm4\n\t" \ "paddw %%mm0,%%mm2\n\t" \ "psrad $20,%%mm5\n\t" \ "mov $0x7FFF6C84,%[a]\n\t" \ "packssdw %%mm5,%%mm4\n\t" \ "movd %[a],%%mm7\n\t" \ "paddw %%mm4,%%mm2\n\t" \ "punpckldq %%mm7,%%mm7\n\t" \ /*mm0=0, mm7={25080}x4 \ mm2=t2'', mm5:mm4=60547*t3''+0x6CB7*/ \ "movq %%mm1,%%mm4\n\t" \ "movq %%mm1,%%mm5\n\t" \ "movq %%mm2,"_r7"(%[y])\n\t" \ "punpcklwd %%mm1,%%mm4\n\t" \ "movq "_r2"(%[y]),%%mm2\n\t" \ "pmaddwd %%mm7,%%mm4\n\t" \ "mov $0x61F861F8,%[a]\n\t" \ "punpckhwd %%mm1,%%mm5\n\t" \ "pxor %%mm0,%%mm0\n\t" \ "pmaddwd %%mm7,%%mm5\n\t" \ "movd %[a],%%mm7\n\t" \ "pcmpeqw %%mm0,%%mm1\n\t" \ "psubw %%mm3,%%mm1\n\t" \ 
"punpckldq %%mm7,%%mm7\n\t" \ "paddd %%mm6,%%mm4\n\t" \ "paddd %%mm6,%%mm5\n\t" \ /*mm3:mm1=25080*t2''+((t3''!=0)<<16)*/ \ "movq %%mm2,%%mm6\n\t" \ "movq %%mm2,%%mm3\n\t" \ "pmulhw %%mm7,%%mm6\n\t" \ "pmullw %%mm7,%%mm3\n\t" \ "paddw %%mm1,%%mm6\n\t" \ "movq %%mm3,%%mm1\n\t" \ "punpckhwd %%mm6,%%mm3\n\t" \ "punpcklwd %%mm6,%%mm1\n\t" \ /*mm1={-1}x4 \ mm4=u=(25080*t2''+60547*t3''+0x6CB7>>16)+(t3''!=0)*/ \ "paddd %%mm3,%%mm5\n\t" \ "paddd %%mm1,%%mm4\n\t" \ "psrad $16,%%mm5\n\t" \ "mov $0x28005460,%[a]\n\t" \ "psrad $16,%%mm4\n\t" \ "pcmpeqb %%mm1,%%mm1\n\t" \ "packssdw %%mm5,%%mm4\n\t" \ /*mm5={1}x4, mm6=_y[2]=u, mm7={21600,0x2800}x2 \ mm4=s=(25080*u>>16)-t2''*/ \ "movq %%mm4,%%mm6\n\t" \ "pmulhw %%mm7,%%mm4\n\t" \ "pxor %%mm5,%%mm5\n\t" \ "movd %[a],%%mm7\n\t" \ "psubw %%mm1,%%mm5\n\t" \ "punpckldq %%mm7,%%mm7\n\t" \ "psubw %%mm2,%%mm4\n\t" \ /*mm2=s+(s!=0) \ mm4:mm3=s*21600+0x2800*/ \ "movq %%mm4,%%mm3\n\t" \ "movq %%mm4,%%mm2\n\t" \ "punpckhwd %%mm5,%%mm4\n\t" \ "pcmpeqw %%mm2,%%mm0\n\t" \ "pmaddwd %%mm7,%%mm4\n\t" \ "psubw %%mm1,%%mm0\n\t" \ "punpcklwd %%mm5,%%mm3\n\t" \ "paddw %%mm0,%%mm2\n\t" \ "pmaddwd %%mm7,%%mm3\n\t" \ /*mm0=_y[4], mm1=_y[7], mm4=_y[0], mm5=_y[5] \ mm3=_y[6]=v=(s*21600+0x2800>>18)+s+(s!=0)*/ \ "movq "_r4"(%[y]),%%mm0\n\t" \ "psrad $18,%%mm4\n\t" \ "movq "_r5"(%[y]),%%mm5\n\t" \ "psrad $18,%%mm3\n\t" \ "movq "_r7"(%[y]),%%mm1\n\t" \ "packssdw %%mm4,%%mm3\n\t" \ "movq "_r0"(%[y]),%%mm4\n\t" \ "paddw %%mm2,%%mm3\n\t" \ /*On input, mm4=_y[0], mm6=_y[2], mm0=_y[4], mm5=_y[5], mm3=_y[6], mm1=_y[7]. 
On output, {_y[4],mm1,mm2,mm3} contains the transpose of _y[4...7] and {mm4,mm5,mm6,mm7} contains the transpose of _y[0...3].*/ # define OC_TRANSPOSE8x4(_r0,_r1,_r2,_r3,_r4,_r5,_r6,_r7) \ "#OC_TRANSPOSE8x4\n\t" \ /*First 4x4 transpose:*/ \ /*mm0 = e3 e2 e1 e0 \ mm5 = f3 f2 f1 f0 \ mm3 = g3 g2 g1 g0 \ mm1 = h3 h2 h1 h0*/ \ "movq %%mm0,%%mm2\n\t" \ "punpcklwd %%mm5,%%mm0\n\t" \ "punpckhwd %%mm5,%%mm2\n\t" \ "movq %%mm3,%%mm5\n\t" \ "punpcklwd %%mm1,%%mm3\n\t" \ "punpckhwd %%mm1,%%mm5\n\t" \ /*mm0 = f1 e1 f0 e0 \ mm2 = f3 e3 f2 e2 \ mm3 = h1 g1 h0 g0 \ mm5 = h3 g3 h2 g2*/ \ "movq %%mm0,%%mm1\n\t" \ "punpckldq %%mm3,%%mm0\n\t" \ "movq %%mm0,"_r4"(%[y])\n\t" \ "punpckhdq %%mm3,%%mm1\n\t" \ "movq "_r1"(%[y]),%%mm0\n\t" \ "movq %%mm2,%%mm3\n\t" \ "punpckldq %%mm5,%%mm2\n\t" \ "punpckhdq %%mm5,%%mm3\n\t" \ "movq "_r3"(%[y]),%%mm5\n\t" \ /*_y[4] = h0 g0 f0 e0 \ mm1 = h1 g1 f1 e1 \ mm2 = h2 g2 f2 e2 \ mm3 = h3 g3 f3 e3*/ \ /*Second 4x4 transpose:*/ \ /*mm4 = a3 a2 a1 a0 \ mm0 = b3 b2 b1 b0 \ mm6 = c3 c2 c1 c0 \ mm5 = d3 d2 d1 d0*/ \ "movq %%mm4,%%mm7\n\t" \ "punpcklwd %%mm0,%%mm4\n\t" \ "punpckhwd %%mm0,%%mm7\n\t" \ "movq %%mm6,%%mm0\n\t" \ "punpcklwd %%mm5,%%mm6\n\t" \ "punpckhwd %%mm5,%%mm0\n\t" \ /*mm4 = b1 a1 b0 a0 \ mm7 = b3 a3 b2 a2 \ mm6 = d1 c1 d0 c0 \ mm0 = d3 c3 d2 c2*/ \ "movq %%mm4,%%mm5\n\t" \ "punpckldq %%mm6,%%mm4\n\t" \ "punpckhdq %%mm6,%%mm5\n\t" \ "movq %%mm7,%%mm6\n\t" \ "punpckhdq %%mm0,%%mm7\n\t" \ "punpckldq %%mm0,%%mm6\n\t" \ /*mm4 = d0 c0 b0 a0 \ mm5 = d1 c1 b1 a1 \ mm6 = d2 c2 b2 a2 \ mm7 = d3 c3 b3 a3*/ \ /*MMX implementation of the fDCT.*/ void oc_enc_fdct8x8_mmx(ogg_int16_t _y[64],const ogg_int16_t _x[64]){ ptrdiff_t a; __asm__ __volatile__( /*Add two extra bits of working precision to improve accuracy; any more and we could overflow.*/ /*We also add biases to correct for some systematic error that remains in the full fDCT->iDCT round trip.*/ "movq 0x00(%[x]),%%mm0\n\t" "movq 0x10(%[x]),%%mm1\n\t" "movq 0x20(%[x]),%%mm2\n\t" "movq 
0x30(%[x]),%%mm3\n\t" "pcmpeqb %%mm4,%%mm4\n\t" "pxor %%mm7,%%mm7\n\t" "movq %%mm0,%%mm5\n\t" "psllw $2,%%mm0\n\t" "pcmpeqw %%mm7,%%mm5\n\t" "movq 0x70(%[x]),%%mm7\n\t" "psllw $2,%%mm1\n\t" "psubw %%mm4,%%mm5\n\t" "psllw $2,%%mm2\n\t" "mov $1,%[a]\n\t" "pslld $16,%%mm5\n\t" "movd %[a],%%mm6\n\t" "psllq $16,%%mm5\n\t" "mov $0x10001,%[a]\n\t" "psllw $2,%%mm3\n\t" "movd %[a],%%mm4\n\t" "punpckhwd %%mm6,%%mm5\n\t" "psubw %%mm6,%%mm1\n\t" "movq 0x60(%[x]),%%mm6\n\t" "paddw %%mm5,%%mm0\n\t" "movq 0x50(%[x]),%%mm5\n\t" "paddw %%mm4,%%mm0\n\t" "movq 0x40(%[x]),%%mm4\n\t" /*We inline stage1 of the transform here so we can get better instruction scheduling with the shifts.*/ /*mm0=t7'=t0-t7*/ "psllw $2,%%mm7\n\t" "psubw %%mm7,%%mm0\n\t" "psllw $2,%%mm6\n\t" "paddw %%mm7,%%mm7\n\t" /*mm1=t6'=t1-t6*/ "psllw $2,%%mm5\n\t" "psubw %%mm6,%%mm1\n\t" "psllw $2,%%mm4\n\t" "paddw %%mm6,%%mm6\n\t" /*mm2=t5'=t2-t5*/ "psubw %%mm5,%%mm2\n\t" "paddw %%mm5,%%mm5\n\t" /*mm3=t4'=t3-t4*/ "psubw %%mm4,%%mm3\n\t" "paddw %%mm4,%%mm4\n\t" /*mm7=t0'=t0+t7*/ "paddw %%mm0,%%mm7\n\t" /*mm6=t1'=t1+t6*/ "paddw %%mm1,%%mm6\n\t" /*mm5=t2'=t2+t5*/ "paddw %%mm2,%%mm5\n\t" /*mm4=t3'=t3+t4*/ "paddw %%mm3,%%mm4\n\t" OC_FDCT8x4("0x00","0x10","0x20","0x30","0x40","0x50","0x60","0x70") OC_TRANSPOSE8x4("0x00","0x10","0x20","0x30","0x40","0x50","0x60","0x70") /*Swap out this 8x4 block for the next one.*/ "movq 0x08(%[x]),%%mm0\n\t" "movq %%mm7,0x30(%[y])\n\t" "movq 0x78(%[x]),%%mm7\n\t" "movq %%mm1,0x50(%[y])\n\t" "movq 0x18(%[x]),%%mm1\n\t" "movq %%mm6,0x20(%[y])\n\t" "movq 0x68(%[x]),%%mm6\n\t" "movq %%mm2,0x60(%[y])\n\t" "movq 0x28(%[x]),%%mm2\n\t" "movq %%mm5,0x10(%[y])\n\t" "movq 0x58(%[x]),%%mm5\n\t" "movq %%mm3,0x70(%[y])\n\t" "movq 0x38(%[x]),%%mm3\n\t" /*And increase its working precision, too.*/ "psllw $2,%%mm0\n\t" "movq %%mm4,0x00(%[y])\n\t" "psllw $2,%%mm7\n\t" "movq 0x48(%[x]),%%mm4\n\t" /*We inline stage1 of the transform here so we can get better instruction scheduling with the shifts.*/ 
/*mm0=t7'=t0-t7*/ "psubw %%mm7,%%mm0\n\t" "psllw $2,%%mm1\n\t" "paddw %%mm7,%%mm7\n\t" "psllw $2,%%mm6\n\t" /*mm1=t6'=t1-t6*/ "psubw %%mm6,%%mm1\n\t" "psllw $2,%%mm2\n\t" "paddw %%mm6,%%mm6\n\t" "psllw $2,%%mm5\n\t" /*mm2=t5'=t2-t5*/ "psubw %%mm5,%%mm2\n\t" "psllw $2,%%mm3\n\t" "paddw %%mm5,%%mm5\n\t" "psllw $2,%%mm4\n\t" /*mm3=t4'=t3-t4*/ "psubw %%mm4,%%mm3\n\t" "paddw %%mm4,%%mm4\n\t" /*mm7=t0'=t0+t7*/ "paddw %%mm0,%%mm7\n\t" /*mm6=t1'=t1+t6*/ "paddw %%mm1,%%mm6\n\t" /*mm5=t2'=t2+t5*/ "paddw %%mm2,%%mm5\n\t" /*mm4=t3'=t3+t4*/ "paddw %%mm3,%%mm4\n\t" OC_FDCT8x4("0x08","0x18","0x28","0x38","0x48","0x58","0x68","0x78") OC_TRANSPOSE8x4("0x08","0x18","0x28","0x38","0x48","0x58","0x68","0x78") /*Here the first 4x4 block of output from the last transpose is the second 4x4 block of input for the next transform. We have cleverly arranged that it already be in the appropriate place, so we only have to do half the stores and loads.*/ "movq 0x00(%[y]),%%mm0\n\t" "movq %%mm1,0x58(%[y])\n\t" "movq 0x10(%[y]),%%mm1\n\t" "movq %%mm2,0x68(%[y])\n\t" "movq 0x20(%[y]),%%mm2\n\t" "movq %%mm3,0x78(%[y])\n\t" "movq 0x30(%[y]),%%mm3\n\t" OC_FDCT_STAGE1_8x4 OC_FDCT8x4("0x00","0x10","0x20","0x30","0x08","0x18","0x28","0x38") OC_TRANSPOSE8x4("0x00","0x10","0x20","0x30","0x08","0x18","0x28","0x38") /*mm0={-2}x4*/ "pcmpeqw %%mm0,%%mm0\n\t" "paddw %%mm0,%%mm0\n\t" /*Round the results.*/ "psubw %%mm0,%%mm1\n\t" "psubw %%mm0,%%mm2\n\t" "psraw $2,%%mm1\n\t" "psubw %%mm0,%%mm3\n\t" "movq %%mm1,0x18(%[y])\n\t" "psraw $2,%%mm2\n\t" "psubw %%mm0,%%mm4\n\t" "movq 0x08(%[y]),%%mm1\n\t" "psraw $2,%%mm3\n\t" "psubw %%mm0,%%mm5\n\t" "psraw $2,%%mm4\n\t" "psubw %%mm0,%%mm6\n\t" "psraw $2,%%mm5\n\t" "psubw %%mm0,%%mm7\n\t" "psraw $2,%%mm6\n\t" "psubw %%mm0,%%mm1\n\t" "psraw $2,%%mm7\n\t" "movq 0x40(%[y]),%%mm0\n\t" "psraw $2,%%mm1\n\t" "movq %%mm7,0x30(%[y])\n\t" "movq 0x78(%[y]),%%mm7\n\t" "movq %%mm1,0x08(%[y])\n\t" "movq 0x50(%[y]),%%mm1\n\t" "movq %%mm6,0x20(%[y])\n\t" "movq 0x68(%[y]),%%mm6\n\t" 
"movq %%mm2,0x28(%[y])\n\t" "movq 0x60(%[y]),%%mm2\n\t" "movq %%mm5,0x10(%[y])\n\t" "movq 0x58(%[y]),%%mm5\n\t" "movq %%mm3,0x38(%[y])\n\t" "movq 0x70(%[y]),%%mm3\n\t" "movq %%mm4,0x00(%[y])\n\t" "movq 0x48(%[y]),%%mm4\n\t" OC_FDCT_STAGE1_8x4 OC_FDCT8x4("0x40","0x50","0x60","0x70","0x48","0x58","0x68","0x78") OC_TRANSPOSE8x4("0x40","0x50","0x60","0x70","0x48","0x58","0x68","0x78") /*mm0={-2}x4*/ "pcmpeqw %%mm0,%%mm0\n\t" "paddw %%mm0,%%mm0\n\t" /*Round the results.*/ "psubw %%mm0,%%mm1\n\t" "psubw %%mm0,%%mm2\n\t" "psraw $2,%%mm1\n\t" "psubw %%mm0,%%mm3\n\t" "movq %%mm1,0x58(%[y])\n\t" "psraw $2,%%mm2\n\t" "psubw %%mm0,%%mm4\n\t" "movq 0x48(%[y]),%%mm1\n\t" "psraw $2,%%mm3\n\t" "psubw %%mm0,%%mm5\n\t" "movq %%mm2,0x68(%[y])\n\t" "psraw $2,%%mm4\n\t" "psubw %%mm0,%%mm6\n\t" "movq %%mm3,0x78(%[y])\n\t" "psraw $2,%%mm5\n\t" "psubw %%mm0,%%mm7\n\t" "movq %%mm4,0x40(%[y])\n\t" "psraw $2,%%mm6\n\t" "psubw %%mm0,%%mm1\n\t" "movq %%mm5,0x50(%[y])\n\t" "psraw $2,%%mm7\n\t" "movq %%mm6,0x60(%[y])\n\t" "psraw $2,%%mm1\n\t" "movq %%mm7,0x70(%[y])\n\t" "movq %%mm1,0x48(%[y])\n\t" :[a]"=&r"(a) :[y]"r"(_y),[x]"r"(_x) :"memory" ); } #endif
30.328829
79
0.490965
[ "transform" ]
bd94908a04b4764b6858ca912c1dd555cf7300d2
2,365
h
C
include/ga_points_downsampler/ga_points_downsampler.h
ZhenshengLee/ga_points_downsampler
c55fc78fa672d4ae43891204ed1e410271d9ec89
[ "Apache-2.0" ]
1
2021-10-20T10:31:15.000Z
2021-10-20T10:31:15.000Z
include/ga_points_downsampler/ga_points_downsampler.h
ZhenshengLee/ga_points_downsampler
c55fc78fa672d4ae43891204ed1e410271d9ec89
[ "Apache-2.0" ]
null
null
null
include/ga_points_downsampler/ga_points_downsampler.h
ZhenshengLee/ga_points_downsampler
c55fc78fa672d4ae43891204ed1e410271d9ec89
[ "Apache-2.0" ]
null
null
null
#ifndef GA_CUPOCH_DOWNSAMPLER_CUPOCH_H #define GA_CUPOCH_DOWNSAMPLER_CUPOCH_H // ros hdrs #include <ros/ros.h> #include <tf_conversions/tf_eigen.h> #include <tf/transform_broadcaster.h> #include <tf/transform_listener.h> #include <nav_msgs/Odometry.h> #include <sensor_msgs/Imu.h> #include <sensor_msgs/PointCloud2.h> #include <std_msgs/Bool.h> #include <geometry_msgs/PoseWithCovarianceStamped.h> #include <geometry_msgs/Point.h> #include <geometry_msgs/Quaternion.h> #include <dynamic_reconfigure/server.h> // cupoch hdrs #include "cupoch/cupoch.h" // open3d hdrs // #include "open3d/Open3D.h" // pcl hdrs #include <pcl_ros/point_cloud.h> #include <pcl/filters/voxel_grid.h> #include <pcl/point_types.h> // std hdrs #include <mutex> #include <memory> #include <iostream> #include <unistd.h> #include <thread> #include <condition_variable> // cuda hdrs #include <cuda.h> #include <cuda_runtime.h> using namespace std; using PointT = pcl::PointXYZ; namespace gpuac { class CupochDownSampler { public: CupochDownSampler(ros::NodeHandle & nh); ~CupochDownSampler(); private: // ini void init(); void initializeLaunchParams(); void initializeSubPub(); void initializeThread(); // sys fun void detect_cupoch_thread(); // ros interface void points_callback(const sensor_msgs::PointCloud2ConstPtr& points_msg); private: // ros-com resource ros::NodeHandle m_private_nh; ros::Subscriber m_points_sub; // pubs ros::Publisher m_voxel_cupoch_pub; // 消息发布及数据: sensor_msgs::PointCloud2 m_pub_cupoch_pc; // sys res shared_ptr<std::thread> m_detect_cupoch_thread; std::mutex m_detect_cupoch_mutex; std::condition_variable m_detect_cupoch_cond; volatile bool m_detect_cupoch_thread_enable{false}; // alg processor std::shared_ptr<cupoch::geometry::PointCloud> m_points_cupoch_cloud{std::make_shared<cupoch::geometry::PointCloud>()}; // std::shared_ptr<open3d::geometry::PointCloud> m_points_open3d_cloud{std::make_shared<open3d::geometry::PointCloud>()}; pcl::PointCloud<PointT>::Ptr m_points_pcl_cloud{new 
pcl::PointCloud<PointT>}; bool m_is_downsample{true}; bool m_is_pub_pc{false}; bool m_is_use_gpu{true}; bool m_is_use_open3d{false}; // downsample double m_downsample_res{0.01}; }; // class } // namespace gpuac #endif
25.159574
125
0.735729
[ "geometry" ]
bd964e6c27956777ae5b2986721b0735ebced85c
1,948
h
C
content/public/browser/appcache_service.h
sarang-apps/darshan_browser
173649bb8a7c656dc60784d19e7bb73e07c20daa
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
content/public/browser/appcache_service.h
sarang-apps/darshan_browser
173649bb8a7c656dc60784d19e7bb73e07c20daa
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
null
null
null
content/public/browser/appcache_service.h
sarang-apps/darshan_browser
173649bb8a7c656dc60784d19e7bb73e07c20daa
[ "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
2
2021-01-05T23:43:46.000Z
2021-01-07T23:36:34.000Z
// Copyright (c) 2012 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CONTENT_PUBLIC_BROWSER_APPCACHE_SERVICE_H_ #define CONTENT_PUBLIC_BROWSER_APPCACHE_SERVICE_H_ #include <map> #include <memory> #include <set> #include <vector> #include "base/memory/ref_counted.h" #include "content/common/content_export.h" #include "net/base/completion_once_callback.h" #include "url/origin.h" namespace blink { namespace mojom { class AppCacheInfo; } // namespace mojom } // namespace blink namespace content { // Refcounted container to avoid copying the collection in callbacks. struct CONTENT_EXPORT AppCacheInfoCollection : public base::RefCountedThreadSafe<AppCacheInfoCollection> { AppCacheInfoCollection(); std::map<url::Origin, std::vector<blink::mojom::AppCacheInfo>> infos_by_origin; private: friend class base::RefCountedThreadSafe<AppCacheInfoCollection>; virtual ~AppCacheInfoCollection(); }; // Exposes a limited interface to the AppCacheService. // Call these methods only on the IO thread. class CONTENT_EXPORT AppCacheService { public: // Populates 'collection' with info about all of the appcaches stored // within the service, 'callback' is invoked upon completion. The service // acquires a reference to the 'collection' until completion. // This method always completes asynchronously. virtual void GetAllAppCacheInfo(AppCacheInfoCollection* collection, net::CompletionOnceCallback callback) = 0; // Deletes all appcache groups associated with an origin. // This method always completes asynchronously. virtual void DeleteAppCachesForOrigin( const url::Origin& origin, net::CompletionOnceCallback callback) = 0; protected: virtual ~AppCacheService(); }; } // namespace content #endif // CONTENT_PUBLIC_BROWSER_APPCACHE_SERVICE_H_
30.920635
76
0.761294
[ "vector" ]
ae66975cd6151108c1ea8d54e14e421bc3f4bc9d
3,955
h
C
include/git2/notes.h
Asquera/libgit2
d9a5009ea6ae980bb3770055aa54c05a5164bcb0
[ "Apache-2.0" ]
null
null
null
include/git2/notes.h
Asquera/libgit2
d9a5009ea6ae980bb3770055aa54c05a5164bcb0
[ "Apache-2.0" ]
null
null
null
include/git2/notes.h
Asquera/libgit2
d9a5009ea6ae980bb3770055aa54c05a5164bcb0
[ "Apache-2.0" ]
null
null
null
/* * Copyright (C) 2009-2012 the libgit2 contributors * * This file is part of libgit2, distributed under the GNU GPL v2 with * a Linking Exception. For full terms see the included COPYING file. */ #ifndef INCLUDE_git_note_h__ #define INCLUDE_git_note_h__ #include "oid.h" /** * @file git2/notes.h * @brief Git notes management routines * @defgroup git_note Git notes management routines * @ingroup Git * @{ */ GIT_BEGIN_DECL /** * Callback for git_note_foreach. * * Receives: * - blob_id: Oid of the blob containing the message * - annotated_object_id: Oid of the git object being annotated * - payload: Payload data passed to `git_note_foreach` */ typedef int (*git_note_foreach_cb)( const git_oid *blob_id, const git_oid *annotated_object_id, void *payload); /** * Read the note for an object * * The note must be freed manually by the user. * * @param out pointer to the read note; NULL in case of error * @param repo repository where to look up the note * @param notes_ref canonical name of the reference to use (optional); defaults to * "refs/notes/commits" * @param oid OID of the git object to read the note from * * @return 0 or an error code */ GIT_EXTERN(int) git_note_read( git_note **out, git_repository *repo, const char *notes_ref, const git_oid *oid); /** * Get the note message * * @param note * @return the note message */ GIT_EXTERN(const char *) git_note_message(const git_note *note); /** * Get the note object OID * * @param note * @return the note object OID */ GIT_EXTERN(const git_oid *) git_note_oid(const git_note *note); /** * Add a note for an object * * @param out pointer to store the OID (optional); NULL in case of error * @param repo repository where to store the note * @param author signature of the notes commit author * @param committer signature of the notes commit committer * @param notes_ref canonical name of the reference to use (optional); * defaults to "refs/notes/commits" * @param oid OID of the git object to decorate * @param note Content of the 
note to add for object oid * * @return 0 or an error code */ GIT_EXTERN(int) git_note_create( git_oid *out, git_repository *repo, const git_signature *author, const git_signature *committer, const char *notes_ref, const git_oid *oid, const char *note); /** * Remove the note for an object * * @param repo repository where the note lives * @param notes_ref canonical name of the reference to use (optional); * defaults to "refs/notes/commits" * @param author signature of the notes commit author * @param committer signature of the notes commit committer * @param oid OID of the git object to remove the note from * * @return 0 or an error code */ GIT_EXTERN(int) git_note_remove( git_repository *repo, const char *notes_ref, const git_signature *author, const git_signature *committer, const git_oid *oid); /** * Free a git_note object * * @param note git_note object */ GIT_EXTERN(void) git_note_free(git_note *note); /** * Get the default notes reference for a repository * * @param out Pointer to the default notes reference * @param repo The Git repository * * @return 0 or an error code */ GIT_EXTERN(int) git_note_default_ref(const char **out, git_repository *repo); /** * Loop over all the notes within a specified namespace * and issue a callback for each one. * * @param repo Repository where to find the notes. * * @param notes_ref Reference to read from (optional); defaults to * "refs/notes/commits". * * @param note_cb Callback to invoke per found annotation. Return non-zero * to stop looping. * * @param payload Extra parameter to callback function. * * @return 0 on success, GIT_EUSER on non-zero callback, or error code */ GIT_EXTERN(int) git_note_foreach( git_repository *repo, const char *notes_ref, git_note_foreach_cb note_cb, void *payload); /** @} */ GIT_END_DECL #endif
25.849673
82
0.71732
[ "object" ]
ae6acd94d5a7d2c694b5c016579a7002b9f84c4f
5,267
h
C
kent/src/inc/jsonParse.h
EffieChantzi/UCSC-Genome-Browser
2880c645ba661602eeb4cb5f13578e8d193aa2ad
[ "IJG" ]
15
2017-11-25T20:20:27.000Z
2022-03-27T13:51:20.000Z
kent/src/inc/jsonParse.h
EffieChantzi/UCSC-Genome-Browser
2880c645ba661602eeb4cb5f13578e8d193aa2ad
[ "IJG" ]
14
2017-02-08T22:43:29.000Z
2021-12-28T16:21:22.000Z
kent/src/inc/jsonParse.h
EffieChantzi/UCSC-Genome-Browser
2880c645ba661602eeb4cb5f13578e8d193aa2ad
[ "IJG" ]
2
2018-04-27T09:55:50.000Z
2020-06-04T19:42:04.000Z
/* jsonParse - routines to parse JSON strings and traverse and pick things out of the * resulting object tree. */ #ifndef JSONPARSE_H #define JSONPARSE_H /* JSON Element code let's you build up a DOM like data structure in memory and then serialize it into html for communication with client side code. */ // supported types typedef enum _jsonElementType { jsonList = 0, jsonObject = 1, jsonNumber = 2, jsonDouble = 3, jsonBoolean = 4, jsonString = 5, jsonNull = 6 } jsonElementType; union jsonElementVal { struct slRef *jeList; struct hash *jeHash; long jeNumber; double jeDouble; boolean jeBoolean; char *jeString; void *jeNull; }; struct jsonElement { jsonElementType type; union jsonElementVal val; }; // constructors for each jsonElementType struct jsonElement *newJsonString(char *str); struct jsonElement *newJsonBoolean(boolean val); struct jsonElement *newJsonNumber(long val); struct jsonElement *newJsonDouble(double val); struct jsonElement *newJsonObject(struct hash *h); struct jsonElement *newJsonList(struct slRef *list); struct jsonElement *newJsonNull(); void jsonObjectAdd(struct jsonElement *h, char *name, struct jsonElement *ele); // Add a new element to a jsonObject; existing values are replaced. void jsonListAdd(struct jsonElement *list, struct jsonElement *ele); // Add a new element to a jsonList struct jsonElement *jsonParse(char *str); // parse string into an in-memory json representation char *jsonStringEscape(char *inString); /* backslash escape a string for use in a double quoted json string. * More conservative than javaScriptLiteralEncode because * some json parsers complain if you escape & or ' */ void jsonFindNameRecurse(struct jsonElement *ele, char *jName, struct slName **pList); // Search the JSON tree recursively to find all the values associated to // the name, and add them to head of the list. 
struct slName *jsonFindName(struct jsonElement *json, char *jName); // Search the JSON tree to find all the values associated to the name // and add them to head of the list. struct slName *jsonFindNameUniq(struct jsonElement *json, char *jName); // Search the JSON tree to find all the values associated to the name // and add them to head of the list. void jsonElementRecurse(struct jsonElement *ele, char *name, boolean isLast, void (*startCallback)(struct jsonElement *ele, char *name, boolean isLast, void *context), // Called at element start void (*endCallback)(struct jsonElement *ele, char *name, boolean isLast, void *context), // Called at element end void *context); /* Recurse through JSON tree calling callback functions with element and context. * Either startCallback or endCallback may be NULL, as can be name. */ void jsonPrintOneStart(struct jsonElement *ele, char *name, boolean isLast, int indent, FILE *f); /* Print the start of one json element - just name and maybe an opening brace or bracket. * Recursion is handled elsewhere. */ void jsonPrintOneEnd(struct jsonElement *ele, char *name, boolean isLast, boolean indent, FILE *f); /* Print object end */ void jsonPrintToFile(struct jsonElement *root, char *name, FILE *f, int indentPer); /* Print out JSON object and all children nicely indented to f as JSON objects. * Name may be NULL. Implemented via jsonPrintOneStart/jsonPrintOneEnd. */ /** Routines that check json type and return corresponding value. **/ struct slRef *jsonListVal(struct jsonElement *ele, char *name); /* Enforce element is type jsonList or jsonNull. Return list value, which may be NULL. */ struct hash *jsonObjectVal(struct jsonElement *ele, char *name); /* Enforce object is type jsonObject or jsonNull. Return object hash, which may be NULL. */ long jsonNumberVal(struct jsonElement *ele, char *name); /* Enforce element is type jsonNumber and return value. 
*/ double jsonDoubleVal(struct jsonElement *ele, char *name); /* Enforce element is type jsonDouble and return value. */ boolean jsonBooleanVal(struct jsonElement *ele, char *name); /* Enforce element is type jsonBoolean and return value. */ char *jsonStringVal(struct jsonElement *ele, char *eleName); /* Enforce element is type jsonString or jsonNull. Return value, which may be NULL. */ /** Routines that help work with json objects (bracket enclosed key/val pairs **/ struct jsonElement *jsonFindNamedField(struct jsonElement *object, char *objectName, char *field); /* Find named field of object or return NULL if not found. Abort if object * is not actually an object. */ struct jsonElement *jsonMustFindNamedField(struct jsonElement *object, char *objectName, char *field); /* Find named field of object or die trying. */ char *jsonOptionalStringField(struct jsonElement *object, char *field, char *defaultVal); /* Return string valued field of object, or defaultVal if it doesn't exist. */ char *jsonStringField(struct jsonElement *object, char *field); /* Return string valued field of object or abort if field doesn't exist. */ boolean jsonOptionalBooleanField(struct jsonElement *object, char *field, boolean defaultVal); /* Return boolean valued field of object, or defaultVal if it doesn't exist. */ #endif /* JSONPARSE_H */
38.166667
102
0.747484
[ "object" ]
ae6b93d9ac0c82157bbb7e30c89903257325ad8f
1,281
h
C
SORBadgeValueManager.h
MarkeJave/SORSyncObjectRelation
db95012b30eee4d7eb2f447f87971634c71748d0
[ "MIT" ]
1
2018-02-22T12:27:33.000Z
2018-02-22T12:27:33.000Z
SORBadgeValueManager.h
MarkeJave/SORSyncObjectRelation
db95012b30eee4d7eb2f447f87971634c71748d0
[ "MIT" ]
null
null
null
SORBadgeValueManager.h
MarkeJave/SORSyncObjectRelation
db95012b30eee4d7eb2f447f87971634c71748d0
[ "MIT" ]
1
2018-02-22T12:27:34.000Z
2018-02-22T12:27:34.000Z
// // SORBadgeValueManager.h // SORSyncObjectRelation // // Created by xulinfeng on 2017/1/11. // Copyright © 2017年 xulinfeng. All rights reserved. // #import <Foundation/Foundation.h> #import <SORObjectRelation/SORObjectRelationUmbrella.h> extern NSString * const SORObjectRelationRootName; extern NSString * const SORObjectRelationNormalMessageDomainName; @interface SORBadgeValueManager : NSObject /** * name: com.SORSyncObjectRelation.object.relation.root */ @property (nonatomic, strong, readonly) SORSyncCountObjectRelation *rootObjectRelation; /** * root.home */ @property (nonatomic, strong, readonly) SORSyncCountObjectRelation *homeObjectRelation; /** * root.message * * 普通聊天消息 归属于 root.message * domain:root.message.normal.messages SORObjectRelationNormalMessageDomainName * name construct: root.message.normal.messages##chatID SORMajorKeyCountObjectRelation */ @property (nonatomic, strong, readonly) SORSyncCountObjectRelation *messageObjectRelation; - (SORSyncMajorKeyCountObjectRelation *)normalMessageObjectRelationWithChatID:(NSString *)chatID; + (instancetype)sharedManager; @end @interface NSObject (BadgeValueObjectRelation) @property (nonatomic, strong, readonly) SORSyncCountObjectRelation *badgeValueObjectRelation; @end
27.255319
97
0.797034
[ "object" ]
ae6ce946cdc1eadbefbd0af77a26893c4364d894
3,573
h
C
Code/Common/itkRGBToLuminanceImageAdaptor.h
kiranhs/ITKv4FEM-Kiran
0e4ab3b61b5fc4c736f04a73dd19e41390f20152
[ "BSD-3-Clause" ]
1
2018-04-15T13:32:43.000Z
2018-04-15T13:32:43.000Z
Code/Common/itkRGBToLuminanceImageAdaptor.h
kiranhs/ITKv4FEM-Kiran
0e4ab3b61b5fc4c736f04a73dd19e41390f20152
[ "BSD-3-Clause" ]
null
null
null
Code/Common/itkRGBToLuminanceImageAdaptor.h
kiranhs/ITKv4FEM-Kiran
0e4ab3b61b5fc4c736f04a73dd19e41390f20152
[ "BSD-3-Clause" ]
null
null
null
/*========================================================================= Program: Insight Segmentation & Registration Toolkit Module: itkRGBToLuminanceImageAdaptor.h Language: C++ Date: $Date$ Version: $Revision$ Copyright (c) Insight Software Consortium. All rights reserved. See ITKCopyright.txt or http://www.itk.org/HTML/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notices for more information. =========================================================================*/ #ifndef __itkRGBToLuminanceImageAdaptor_h #define __itkRGBToLuminanceImageAdaptor_h #include <itkImageAdaptor.h> #include "vnl/vnl_math.h" namespace itk { namespace Accessor { /** \class RGBToLuminancePixelAccessor * \brief Give access to Luminance of a color pixel type. * * RGBToLuminancePixelAccessor is templated over an internal type and an * external type representation. This class cast the input applies the function * to it and cast the result according to the types defined as template * parameters. The input pixel type must support the GetLuminance() method. * This is the case of the RGBPixel class for example. * * \ingroup ImageAdaptors */ template <class TInternalType, class TExternalType > class ITK_EXPORT RGBToLuminancePixelAccessor { public: /** External typedef. It defines the external aspect * that this class will exhibit. */ typedef TExternalType ExternalType; /** Internal typedef. It defines the internal real * representation of data. 
*/ typedef TInternalType InternalType; static inline void Set(TInternalType & output, const TExternalType & input) {output = static_cast<TInternalType>( input.GetLuminance() );} static inline TExternalType Get( const TInternalType & input ) {return static_cast<TExternalType>( input.GetLuminance() );} }; } // end namespace Accessor /** \class RGBToLuminanceImageAdaptor * \brief Presents a color image as being composed of the Luminance of its pixels. * * Additional casting is performed according to the input and output image * types following C++ default casting rules. The input color pixel type must * provide a GetLuminance() method. * * \ingroup ImageAdaptors */ template <class TImage, class TOutputPixelType> class ITK_EXPORT RGBToLuminanceImageAdaptor : public ImageAdaptor<TImage, Accessor::RGBToLuminancePixelAccessor< typename TImage::PixelType, TOutputPixelType> > { public: /** Standard class typedefs. */ typedef RGBToLuminanceImageAdaptor Self; typedef ImageAdaptor<TImage, Accessor::RGBToLuminancePixelAccessor< typename TImage::PixelType, TOutputPixelType> > Superclass; typedef SmartPointer<Self> Pointer; typedef SmartPointer<const Self> ConstPointer; /** Method for creation through the object factory. */ itkNewMacro(Self); /** Run-time type information (and related methods). */ itkTypeMacro( RGBToLuminanceImageAdaptor, ImageAdaptor ); protected: RGBToLuminanceImageAdaptor() {} virtual ~RGBToLuminanceImageAdaptor() {} private: RGBToLuminanceImageAdaptor(const Self&); //purposely not implemented void operator=(const Self&); //purposely not implemented }; } // end namespace itk #endif
34.68932
82
0.684299
[ "object" ]
ae6e218ed40807008883c039acbb2476c90d33be
1,110
h
C
src_c/softfp/tiny_dnn/core/kernels/maxpool_op_avx.h
wqqchh2014/DNN
c00f6ce156e37ce966ffdac141410cdafc58f235
[ "BSD-3-Clause" ]
6
2020-02-21T10:40:42.000Z
2022-01-16T07:38:20.000Z
src_c/softfp/tiny_dnn/core/kernels/maxpool_op_avx.h
wqqchh2014/DNN
c00f6ce156e37ce966ffdac141410cdafc58f235
[ "BSD-3-Clause" ]
null
null
null
src_c/softfp/tiny_dnn/core/kernels/maxpool_op_avx.h
wqqchh2014/DNN
c00f6ce156e37ce966ffdac141410cdafc58f235
[ "BSD-3-Clause" ]
3
2020-05-04T16:13:23.000Z
2021-01-08T16:11:20.000Z
/* Copyright (c) 2013, Taiga Nomi and the respective contributors All rights reserved. Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. */ #pragma once #include <vector> #include "tiny_dnn/core/kernels/maxpool_op_internal.h" namespace tiny_dnn { namespace kernels { inline void maxpool_op_avx(const tensor_t &in_data, tensor_t &out_data, core::maxpool_params &params, const bool layer_parallelize) { maxpool_op_internal(in_data, out_data, params, layer_parallelize); } inline void maxpool_grad_op_avx(tensor_t &prev_delta, const tensor_t &curr_delta, std::vector<std::vector<size_t>> &max_idx, const std::vector<size_t> &in2out, const bool layer_parallelize) { maxpool_grad_op_internal(prev_delta, curr_delta, max_idx, in2out, layer_parallelize); } } // namespace kernels } // namespace tiny_dnn
31.714286
80
0.607207
[ "vector" ]
ae76726ae0b91e8bb9fad86d783f100cb97a09bb
1,971
h
C
src/runtime/crt/ndarray.h
retamia/tvm
5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8
[ "Apache-2.0" ]
9
2019-12-17T08:03:54.000Z
2022-01-19T02:34:23.000Z
src/runtime/crt/ndarray.h
retamia/tvm
5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8
[ "Apache-2.0" ]
2
2020-06-18T21:15:42.000Z
2020-06-24T17:38:37.000Z
src/runtime/crt/ndarray.h
retamia/tvm
5d25dc54d874bf2ddf0e8cf34c4748e9e2656fd8
[ "Apache-2.0" ]
3
2020-10-04T20:30:18.000Z
2022-01-24T18:03:52.000Z
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file tvm/runtime/crt/ndarray.h * \brief Abstract device memory management API */ #ifndef TVM_RUNTIME_CRT_NDARRAY_H_ #define TVM_RUNTIME_CRT_NDARRAY_H_ #include <dlpack/dlpack.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <tvm/runtime/c_backend_api.h> #include <tvm/runtime/c_runtime_api.h> /*! \brief Magic number for NDArray file */ static const uint64_t kTVMNDArrayMagic = 0xDD5E40F096B4A13F; /*! \brief Magic number for NDArray list file */ static const uint64_t kTVMNDArrayListMagic = 0xF7E58D4F05049CB7; typedef struct TVMNDArray { DLTensor dl_tensor; } TVMNDArray; TVMNDArray TVMNDArray_Create(uint32_t ndim, const tvm_index_t* shape, DLDataType dtype, DLContext ctx); TVMNDArray TVMNDArray_Empty(uint32_t ndim, const tvm_index_t* shape, DLDataType dtype, DLContext ctx); int TVMNDArray_Load(TVMNDArray* ret, const char** strm); TVMNDArray TVMNDArray_CreateView(TVMNDArray* arr, const tvm_index_t* shape, uint32_t ndim, DLDataType dtype); int TVMNDArray_Release(TVMNDArray* arr); #endif // TVM_RUNTIME_CRT_NDARRAY_H_
33.982759
90
0.738204
[ "shape" ]
ae81a9e0890fc01540de84ef90229096b66d2109
842
h
C
MirageENGiNE_Engine/src/game/components/freelook.h
Harha/MirageEngine
6d0634385f514bbfce08cfacc8d743ce78c461fc
[ "MIT" ]
1
2017-05-27T23:23:28.000Z
2017-05-27T23:23:28.000Z
MirageENGiNE_Engine/src/game/components/freelook.h
Harha/MirageEngine
6d0634385f514bbfce08cfacc8d743ce78c461fc
[ "MIT" ]
null
null
null
MirageENGiNE_Engine/src/game/components/freelook.h
Harha/MirageEngine
6d0634385f514bbfce08cfacc8d743ce78c461fc
[ "MIT" ]
null
null
null
#ifndef FREELOOK_H #define FREELOOK_H #include "game/gcomponent.h" // std includes #include <string> #include <vector> namespace mirage { class GraphicsEngine; class GameObject; enum FreeLookDirection_t : uint8_t { FLD_X, FLD_Y, FLD_Z }; class FreeLook : public GameComponent { public: FreeLook( float sensitivity = 0.1f, float rollSpeed = 0.25f, const std::string & identifier = "freelook" ); ~FreeLook(); virtual void update(float dt) override; virtual void render(GraphicsEngine * const gfxEngine) override; void look(FreeLookDirection_t direction, float delta, float dt); void setSensitivity(float sensitivity); float getSensitivity() const; void setRollSpeed(float rollSpeed); float getRollSpeed() const; private: float m_sensitivity; float m_rollSpeed; }; } #endif // FREELOOK_H
17.914894
66
0.726841
[ "render", "vector" ]
ae86abc8df16315cf449ea9878eacd5d2f00af8c
1,979
h
C
include/World/Animation.h
tizian/Cendric2
5b0438c73a751bcc0d63c3af839af04ab0fb21a3
[ "MIT" ]
279
2015-05-06T19:04:07.000Z
2022-03-21T21:33:38.000Z
include/World/Animation.h
tizian/Cendric2
5b0438c73a751bcc0d63c3af839af04ab0fb21a3
[ "MIT" ]
222
2016-10-26T15:56:25.000Z
2021-10-03T15:30:18.000Z
include/World/Animation.h
tizian/Cendric2
5b0438c73a751bcc0d63c3af839af04ab0fb21a3
[ "MIT" ]
49
2015-10-01T21:23:03.000Z
2022-03-19T20:11:31.000Z
#pragma once #include "global.h" //////////////////////////////////////////////////////////// // This class was altered from the original source // by Ironbell // For the original source, see notice below //////////////////////////////////////////////////////////// // // Copyright (C) 2014 Maximilian Wagenbach (aka. Foaly) (foaly.f@web.de) // // This software is provided 'as-is', without any express or implied warranty. // In no event will the authors be held liable for any damages arising from the use of this software. // // Permission is granted to anyone to use this software for any purpose, // including commercial applications, and to alter it and redistribute it freely, // subject to the following restrictions: // // 1. The origin of this software must not be misrepresented; // you must not claim that you wrote the original software. // If you use this software in a product, an acknowledgment // in the product documentation would be appreciated but is not required. // // 2. Altered source versions must be plainly marked as such, // and must not be misrepresented as being the original software. // // 3. This notice may not be removed or altered from any source distribution. // //////////////////////////////////////////////////////////// class Animation final { public: Animation(const sf::Time& frameTime) { m_frameTime = frameTime; }; Animation() { m_frameTime = sf::milliseconds(100); }; void clearFrames(); void addFrame(const sf::IntRect& rect); void setSpriteSheet(const sf::Texture* texture); void setFrameTime(const sf::Time& frameTime); void setLooped(bool isLooped); const sf::Texture* getSpriteSheet() const; std::size_t getSize() const; const sf::IntRect& getFrame(std::size_t n) const; const sf::Time& getFrameTime() const; const sf::Time getAnimationTime() const; bool isLooped() const; private: bool m_isLooped = true; std::vector<sf::IntRect> m_frames; sf::Time m_frameTime; const sf::Texture* m_texture = nullptr; };
35.981818
101
0.674078
[ "vector" ]
ae922a3d32395739962ed2e8d79a7bde06183487
13,152
h
C
aws-cpp-sdk-servicecatalog-appregistry/include/aws/servicecatalog-appregistry/model/GetApplicationResult.h
perfectrecall/aws-sdk-cpp
fb8cbebf2fd62720b65aeff841ad2950e73d8ebd
[ "Apache-2.0" ]
1
2022-02-10T08:06:54.000Z
2022-02-10T08:06:54.000Z
aws-cpp-sdk-servicecatalog-appregistry/include/aws/servicecatalog-appregistry/model/GetApplicationResult.h
perfectrecall/aws-sdk-cpp
fb8cbebf2fd62720b65aeff841ad2950e73d8ebd
[ "Apache-2.0" ]
1
2022-01-03T23:59:37.000Z
2022-01-03T23:59:37.000Z
aws-cpp-sdk-servicecatalog-appregistry/include/aws/servicecatalog-appregistry/model/GetApplicationResult.h
ravindra-wagh/aws-sdk-cpp
7d5ff01b3c3b872f31ca98fb4ce868cd01e97696
[ "Apache-2.0" ]
1
2021-12-30T04:25:33.000Z
2021-12-30T04:25:33.000Z
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include <aws/servicecatalog-appregistry/AppRegistry_EXPORTS.h> #include <aws/core/utils/memory/stl/AWSString.h> #include <aws/core/utils/DateTime.h> #include <aws/core/utils/memory/stl/AWSMap.h> #include <aws/servicecatalog-appregistry/model/Integrations.h> #include <utility> namespace Aws { template<typename RESULT_TYPE> class AmazonWebServiceResult; namespace Utils { namespace Json { class JsonValue; } // namespace Json } // namespace Utils namespace AppRegistry { namespace Model { class AWS_APPREGISTRY_API GetApplicationResult { public: GetApplicationResult(); GetApplicationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result); GetApplicationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>& result); /** * <p>The identifier of the application.</p> */ inline const Aws::String& GetId() const{ return m_id; } /** * <p>The identifier of the application.</p> */ inline void SetId(const Aws::String& value) { m_id = value; } /** * <p>The identifier of the application.</p> */ inline void SetId(Aws::String&& value) { m_id = std::move(value); } /** * <p>The identifier of the application.</p> */ inline void SetId(const char* value) { m_id.assign(value); } /** * <p>The identifier of the application.</p> */ inline GetApplicationResult& WithId(const Aws::String& value) { SetId(value); return *this;} /** * <p>The identifier of the application.</p> */ inline GetApplicationResult& WithId(Aws::String&& value) { SetId(std::move(value)); return *this;} /** * <p>The identifier of the application.</p> */ inline GetApplicationResult& WithId(const char* value) { SetId(value); return *this;} /** * <p>The Amazon resource name (ARN) that specifies the application across * services.</p> */ inline const Aws::String& GetArn() const{ return m_arn; } /** * <p>The Amazon resource name (ARN) that specifies the 
application across * services.</p> */ inline void SetArn(const Aws::String& value) { m_arn = value; } /** * <p>The Amazon resource name (ARN) that specifies the application across * services.</p> */ inline void SetArn(Aws::String&& value) { m_arn = std::move(value); } /** * <p>The Amazon resource name (ARN) that specifies the application across * services.</p> */ inline void SetArn(const char* value) { m_arn.assign(value); } /** * <p>The Amazon resource name (ARN) that specifies the application across * services.</p> */ inline GetApplicationResult& WithArn(const Aws::String& value) { SetArn(value); return *this;} /** * <p>The Amazon resource name (ARN) that specifies the application across * services.</p> */ inline GetApplicationResult& WithArn(Aws::String&& value) { SetArn(std::move(value)); return *this;} /** * <p>The Amazon resource name (ARN) that specifies the application across * services.</p> */ inline GetApplicationResult& WithArn(const char* value) { SetArn(value); return *this;} /** * <p>The name of the application. The name must be unique in the region in which * you are creating the application.</p> */ inline const Aws::String& GetName() const{ return m_name; } /** * <p>The name of the application. The name must be unique in the region in which * you are creating the application.</p> */ inline void SetName(const Aws::String& value) { m_name = value; } /** * <p>The name of the application. The name must be unique in the region in which * you are creating the application.</p> */ inline void SetName(Aws::String&& value) { m_name = std::move(value); } /** * <p>The name of the application. The name must be unique in the region in which * you are creating the application.</p> */ inline void SetName(const char* value) { m_name.assign(value); } /** * <p>The name of the application. 
The name must be unique in the region in which * you are creating the application.</p> */ inline GetApplicationResult& WithName(const Aws::String& value) { SetName(value); return *this;} /** * <p>The name of the application. The name must be unique in the region in which * you are creating the application.</p> */ inline GetApplicationResult& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;} /** * <p>The name of the application. The name must be unique in the region in which * you are creating the application.</p> */ inline GetApplicationResult& WithName(const char* value) { SetName(value); return *this;} /** * <p>The description of the application.</p> */ inline const Aws::String& GetDescription() const{ return m_description; } /** * <p>The description of the application.</p> */ inline void SetDescription(const Aws::String& value) { m_description = value; } /** * <p>The description of the application.</p> */ inline void SetDescription(Aws::String&& value) { m_description = std::move(value); } /** * <p>The description of the application.</p> */ inline void SetDescription(const char* value) { m_description.assign(value); } /** * <p>The description of the application.</p> */ inline GetApplicationResult& WithDescription(const Aws::String& value) { SetDescription(value); return *this;} /** * <p>The description of the application.</p> */ inline GetApplicationResult& WithDescription(Aws::String&& value) { SetDescription(std::move(value)); return *this;} /** * <p>The description of the application.</p> */ inline GetApplicationResult& WithDescription(const char* value) { SetDescription(value); return *this;} /** * <p>The ISO-8601 formatted timestamp of the moment when the application was * created.</p> */ inline const Aws::Utils::DateTime& GetCreationTime() const{ return m_creationTime; } /** * <p>The ISO-8601 formatted timestamp of the moment when the application was * created.</p> */ inline void SetCreationTime(const Aws::Utils::DateTime& value) { 
m_creationTime = value; } /** * <p>The ISO-8601 formatted timestamp of the moment when the application was * created.</p> */ inline void SetCreationTime(Aws::Utils::DateTime&& value) { m_creationTime = std::move(value); } /** * <p>The ISO-8601 formatted timestamp of the moment when the application was * created.</p> */ inline GetApplicationResult& WithCreationTime(const Aws::Utils::DateTime& value) { SetCreationTime(value); return *this;} /** * <p>The ISO-8601 formatted timestamp of the moment when the application was * created.</p> */ inline GetApplicationResult& WithCreationTime(Aws::Utils::DateTime&& value) { SetCreationTime(std::move(value)); return *this;} /** * <p>The ISO-8601 formatted timestamp of the moment when the application was last * updated.</p> */ inline const Aws::Utils::DateTime& GetLastUpdateTime() const{ return m_lastUpdateTime; } /** * <p>The ISO-8601 formatted timestamp of the moment when the application was last * updated.</p> */ inline void SetLastUpdateTime(const Aws::Utils::DateTime& value) { m_lastUpdateTime = value; } /** * <p>The ISO-8601 formatted timestamp of the moment when the application was last * updated.</p> */ inline void SetLastUpdateTime(Aws::Utils::DateTime&& value) { m_lastUpdateTime = std::move(value); } /** * <p>The ISO-8601 formatted timestamp of the moment when the application was last * updated.</p> */ inline GetApplicationResult& WithLastUpdateTime(const Aws::Utils::DateTime& value) { SetLastUpdateTime(value); return *this;} /** * <p>The ISO-8601 formatted timestamp of the moment when the application was last * updated.</p> */ inline GetApplicationResult& WithLastUpdateTime(Aws::Utils::DateTime&& value) { SetLastUpdateTime(std::move(value)); return *this;} /** * <p>The number of top-level resources that were registered as part of this * application.</p> */ inline int GetAssociatedResourceCount() const{ return m_associatedResourceCount; } /** * <p>The number of top-level resources that were registered as part of this * 
application.</p> */ inline void SetAssociatedResourceCount(int value) { m_associatedResourceCount = value; } /** * <p>The number of top-level resources that were registered as part of this * application.</p> */ inline GetApplicationResult& WithAssociatedResourceCount(int value) { SetAssociatedResourceCount(value); return *this;} /** * <p>Key-value pairs associated with the application.</p> */ inline const Aws::Map<Aws::String, Aws::String>& GetTags() const{ return m_tags; } /** * <p>Key-value pairs associated with the application.</p> */ inline void SetTags(const Aws::Map<Aws::String, Aws::String>& value) { m_tags = value; } /** * <p>Key-value pairs associated with the application.</p> */ inline void SetTags(Aws::Map<Aws::String, Aws::String>&& value) { m_tags = std::move(value); } /** * <p>Key-value pairs associated with the application.</p> */ inline GetApplicationResult& WithTags(const Aws::Map<Aws::String, Aws::String>& value) { SetTags(value); return *this;} /** * <p>Key-value pairs associated with the application.</p> */ inline GetApplicationResult& WithTags(Aws::Map<Aws::String, Aws::String>&& value) { SetTags(std::move(value)); return *this;} /** * <p>Key-value pairs associated with the application.</p> */ inline GetApplicationResult& AddTags(const Aws::String& key, const Aws::String& value) { m_tags.emplace(key, value); return *this; } /** * <p>Key-value pairs associated with the application.</p> */ inline GetApplicationResult& AddTags(Aws::String&& key, const Aws::String& value) { m_tags.emplace(std::move(key), value); return *this; } /** * <p>Key-value pairs associated with the application.</p> */ inline GetApplicationResult& AddTags(const Aws::String& key, Aws::String&& value) { m_tags.emplace(key, std::move(value)); return *this; } /** * <p>Key-value pairs associated with the application.</p> */ inline GetApplicationResult& AddTags(Aws::String&& key, Aws::String&& value) { m_tags.emplace(std::move(key), std::move(value)); return *this; } /** * 
<p>Key-value pairs associated with the application.</p> */ inline GetApplicationResult& AddTags(const char* key, Aws::String&& value) { m_tags.emplace(key, std::move(value)); return *this; } /** * <p>Key-value pairs associated with the application.</p> */ inline GetApplicationResult& AddTags(Aws::String&& key, const char* value) { m_tags.emplace(std::move(key), value); return *this; } /** * <p>Key-value pairs associated with the application.</p> */ inline GetApplicationResult& AddTags(const char* key, const char* value) { m_tags.emplace(key, value); return *this; } /** * <p>The information about the integration of the application with other services, * such as Resource Groups.</p> */ inline const Integrations& GetIntegrations() const{ return m_integrations; } /** * <p>The information about the integration of the application with other services, * such as Resource Groups.</p> */ inline void SetIntegrations(const Integrations& value) { m_integrations = value; } /** * <p>The information about the integration of the application with other services, * such as Resource Groups.</p> */ inline void SetIntegrations(Integrations&& value) { m_integrations = std::move(value); } /** * <p>The information about the integration of the application with other services, * such as Resource Groups.</p> */ inline GetApplicationResult& WithIntegrations(const Integrations& value) { SetIntegrations(value); return *this;} /** * <p>The information about the integration of the application with other services, * such as Resource Groups.</p> */ inline GetApplicationResult& WithIntegrations(Integrations&& value) { SetIntegrations(std::move(value)); return *this;} private: Aws::String m_id; Aws::String m_arn; Aws::String m_name; Aws::String m_description; Aws::Utils::DateTime m_creationTime; Aws::Utils::DateTime m_lastUpdateTime; int m_associatedResourceCount; Aws::Map<Aws::String, Aws::String> m_tags; Integrations m_integrations; }; } // namespace Model } // namespace AppRegistry } // namespace 
Aws
33.55102
148
0.655414
[ "model" ]
ae993e9d8e80c53a7380021de2c7fad4e87b97d9
12,350
c
C
unshrink.c
ThirdProject/android_external_unzip
a24c870b9f87ce692bdd352e2dfa4e06fcd266b5
[ "Info-ZIP" ]
157
2015-07-08T18:29:22.000Z
2022-03-10T10:22:58.000Z
unshrink.c
ThirdProject/android_external_unzip
a24c870b9f87ce692bdd352e2dfa4e06fcd266b5
[ "Info-ZIP" ]
1,037
2015-07-18T03:09:12.000Z
2022-03-13T17:39:55.000Z
unshrink.c
ThirdProject/android_external_unzip
a24c870b9f87ce692bdd352e2dfa4e06fcd266b5
[ "Info-ZIP" ]
74
2015-07-08T19:42:19.000Z
2021-12-22T06:15:46.000Z
/* Copyright (c) 1990-2008 Info-ZIP. All rights reserved. See the accompanying file LICENSE, version 2000-Apr-09 or later (the contents of which are also included in unzip.h) for terms of use. If, for some reason, all these files are missing, the Info-ZIP license also may be found at: ftp://ftp.info-zip.org/pub/infozip/license.html */ /*--------------------------------------------------------------------------- unshrink.c version 1.22 19 Mar 2008 NOTE: This code may or may not infringe on the so-called "Welch patent" owned by Unisys. (From reading the patent, it appears that a pure LZW decompressor is *not* covered, but this claim has not been tested in court, and Unisys is reported to believe other- wise.) It is therefore the responsibility of the user to acquire whatever license(s) may be required for legal use of this code. THE INFO-ZIP GROUP DISCLAIMS ALL LIABILITY FOR USE OF THIS CODE IN VIOLATION OF APPLICABLE PATENT LAW. Shrinking is basically a dynamic LZW algorithm with allowed code sizes of up to 13 bits; in addition, there is provision for partial clearing of leaf nodes. PKWARE uses the special code 256 (decimal) to indicate a change in code size or a partial clear of the code tree: 256,1 for the former and 256,2 for the latter. [Note that partial clearing can "orphan" nodes: the parent-to-be can be cleared before its new child is added, but the child is added anyway (as an orphan, as though the parent still existed). When the tree fills up to the point where the parent node is reused, the orphan is effectively "adopted." Versions prior to 1.05 were affected more due to greater use of pointers (to children and siblings as well as parents).] This replacement version of unshrink.c was written from scratch. It is based only on the algorithms described in Mark Nelson's _The Data Compres- sion Book_ and in Terry Welch's original paper in the June 1984 issue of IEEE _Computer_; no existing source code, including any in Nelson's book, was used. 
Memory requirements have been reduced in this version and are now no more than the original Sam Smith code. This is still larger than any of the other algorithms: at a minimum, 8K+8K+16K (stack+values+parents) assuming 16-bit short ints, and this does not even include the output buffer (the other algorithms leave the uncompressed data in the work area, typically called slide[]). For machines with a 64KB data space this is a problem, particularly when text conversion is required and line endings have more than one character. UnZip's solution is to use two roughly equal halves of outbuf for the ASCII conversion in such a case; the "unshrink" argument to flush() signals that this is the case. For large-memory machines, a second outbuf is allocated for translations, but only if unshrinking and only if translations are required. | binary mode | text mode --------------------------------------------------- big mem | big outbuf | big outbuf + big outbuf2 <- malloc'd here small mem | small outbuf | half + half small outbuf Copyright 1994, 1995 Greg Roelofs. See the accompanying file "COPYING" in UnZip 5.20 (or later) source or binary distributions. ---------------------------------------------------------------------------*/ #define __UNSHRINK_C /* identifies this source module */ #define UNZIP_INTERNAL #include "unzip.h" #ifndef LZW_CLEAN static void partial_clear OF((__GPRO__ int lastcodeused)); #ifdef DEBUG # define OUTDBG(c) \ if ((c)<32 || (c)>=127) fprintf(stderr,"\\x%02x",(c)); else putc((c),stderr); #else # define OUTDBG(c) #endif /* HSIZE is defined as 2^13 (8192) in unzip.h (resp. 
unzpriv.h */ #define BOGUSCODE 256 #define FLAG_BITS parent /* upper bits of parent[] used as flag bits */ #define CODE_MASK (HSIZE - 1) /* 0x1fff (lower bits are parent's index) */ #define FREE_CODE HSIZE /* 0x2000 (code is unused or was cleared) */ #define HAS_CHILD (HSIZE << 1) /* 0x4000 (code has a child--do not clear) */ #define parent G.area.shrink.Parent #define Value G.area.shrink.value /* "value" conflicts with Pyramid ioctl.h */ #define stack G.area.shrink.Stack /***********************/ /* Function unshrink() */ /***********************/ int unshrink(__G) __GDEF { uch *stacktop = stack + (HSIZE - 1); register uch *newstr; uch finalval; int codesize=9, len, error; shrint code, oldcode, curcode; shrint lastfreecode; unsigned int outbufsiz; #if (defined(DLL) && !defined(NO_SLIDE_REDIR)) /* Normally realbuf and outbuf will be the same. However, if the data * are redirected to a large memory buffer, realbuf will point to the * new location while outbuf will remain pointing to the malloc'd * memory buffer. */ uch *realbuf = G.outbuf; #else # define realbuf G.outbuf #endif /*--------------------------------------------------------------------------- Initialize various variables. ---------------------------------------------------------------------------*/ lastfreecode = BOGUSCODE; #ifndef VMS /* VMS uses its own buffer scheme for textmode flush(). 
*/ #ifndef SMALL_MEM /* non-memory-limited machines: allocate second (large) buffer for * textmode conversion in flush(), but only if needed */ if (G.pInfo->textmode && !G.outbuf2 && (G.outbuf2 = (uch *)malloc(TRANSBUFSIZ)) == (uch *)NULL) return PK_MEM3; #endif #endif /* !VMS */ for (code = 0; code < BOGUSCODE; ++code) { Value[code] = (uch)code; parent[code] = BOGUSCODE; } for (code = BOGUSCODE+1; code < HSIZE; ++code) parent[code] = FREE_CODE; #if (defined(DLL) && !defined(NO_SLIDE_REDIR)) if (G.redirect_slide) { /* use normal outbuf unless we're a DLL routine */ realbuf = G.redirect_buffer; outbufsiz = (unsigned)G.redirect_size; } else #endif #ifdef DLL if (G.pInfo->textmode && !G.redirect_data) #else if (G.pInfo->textmode) #endif outbufsiz = RAWBUFSIZ; else outbufsiz = OUTBUFSIZ; G.outptr = realbuf; G.outcnt = 0L; /*--------------------------------------------------------------------------- Get and output first code, then loop over remaining ones. ---------------------------------------------------------------------------*/ READBITS(codesize, oldcode) if (G.zipeof) return PK_OK; finalval = (uch)oldcode; OUTDBG(finalval) *G.outptr++ = finalval; ++G.outcnt; while (TRUE) { READBITS(codesize, code) if (G.zipeof) break; if (code == BOGUSCODE) { /* possible to have consecutive escapes? */ READBITS(codesize, code) if (G.zipeof) break; if (code == 1) { ++codesize; Trace((stderr, " (codesize now %d bits)\n", codesize)); if (codesize > MAX_BITS) return PK_ERR; } else if (code == 2) { Trace((stderr, " (partial clear code)\n")); /* clear leafs (nodes with no children) */ partial_clear(__G__ lastfreecode); Trace((stderr, " (done with partial clear)\n")); lastfreecode = BOGUSCODE; /* reset start of free-node search */ } continue; } /*----------------------------------------------------------------------- Translate code: traverse tree from leaf back to root. 
-----------------------------------------------------------------------*/ newstr = stacktop; curcode = code; if (parent[code] == FREE_CODE) { /* or (FLAG_BITS[code] & FREE_CODE)? */ Trace((stderr, " (found a KwKwK code %d; oldcode = %d)\n", code, oldcode)); *newstr-- = finalval; code = oldcode; } while (code != BOGUSCODE) { if (newstr < stack) { /* Bogus compression stream caused buffer underflow! */ Trace((stderr, "unshrink stack overflow!\n")); return PK_ERR; } if (parent[code] == FREE_CODE) { /* or (FLAG_BITS[code] & FREE_CODE)? */ Trace((stderr, " (found a KwKwK code %d; oldcode = %d)\n", code, oldcode)); *newstr-- = finalval; code = oldcode; } else { *newstr-- = Value[code]; code = (shrint)(parent[code] & CODE_MASK); } } len = (int)(stacktop - newstr++); finalval = *newstr; /*----------------------------------------------------------------------- Write expanded string in reverse order to output buffer. -----------------------------------------------------------------------*/ Trace((stderr, "code %4d; oldcode %4d; char %3d (%c); len %d; string [", curcode, oldcode, (int)(*newstr), (*newstr<32 || *newstr>=127)? ' ':*newstr, len)); { register uch *p; for (p = newstr; p < newstr+len; ++p) { *G.outptr++ = *p; OUTDBG(*p) if (++G.outcnt == outbufsiz) { Trace((stderr, "doing flush(), outcnt = %lu\n", G.outcnt)); if ((error = flush(__G__ realbuf, G.outcnt, TRUE)) != 0) { Trace((stderr, "unshrink: flush() error (%d)\n", error)); return error; } G.outptr = realbuf; G.outcnt = 0L; Trace((stderr, "done with flush()\n")); } } } /*----------------------------------------------------------------------- Add new leaf (first character of newstr) to tree as child of oldcode. -----------------------------------------------------------------------*/ /* search for freecode */ code = (shrint)(lastfreecode + 1); /* add if-test before loop for speed? 
*/ while ((code < HSIZE) && (parent[code] != FREE_CODE)) ++code; lastfreecode = code; Trace((stderr, "]; newcode %d\n", code)); if (code >= HSIZE) /* invalid compressed data caused max-code overflow! */ return PK_ERR; Value[code] = finalval; parent[code] = oldcode; oldcode = curcode; } /*--------------------------------------------------------------------------- Flush any remaining data and return to sender... ---------------------------------------------------------------------------*/ if (G.outcnt > 0L) { Trace((stderr, "doing final flush(), outcnt = %lu\n", G.outcnt)); if ((error = flush(__G__ realbuf, G.outcnt, TRUE)) != 0) { Trace((stderr, "unshrink: flush() error (%d)\n", error)); return error; } Trace((stderr, "done with flush()\n")); } return PK_OK; } /* end function unshrink() */ /****************************/ /* Function partial_clear() */ /* no longer recursive... */ /****************************/ static void partial_clear(__G__ lastcodeused) __GDEF int lastcodeused; { register shrint code; /* clear all nodes which have no children (i.e., leaf nodes only) */ /* first loop: mark each parent as such */ for (code = BOGUSCODE+1; code <= lastcodeused; ++code) { register shrint cparent = (shrint)(parent[code] & CODE_MASK); if (cparent > BOGUSCODE) FLAG_BITS[cparent] |= HAS_CHILD; /* set parent's child-bit */ } /* second loop: clear all nodes *not* marked as parents; reset flag bits */ for (code = BOGUSCODE+1; code <= lastcodeused; ++code) { if (FLAG_BITS[code] & HAS_CHILD) /* just clear child-bit */ FLAG_BITS[code] &= ~HAS_CHILD; else { /* leaf: lose it */ Trace((stderr, "%d\n", code)); parent[code] = FREE_CODE; } } return; } #endif /* !LZW_CLEAN */
36.646884
80
0.531984
[ "3d" ]
ae9caa2bc1c80d835262723282719e286359550e
677
h
C
include/ResourceManager.h
laonious/hexgame
309771d4d9a7fe6b41631579ac76b3dfb42fc257
[ "Unlicense" ]
3
2018-09-04T15:48:00.000Z
2019-12-03T18:11:18.000Z
include/ResourceManager.h
laonious/hexgame
309771d4d9a7fe6b41631579ac76b3dfb42fc257
[ "Unlicense" ]
null
null
null
include/ResourceManager.h
laonious/hexgame
309771d4d9a7fe6b41631579ac76b3dfb42fc257
[ "Unlicense" ]
1
2018-09-05T07:48:10.000Z
2018-09-05T07:48:10.000Z
#ifndef RESOURCE_MANAGER_H #define RESOURCE_MANAGER_H #include "Mesh.h" #include "ResourcePool.h" #include "ShaderManager.h" #include "Texture.h" class ResourceManager { public: static void Destroy(); static ResourceManager *getSingleton(); Texture *addTexture(const std::string &fileName); Texture *getTexture(const std::string &textureHandle); void addMesh(const std::string &fileName); Mesh *getMesh(const std::string &meshHandle); ShaderManager *getShaderManager(); private: ResourceManager(); ~ResourceManager(); static void Init(); static ResourceManager *m_Singleton; ResourcePool<Mesh> m_MeshPool; TexturePool m_TexturePool; }; #endif
20.515152
56
0.753323
[ "mesh" ]
aea018657b026d70ecdaca2543db68026942fdf8
2,469
h
C
src/world.h
literallyvoid/residue
1cd6e7bf1e2cb59ebbe081481c6e0f215516f7f8
[ "MIT" ]
null
null
null
src/world.h
literallyvoid/residue
1cd6e7bf1e2cb59ebbe081481c6e0f215516f7f8
[ "MIT" ]
null
null
null
src/world.h
literallyvoid/residue
1cd6e7bf1e2cb59ebbe081481c6e0f215516f7f8
[ "MIT" ]
null
null
null
/* -*- mode: c++ -*- */ #pragma once #include <unordered_map> #include <array> #include "chunk.h" #include "mesh.h" namespace std { template<typename T, size_t N> struct hash<array<T, N> > { typedef array<T, N> argument_type; typedef size_t result_type; result_type operator()(const argument_type& a) const { hash<T> hasher; result_type h = 0; for (result_type i = 0; i < N; ++i) { h = h * 31 + hasher(a[i]); } return h; } }; } class World { public: World(); ~World(); /** Draw the world. **/ void draw(); /** Cast a ray from `start`, in the direction `direction`, for `maxDistance` blocks. @param start Where the ray should start. @param direction Which direction the ray should go. @param maxDistance The maximum distance the ray should go. @param success Whether the raycast hit something or not. @return The (x, y, z) coordinates of the block that was hit. **/ std::array<int, 3> castRay(float start[3], float direction[3], float maxDistance, bool &success); // XXX: should probably return the block directly before it too, for placement, etc. /** Get the block at (x, y, z). @param x The x-coordinate of the block. @param y Ditto. @param z Ditto. @return The block at (x, y, z). **/ Block getBlock(int x, int y, int z); /** Set the block at (x, y, z) to `b`. @param x The x-coordinate of the block. @param y Ditto. @param z Ditto. @param b The block to place at (x, y, z). @return Nothing. **/ void setBlock(int x, int y, int z, Block b); /** Get the chunk which contains a block at (x, y). @param x The x-coordinate of the block. @param y Ditto. @param chunkX The x-position of the resultant chunk. @param chunkY Ditto. @return The chunk at (chunkX, chunkY). **/ Chunk *getChunk(int x, int y, int &chunkX, int &chunkY); private: /** Load the chunk at (x, y). @param x The x-coordinate of the chunk (divided by CHUNK_SIDE_LENGTH). @param y Ditto. @return If the chunk was already loaded. **/ bool loadChunk(int x, int y); /** The shader with which to draw all the chunks. 
**/ Shader *shader; /** The map of all the loaded chunks. **/ std::unordered_map<std::array<int, 2>, Chunk*> chunks; };
22.243243
184
0.584447
[ "mesh" ]
aea09b8d6c0251214df77c43816e9e302fbacb01
2,150
h
C
mindspore/lite/src/delegate/parameter_cache/lfu_cache.h
wsjlovecode/mindspore
0b5ad5318041172c1e1d825343d496100a4ae2dc
[ "Apache-2.0" ]
null
null
null
mindspore/lite/src/delegate/parameter_cache/lfu_cache.h
wsjlovecode/mindspore
0b5ad5318041172c1e1d825343d496100a4ae2dc
[ "Apache-2.0" ]
null
null
null
mindspore/lite/src/delegate/parameter_cache/lfu_cache.h
wsjlovecode/mindspore
0b5ad5318041172c1e1d825343d496100a4ae2dc
[ "Apache-2.0" ]
null
null
null
/** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef MINDSPORE_LITE_LRU_CACHE_H_ #define MINDSPORE_LITE_LRU_CACHE_H_ #include <map> #include <unordered_map> #include <list> #include <vector> #include "include/api/status.h" #include "src/delegate/parameter_cache/cache_algorithm.h" namespace mindspore { namespace cache { class LFUCacheAlgorithm : public CacheAlgorithm { public: LFUCacheAlgorithm(size_t cache_size, int min_host_index, int max_host_index) : cache_size_(cache_size), min_host_index_(min_host_index), max_host_index_(max_host_index) {} ~LFUCacheAlgorithm() override; int Get(int key) override; void Put(int key, int value) override; Status CheckCacheHit(const int *batch_ids, const size_t batch_ids_len, int *cache_index, std::vector<int> *need_swap_indies, std::vector<int> *need_swap_indies_cache_index) override; private: CacheNoe *GetNode(int key); void GetHitNodesAndSwapIndex(const int *batch_ids, const size_t batch_ids_len, int *cache_index, std::unordered_map<int, CacheNoe *> *hit_index_nodes, std::unordered_map<int, std::vector<int>> *need_swap_map); std::list<CacheNoe *> GetSwapNodes(const std::unordered_map<int, std::vector<int>> &need_swap_map); std::unordered_map<int, std::list<CacheNoe *>::iterator> key_table_; std::map<int, std::list<CacheNoe *>> frequency_table_; size_t cache_size_; int min_host_index_{0}; int max_host_index_{1}; }; } // namespace cache } // namespace mindspore #endif // 
MINDSPORE_LITE_LRU_CACHE_H_
37.719298
116
0.733953
[ "vector" ]
aea8229d9e6d3e787f4fcdb3843093c20065a63d
7,091
h
C
src/auction/AuctAssoc.h
sifta/auction-assignment-lib
febcd23b32a17620d1e3088d70e08b1b76aca740
[ "MIT" ]
null
null
null
src/auction/AuctAssoc.h
sifta/auction-assignment-lib
febcd23b32a17620d1e3088d70e08b1b76aca740
[ "MIT" ]
null
null
null
src/auction/AuctAssoc.h
sifta/auction-assignment-lib
febcd23b32a17620d1e3088d70e08b1b76aca740
[ "MIT" ]
null
null
null
#ifndef AuctAssoc_H #define AuctAssoc_H #include "AssocMatrix.h" class AuctAssoc : public AssocMatrix { // ------------------------------------------------------------------ // // Constructors, Destructors, and Operators. // // ------------------------------------------------------------------ public: /** * Constructor for symmetric association matrix * @param N size of association matrix */ AuctAssoc(int N); /** * Constructor for asymmetric association matrix * @param N number of rows in association matrix * @param M number of columns in association matrix */ AuctAssoc(int N, int M); /** * Constructor for symmetric association matrix * @param S Shape of auction problem. */ AuctAssoc (AuctShape& S); /** * Destructor. */ virtual ~AuctAssoc(); // ------------------------------------------------------------------ // // Accessor Methods // // ------------------------------------------------------------------ public: /** * Access prices (scalar). * @param ind Column index of price to look up * @return Price of queried row */ inline int Price(int ind) { return prices[ind]; } /** * Set prices (scalar). * @param ind Column index of price to set * @param value Price for given row */ inline void set_Price(int ind, int value) { if (ind > -1) prices[ind] = value; } /** * Access profits (scalar). * @param ind Row index of profit to look up * @return Profit of queried column */ inline int Prof(int ind) { return profits[ind]; } /** * Set profits (scalar). * @param ind Row index of profit to set * @param value Profit of given column */ inline void set_Prof(int ind, int value) { if (ind > -1) profits[ind] = value; } /** * Access prices (vector). * @param outprices Array in which to fill prices for each row */ void get_prices(int* outprices) { std::copy(prices, prices+Ncols, outprices); } /** * Set prices (vector). * @param inprices Array containing prices for each row */ void set_prices(int* inprices) { std::copy(inprices, inprices+Ncols, prices); } /** * Access profits (vector). 
* @param outprof Array in which to fill profits for each column */ void get_profits(int* outprof) { std::copy(profits, profits+Nrows, outprof); } /** * Set profits (vector). * @param inprof Array containing profits for each column */ void set_profits(int* inprof) { std::copy(inprof, inprof+Nrows, profits); } // ------------------------------------------------------------------ // // Action Methods // // ------------------------------------------------------------------ public: /** * Consistency test w.r.t. a given shape. * @param Sh Shape to test consistency with * @return True if given shape is consistent with this Association Matrix */ bool is_consistent (AuctShape &Sh) { return (Sh.nfullrows() == Nrows && Sh.nfullcols() == Ncols); } /** * Transpose Associations. Overrides the AssocMatrix version. */ void transpose(); /** * compute minimum price. * @return Minimum price for all rows */ int min_price(); /** * Compute maximum price for specified rows. Only rows which have an * associated column are considered. The price returned is that of the * associated column. * @return Maximum price for rows in specified range * @param first First row in range to consider * @param last Last row in range to consider */ int max_price_in_rowrange (int first, int last); /** * Compute minimum price for specified rows. Only rows which have an * associated column are considered. The price returned is that of the * associated column. * @return Minimum price for rows in specified range * @param first First row in range to consider * @param last Last row in range to consider */ int min_price_in_rowrange (int first, int last); /** * Compute maximum price for specified columns. * @return Maximum price for rows in specified range * @param first First column in range to consider * @param last Last column in range to consider */ int max_price_in_colrange (int first, int last); /** * Compute minimum price for specified columns. 
* @return Minimum price for columns in specified range * @param first First column in range to consider * @param last Last column in range to consider */ int min_price_in_colrange (int first, int last); /** * Compute the minimum associated price for a columns. * @return Minimum price for columns which are associated w/ a row. */ int get_minassoc(); /** * Compute the maximum unassociated price for a columns. * @return Maximum price for a column which is not associated with any row. */ int get_maxunass(); /** * Compute the minimum associated profit for a row. * @return Minimum profit for a row which is associated w/ a column. */ int get_minassoc_prof(); /** * Compute the maximum associated price for a column. * @return Maximum profit for a row which is not associated with any column. */ int get_maxunass_prof(); /** * Equalize the prices of unassigned entries; useful in checking e-CS. * @param Shape of payoff matrix. */ void group_price_equalize(AuctShape& Sh); /** * Enforce "hidden" bids, which arise from asymmtric problems. * @param Sh Shape of Auction Problem * @param Param Auction Parameters */ void hidden_bid(AuctShape& Sh, AuctParm& Param); /** * Output associations (scalar). This is relevant for the case of * multiple assignments per row/column. * @param Sh Shape of Auction Problem * @param i Row in Original Payoff matrix to consider * @param j Column in Original Payoff matrix to consider * @return Number of associations at (i,j) in AuctPay */ int numAssoc (AuctShape& Sh, int i, int j); /** * Output assignments (vector, AuctPay based). * Return the column index (of AuctPay) of each assignments per * expanded row of the AssocMatrix. Here, multiple row assignments are * treated as separate rows. * @param Sh Shape of Auction Problem * @param ovec Output vector in which to place column indicies */ void getrowassign (AuctShape& Sh, std::vector<int>& ovec); /** * Output assignments (vector, AuctAssoc based). 
* Return the (expanded) column index (of AuctAssoc) of each assignment * per expanded row of the AuctAssoc matrix. * @param Sh Shape of Auction Problem * @param ovec Output vector in which to place column indicies */ void getrowassign_id (AuctShape& Sh, std::vector<int>& ovec); // // ------------------------------------------------------------------ // // Data Members. // // ------------------------------------------------------------------ // private: int *prices; //!< array of prices for each row int *profits; //!< array of profits for each column }; #endif
27.917323
81
0.606402
[ "shape", "vector" ]
aeaaa12bad4fc4ea173872f1cac09c2fa505b4b8
66,324
h
C
bdb/bdb_int.h
bowlofstew/comdb2
47fed5ab1c950508855952249985c17cb4ef23c5
[ "Apache-2.0" ]
null
null
null
bdb/bdb_int.h
bowlofstew/comdb2
47fed5ab1c950508855952249985c17cb4ef23c5
[ "Apache-2.0" ]
null
null
null
bdb/bdb_int.h
bowlofstew/comdb2
47fed5ab1c950508855952249985c17cb4ef23c5
[ "Apache-2.0" ]
null
null
null
/* Copyright 2015 Bloomberg Finance L.P. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef __bdb_int_h__ #define __bdb_int_h__ #define restrict /*#define RW_RRN_LOCK*/ #include <sys/types.h> #include <stdio.h> #include <stdlib.h> #include <db.h> #include <bb_stdint.h> #include <compile_time_assert.h> #include <object_pool.h> #include <list.h> #include <plhash.h> #include <thread_util.h> #include "bdb_cursor.h" #include "cursor_ll.h" #include "bdb_access.h" #include <compile_time_assert.h> #include <epochlib.h> #include <cheapstack.h> #include <cdb2_constants.h> #include "averager.h" #include "intern_strings.h" #include "bdb_schemachange.h" #define NAME_MANGLE #define MAXRECSZ (17 * 1024) #define MAXKEYSZ (1024) #define MAXTABLES 4096 #define MAXIX 64 #define NIL -1 #define MAXDTAFILES 16 /* primary data file + 15 blobs files */ #define MAXSTRIPE 16 /* max stripe factor */ /* Some additional error codes, chosen not to conflict with system codes * or with berkdb error codes. Use bdb_strerror() to decode. */ #define DB_ODH_CORRUPT (-40000) /* On disk header corrupt */ #define DB_UNCOMPRESS_ERR (-40001) /* Cannot inflate compressed rec */ #include "ix_return_codes.h" #include "mem_bdb.h" #include "mem_override.h" /* Public ODH constants */ enum { ODH_UPDATEID_BITS = 12, ODH_LENGTH_BITS = 28, ODH_SIZE = 7, /* We may extend for larger headers in the future, but the minimum size shall always be 7 bytes. 
*/ ODH_SIZE_RESERVE = 7, /* Callers wishing to provide a buffer into which a record will be packed should allow this many bytes on top of the record size for the ODH. Right now this is the same as ODH_SIZE - one day it may be the max possible ODH size if we start adding fields. */ ODH_FLAG_COMPR_MASK = 0x7 }; /* snapisol log ops */ enum log_ops { LOG_APPLY = 0, LOG_PRESCAN = 1, LOG_BACKFILL = 2 }; /* These are the fields of the ondisk header. This is not the ondisk * representation but a convenient format for passing the header around in * our code. */ struct odh { uint32_t length; /* actually only 28 bits of this can be used leading to a max value of (1<<ODH_LENGTH_BITS)-1 */ uint16_t updateid; /* actually only 12 bits of this can be used leading to a max value of (1<<ODH_UPDATEID_BITS)-1 */ uint8_t csc2vers; uint8_t flags; void *recptr; /* Some functions set this to point to the decompressed record data. */ }; /* XXX TODO. look into replacing these macros with something provided by BDE team */ #ifndef MIN #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #endif #ifndef MAX #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #endif /* by trial and error it seems that for queue databases the available bytes for * record data is pagesize-32. Can't seem to find an appropriate constant * in berkdb... 
*/ #define QUEUE_PAGE_HEADER_SZ 32 void make_lsn(DB_LSN *logseqnum, unsigned int filenum, unsigned int offsetnum); struct tran_table_shadows; typedef struct tran_table_shadows tran_table_shadows_t; typedef enum { TRANCLASS_BERK = 1, TRANCLASS_LOGICAL = 2, TRANCLASS_PHYSICAL = 3, TRANCLASS_READCOMMITTED = 4, TRANCLASS_SERIALIZABLE = 5, TRANCLASS_QUERYISOLATION = 6, /* used for blocksql/socksql */ TRANCLASS_LOGICAL_NOROWLOCKS = 7, /* used in fetch.c for table locks */ TRANCLASS_SOSQL = 8, /* unfortunatelly I need this to cache a transaction */ TRANCLASS_SNAPISOL = 9 /* unfortunatelly I need this to cache a transaction */ } tranclass_type; #define PAGE_KEY \ unsigned char fileid[DB_FILE_ID_LEN]; \ db_pgno_t pgno; struct lsn_list { DB_LSN lsn; LINKC_T(struct lsn_list) lnk; #ifdef NEWSI_DEBUG_POOL void *pool; #endif }; struct commit_list { DB_LSN commit_lsn; unsigned long long logical_tranid; LINKC_T(struct commit_list) lnk; #ifdef NEWSI_DEBUG_POOL void *pool; #endif }; struct lsn_commit_list { DB_LSN lsn; DB_LSN commit_lsn; LINKC_T(struct lsn_commit_list) lnk; #ifdef NEWSI_DEBUG_POOL void *pool; #endif }; struct relink_list { db_pgno_t inh; DB_LSN lsn; LINKC_T(struct relink_list) lnk; #ifdef NEWSI_DEBUG_POOL void *pool; #endif }; enum { PGLOGS_QUEUE_PAGE = 1, PGLOGS_QUEUE_RELINK = 2 }; struct shadows_pglogs_queue_key { LINKC_T(struct shadows_pglogs_queue_key) lnk; unsigned long long logical_tranid; int type; db_pgno_t pgno; db_pgno_t prev_pgno; db_pgno_t next_pgno; DB_LSN lsn; DB_LSN commit_lsn; #ifdef NEWSI_DEBUG_POOL void *pool; #endif }; struct shadows_asof_cursor { unsigned char fileid[DB_FILE_ID_LEN]; struct shadows_pglogs_queue_key *cur; }; struct shadows_fileid_pglogs_queue { unsigned char fileid[DB_FILE_ID_LEN]; int deleteme; pthread_rwlock_t queue_lk; LISTC_T(struct shadows_pglogs_queue_key) queue_keys; }; // This is stored in a hash indexed by fileid. All cursors pointed // at a fileid maintain a pointer to the same memory. 
struct pglogs_queue_cursor {
    unsigned char fileid[DB_FILE_ID_LEN];
    struct shadows_fileid_pglogs_queue *queue; /* shared per-fileid queue */
    struct shadows_pglogs_queue_key *last;     /* last key this cursor saw */
};

struct pglogs_queue_heads {
    int index;
    unsigned char **fileids;
};

/* (fileid, pgno) -> (lsn, commit_lsn) key. */
struct page_logical_lsn_key {
    PAGE_KEY
    DB_LSN lsn;
    DB_LSN commit_lsn;
};

/* (fileid, pgno) -> list of LSNs that touched the page. */
struct shadows_pglogs_key {
    PAGE_KEY
    LISTC_T(struct lsn_list) lsns;
#ifdef NEWSI_DEBUG_POOL
    void *pool;
#endif
};

/* (fileid, pgno) -> list of (lsn, commit_lsn) pairs. */
struct shadows_pglogs_logical_key {
    PAGE_KEY
    LISTC_T(struct lsn_commit_list) lsns;
#ifdef NEWSI_DEBUG_POOL
    void *pool;
#endif
};

/* (fileid, pgno) -> list of relinks affecting the page. */
struct pglogs_relink_key {
    PAGE_KEY
    LISTC_T(struct relink_list) relinks;
#ifdef NEWSI_DEBUG_POOL
    void *pool;
#endif
};

/* Per-logical-transaction pglogs/relinks hashes. */
struct ltran_pglogs_key {
    unsigned long long logical_tranid;
    pthread_mutex_t pglogs_mutex;
    DB_LSN logical_commit_lsn; /* lsn of the physical commit of the logical
                                  transaction */
    hash_t *pglogs_hashtbl;
    hash_t *relinks_hashtbl;
};

struct timestamp_lsn_key {
    int32_t timestamp;
    DB_LSN lsn;
    unsigned long long context;
};

/* Per-logfile pglogs/relinks hashes. */
struct logfile_pglogs_entry {
    u_int32_t filenum;
    pthread_mutex_t pglogs_mutex;
    hash_t *pglogs_hashtbl;
    hash_t *relinks_hashtbl;
};

/* Pool-backed allocators/recyclers for the node types above. */
struct shadows_pglogs_key *allocate_shadows_pglogs_key(void);
struct shadows_pglogs_logical_key *allocate_shadows_pglogs_logical_key(void);
struct lsn_list *allocate_lsn_list(void);
struct lsn_commit_list *allocate_lsn_commit_list(void);
struct pglogs_relink_key *allocate_pglogs_relink_key(void);
struct relink_list *allocate_relink_list(void);
void return_pglogs_queue_key(struct shadows_pglogs_queue_key *qk);

struct checkpoint_list {
    DB_LSN lsn;
    DB_LSN ckp_lsn;
    int32_t timestamp;
    LINKC_T(struct checkpoint_list) lnk;
};

/* A bdb transaction: the berkdb txn handle plus the logical/rowlocks/
   snapshot-isolation state that rides along with it. */
struct tran_tag {
    tranclass_type tranclass; /* TRANCLASS_* flavor */
    DB_TXN *tid;
    u_int32_t logical_lid;
    void *usrptr;
    DB_LSN savelsn;
    struct tran_tag *parent;
    DB_LSN begin_lsn; /* lsn of logical begin */
    DB_LSN startlsn;  /* where log was when we started */

    /* snapshot bdb_state->numchildren - we don't care that much if DB-s are
       flipping, but we don't want to see transient tailing DB-s created
       by schema change or fastinit */
    int numchildren;

    /* this is indexed by dbnum; right now 0, 1 are meta, among them is also
       fstblk; these will never have shadows (shrugs) */
    tran_table_shadows_t *tables; /* shadow for tables */

    /* this is a replacement for the genid_bitmap, keep both for now */
    unsigned long long gblcontext;

    unsigned long long logical_tranid;

    /* LSN of the last logical record for this transaction */
    DB_LSN last_logical_lsn;
    DB_LSN last_physical_commit_lsn;

    /* which lsn generated the startgenid */
    DB_LSN snapy_commit_lsn;
    uint32_t snapy_commit_generation;

    DB_LSN last_regop_lsn;

    /* LSN of the physical commit/abort txn */
    DB_LSN commit_lsn;

    /* lsn when the tran obj was created */
    DB_LSN birth_lsn;

    /* Birth lsn of oldest outstanding logical txn at start time */
    DB_LSN oldest_txn_at_start;

    /* List of outstanding logical txns at start */
    uint64_t *bkfill_txn_list;

    /* Number of outstanding logical txns at start */
    int bkfill_txn_count;

    /* tran obj was created as of we were at a lsn */
    DB_LSN asof_lsn;

    /* oldest logical ref point of a begin-as-of tran */
    DB_LSN asof_ref_lsn;

    /* hash table for pglogs */
    hash_t *pglogs_hashtbl;

    /* hash table for relinks */
    hash_t *relinks_hashtbl;

    pthread_mutex_t pglogs_mutex;

    /* hash table to keep track of whether we have copied pglogs from the
       gbl structure for a given page */
    hash_t *asof_hashtbl;

    /* temporary: used in logical abort case */
    hash_t *compensated_records;

    /* anchor in bdb_state->transactions */
    LINKC_T(struct tran_tag) tranlist_lnk;

    /* For non-clustered sql offloading we pass the tran object allocated
     * in the block processor to the sql engine pool.  Then when the sql
     * engine creates shadow indexes it uses its thread id as part of the
     * file name.  However the shadow files don't get deleted until commit
     * or abort time on the original block processor thread, by which time
     * the sql engine thread may have been freed and reused for another
     * transaction.
     * Get round this by recording the threadid of the thread that creates
     * the transaction and using this in shadow file names. */
    pthread_t threadid;

    /* for recom and snapisol/serial, record in startgenid the context when
       this transaction was started;
       - for recom it is used to differentiate between synthetic genids and
         real (existing) genids
       - for si, this is also used to mask out new updates */
    unsigned long long startgenid;

    /* For logical transactions: a logical transaction may have a (one and
       only one) physical transaction in flight.  Latch it here for debugging
       and sanity checking */
    struct tran_tag *physical_tran;

    /* For a physical transaction, chain up to the logical transaction */
    struct tran_tag *logical_tran;

    /* snapshot/serializable support */
    struct bdb_osql_trn *osql;

    /* this is tested in rep.c to see if net needs to flush/wait */
    signed char is_about_to_commit;

    signed char aborted;
    signed char rep_handle_dead; /* must reopen all db cursors after abort */

    /* set if we are a top level transaction (ie, not a child) */
    signed char master;

    /* Set if we were created from the replication stream */
    signed char reptxn;

    signed char wrote_begin_record;
    signed char committed_begin_record;
    signed char get_schema_lock;
    signed char single_physical_transaction;

    /* log support */
    signed char trak; /* set this to enable tracking */

    /* query isolation support, a property of a session */
    signed char ignore_newer_updates;

    signed char is_rowlocks_trans;

    /* if the txn intends to write, this tells us to get write locks when we
       read */
    signed char write_intent;

    /* Open cursors under this transaction. */
    LISTC_T(bdb_cursor_ifn_t) open_cursors;

    /* Committed the child transaction. */
    signed char committed_child;

    /* total shadow rows */
    int shadow_rows;

    /* Set to 1 if we got the bdb lock */
    int got_bdb_lock;

    /* Set to 1 if this is a schema change txn */
    int schema_change_txn;

    /* cache the versions of dta files to catch schema changes and
       fastinits */
    int table_version_cache_sz;
    unsigned long long *table_version_cache;
    bdb_state_type *parent_state;

    /* Send the master periodic 'acks' after this many physical commits */
    int request_ack;

    int check_shadows;
    int micro_commit;

    /* Rowlocks commit support */
    pool_t *rc_pool;
    DBT **rc_list;
    DB_LOCK *rc_locks;
    u_int32_t rc_max;
    u_int32_t rc_count;

    /* Newsi pglogs queue hash */
    hash_t *pglogs_queue_hash;
};

/* Replication sequence number: an LSN plus master-lease and generation
   info.  Size is pinned by the compile-time assert below. */
struct seqnum_t {
    DB_LSN lsn;
    // For master lease
    uint32_t issue_time[2];
    uint32_t lease_ms;
    uint32_t commit_generation;
    uint32_t generation;
};
enum { BDB_SEQNUM_TYPE_LEN = 8 + 2 + 2 + 4 + 12 };
BB_COMPILE_TIME_ASSERT(bdb_seqnum_type,
                       sizeof(struct seqnum_t) == BDB_SEQNUM_TYPE_LEN);

/* A (fileid, pgno) pair to prefault; size pinned by the assert below. */
struct filepage_t {
    unsigned int fileid; /* fileid to prefault */
    unsigned int pgno;   /* page number to prefault */
};
enum { BDB_FILEPAGE_TYPE_LEN = 4 + 4 };
BB_COMPILE_TIME_ASSERT(bdb_filepage_type,
                       sizeof(struct filepage_t) == BDB_FILEPAGE_TYPE_LEN);

/* terminate list w/ index == -1 */
typedef struct {
    unsigned long long context;
    short index;
} cmpcontextlist_type;

struct thread_lock_info_tag;
typedef struct thread_lock_info_tag thread_lock_info_type;

/* Opaque forward declarations.  Guarded because bdb_api.h presumably
   declares the same typedefs for external users - confirm against that
   header. */
#ifndef __bdb_api_h__
struct bdb_state_tag;
typedef struct bdb_state_tag bdb_state_type;
struct bdb_callback_tag;
typedef struct bdb_callback_tag bdb_callback_type;
struct tran_tag;
typedef struct tran_tag tran_type;
struct bdb_attr_tag;
typedef struct bdb_attr_tag bdb_attr_type;
struct bdb_temp_hash;
typedef struct bdb_temp_hash bdb_temp_hash;
struct bulk_dump;
typedef struct bulk_dump bulk_dump;
struct dtadump;
typedef struct dtadump dtadump;
#endif

struct bdb_queue_priv;
typedef struct bdb_queue_priv bdb_queue_priv;
struct bdb_cursor_thd_tag;
typedef struct
bdb_cursor_thd_tag bdb_cursor_thd_t;

/* bdbcursor flavors (printable names come from cursortype(), presumably). */
enum bdbcursor_types {
    BDBC_UN = 0,
    BDBC_IX = 1,
    BDBC_DT = 2,
    BDBC_SK = 3,
    BDBC_BL = 4
};
char const *cursortype(int type);

/* track the cursor threading */
struct bdb_cursor_thd_tag {
    int x;
};

/* Internal state of a bdb cursor (the implementation behind a
   bdb_cursor_ifn - see the ifn back-pointer below). */
struct bdb_cursor_impl_tag {
    /* cursor btree info */
    enum bdbcursor_types type; /* BDBC_IX, BDBC_DT */
    int dbnum;                 /* dbnum for this bdbcursor */
    int idx;                   /* BDBC_IX:ixnum, BDBC_DT:split_dta_num */

    /* transaction */
    bdb_state_type *state;  /* state for */
    cursor_tran_t *curtran; /* all cursors (but comdb2 mode) have this */
    tran_type *shadow_tran; /* read committed and snapshot/serializable
                               modes */

    /* cursor position */
    int rrn;                  /* == 2 (don't need this) */
    unsigned long long genid; /* genid of current entry */
    void *data;               /* points inside one of bdb_berkdb_t if valid */
    int datalen;              /* size of payload */
    void *datacopy;
    void *unpacked_datacopy;

    /* new btree access interface */
    bdb_berkdb_t *rl; /* persistent berkdb */
    bdb_berkdb_t *sd; /* shadow berkdb */

    /* comdb2 mode support */
    DBCPS dbcps; /* serialized cursor */

    /* perfmetrics */
    int nsteps; /* count ops */

    /* read committed/snapshot/serializable mode support */
    tmpcursor_t *skip; /* skip list; don't touch this, use bdb_osql please */
    char *lastkey;     /* set after a row is consumed from real data (see
                          merging) */
    int lastkeylen;
    int laststripe;
    int lastpage;
    int lastindex;

    /* read committed/snapshot/serializable (maybe we should merge this
     * here, in bdb, not in db) */
    tmpcursor_t *addcur; /* cursors for add and upd data shadows; */
    void *addcur_odh;    /* working area for addcur odh. */
    int addcur_use_odh;

    /* page-order read committed/snapshot/serializable/snapisol */
    tmpcursor_t *pgordervs;

    /* support for deadlock */
    int invalidated; /* mark this if the cursor was unlocked */

    /* page-order flags */
    int pageorder;    /* mark if the cursor is in page-order */
    int discardpages; /* mark if the pages should be discarded immediately */

    tmptable_t *vs_stab;  /* Table of records to skip in the virtual
                             stripe. */
    tmpcursor_t *vs_skip; /* Cursor for vs_stab. */
#if 0
    tmptable_t *cstripe; /* Cursor stripe */
    tmpcursor_t *cscur;  /* Cursor for cstripe */
#endif
    int new_skip;              /* Set to 1 when the vs_skip has a new
                                  record. */
    int last_skip;             /* Set to 1 if we've passed the last record. */
    unsigned long long agenid; /* The last addcur genid. */
    int repo_addcur;           /* Set to 1 if we've added to addcur. */

    int threaded; /* mark if this is threaded */

    int upd_shadows_count; /* XXX todo */

    bdb_cursor_thd_t *thdinfo; /* track cursor threadinfo */

    /* if pointer */
    struct bdb_cursor_ifn *ifn;

    /* col attributes */
    char *collattr; /* pointer to tailing data, if any */
    int collattr_len;

    /* snapisol may need prescanning the updates to filter out older genids
       added by younger commits */
    int need_prescan;

    int *pagelockflag;
    int max_page_locks;
    int rowlocks;

    struct pglogs_queue_cursor *queue_cursor;

    uint8_t ver;
    uint8_t trak;    /* debug this cursor: set to 1 for verbose */
    uint8_t used_rl; /* set to 1 if rl position was consumed */
    uint8_t used_sd; /* set to 1 if sd position was consumed */
};

/* Serialized cursor position, internal form. */
struct bdb_cursor_ser_int {
    uint8_t is_valid;
    DBCS dbcs;
};
typedef struct bdb_cursor_ser_int bdb_cursor_ser_int_t;

#include "bdb_api.h"
#include "list.h"

/* A berkdb option recorded for later application. */
struct deferred_berkdb_option {
    char *attr;
    char *value;
    int ivalue;
    LINKC_T(struct deferred_berkdb_option) lnk;
};

/* Tunable attributes: one int member per DEF_ATTR entry in attr.h,
   generated by macro expansion. */
struct bdb_attr_tag {
#define DEF_ATTR(NAME, name, type, dflt) int name;
#include "attr.h"
#undef DEF_ATTR
    LISTC_T(struct deferred_berkdb_option) deferred_berkdb_options;
};

typedef int (*BDBFP)(); /* was called FP, but
that clashed with dbutil.h - sj */

/* Callback functions supplied by the layer above us. */
struct bdb_callback_tag {
    WHOISMASTERFP whoismaster_rtn;
    NODEUPFP nodeup_rtn;
    GETROOMFP getroom_rtn;
    REPFAILFP repfail_rtn;
    BDBAPPSOCKFP appsock_rtn;
    PRINTFP print_rtn;
    BDBELECTSETTINGSFP electsettings_rtn;
    BDBCATCHUPFP catchup_rtn;
    BDBTHREADDUMPFP threaddump_rtn;
    BDBGETFILELWMFP get_file_lwm_rtn;
    BDBSETFILELWMFP set_file_lwm_rtn;
    SCDONEFP scdone_rtn;
    SCABORTFP scabort_rtn;
    UNDOSHADOWFP undoshadow_rtn;
    NODEDOWNFP nodedown_rtn;
    SERIALCHECK serialcheck_rtn;
};

/* A waiter for replication to reach a given LSN. */
struct waiting_for_lsn {
    DB_LSN lsn;
    int start;
    LINKC_T(struct waiting_for_lsn) lnk;
};

typedef LISTC_T(struct waiting_for_lsn) wait_for_lsn_list;

/* Per-node replication sequence-number bookkeeping. */
typedef struct {
    seqnum_type *seqnums; /* 1 per node num */
    pthread_mutex_t lock;
    pthread_cond_t cond;
    pthread_key_t key;
    wait_for_lsn_list **waitlist;
    short *expected_udp_count;
    short *incomming_udp_count; /* sic: "incomming" - kept for ABI/source
                                   compatibility */
    short *udp_average_counter;
    int *filenum;
    pool_t *trackpool; /* need to do a bit better here... */
    struct averager **time_10seconds;
    struct averager **time_minute;
} seqnum_info_type;

/* Counters of replication events. */
typedef struct {
    int rep_process_message;
    int rep_zerorc;
    int rep_newsite;
    int rep_holdelection;
    int rep_newmaster;
    int rep_dupmaster;
    int rep_isperm;
    int rep_notperm;
    int rep_outdated;
    int rep_other;
    int dummy_adds;
    int commits;
} repstats_type;

struct sockaddr_in;

/* Replication state: network handles, current master, election state,
   stats and the UDP side-channel. */
typedef struct {
    netinfo_type *netinfo;
    netinfo_type *netinfo_signal;
    char *master_host;
    char *myhost;
    pthread_mutex_t elect_mutex;
    int *appseqnum; /* application level (bdb lib) sequencing */
    pthread_mutex_t appseqnum_lock;
    pthread_mutex_t upgrade_lock; /* ensure only 1 upgrade at a time */
    pthread_mutex_t send_lock;
    repstats_type repstats;
    pthread_mutex_t receive_lock;
    signed char in_rep_process_message;
    signed char disable_watcher;
    signed char in_election; /* true if we are in the middle of an election */
    signed char upgrade_allowed;
    int skipsinceepoch; /* since when have we been incoherent */
    int rep_process_message_start_time;
    int dont_elect_untill_time; /* sic: "untill" - kept for source
                                   compatibility */
    struct sockaddr_in *udp_addr;
    pthread_t udp_thread;
    int udp_fd;
    int should_reject_timestamp;
    int should_reject;
} repinfo_type;

/* Replicant coherency states. */
enum {
    STATE_COHERENT = 0,
    STATE_INCOHERENT = 1,
    STATE_INCOHERENT_SLOW = 2,
    STATE_INCOHERENT_WAIT = 3
};

struct bdb_state_tag;

/* Every time we add a blkseq, if the log file rolled, we add a new
 * entry with the earliest blkseq in the new log.  We maintain this list in
 * bdb_blkseq_insert and in bdb_blkseq_recover (should really call
 * bdb_blkseq_insert in recovery instead).  In log deletion code, we walk
 * the list, and disallow deletion for log files where the blkseq is too
 * new. */
struct seen_blkseq {
    u_int32_t logfile;
    int timestamp;
    LINKC_T(struct seen_blkseq) lnk;
};

struct temp_table;

/* The central bdb handle.  A "parent" bdb_state represents the whole
   environment; children (see the children[] array below) represent
   individual tables. */
struct bdb_state_tag {
    pthread_attr_t pthread_attr_detach;
    seqnum_info_type *seqnum_info;
    bdb_attr_type *attr;         /* attributes that have defaults */
    bdb_callback_type *callback; /* callback functions */
    DB_ENV *dbenv;               /* transactional environment */
    int read_write;              /* if we opened the db with R/W access */

    repinfo_type *repinfo; /* replication info */

    signed char numdtafiles;

    /* the berkeley db btrees underlying this "table" */
    DB *dbp_data[MAXDTAFILES][MAXSTRIPE]; /* the data files.  dbp_data[0] is
                                             the primary data file which
                                             would contain the record.
                                             higher files are extra data aka
                                             the blobs.  in blobstripe mode
                                             the blob files are striped too,
                                             otherwise they are not. */
    DB *dbp_ix[MAXIX]; /* handle for the ixN files */

    pthread_key_t tid_key;

    int numthreads;
    pthread_mutex_t numthreads_lock;

    char *name;   /* name of the comdb */
    char *txndir; /* name of the transaction directory for log files */
    char *tmpdir; /* name of directory for temporary dbs */
    char *dir;    /* directory the files go in (/bb/data /bb/data2) */

    int lrl; /* Logical Record Length (0 = variable) */

    short numix;        /* number of indexes */
    short ixlen[MAXIX]; /* size of each index */

    signed char ixdta[MAXIX]; /* does this index contain the dta?
*/
    signed char ixcollattr[MAXIX]; /* does this index contain the column
                                      attributes? */
    signed char ixnulls[MAXIX]; /* does this index contain any columns that
                                   allow nulls? */
    signed char ixdups[MAXIX];   /* 1 if ix allows dupes, else 0 */
    signed char ixrecnum[MAXIX]; /* 1 if we turned on recnum mode for
                                    btrees */

    short keymaxsz; /* size of the keymax buffer */

    /* the helper threads (only valid for a "parent" bdb_state) */
    pthread_t checkpoint_thread;
    pthread_t watcher_thread;
    pthread_t memp_trickle_thread;
    pthread_t logdelete_thread;
    pthread_t lock_detect_thread;
    pthread_t coherency_lease_thread;
    pthread_t master_lease_thread;

    struct bdb_state_tag *parent; /* pointer to our parent */
    short numchildren;
    struct bdb_state_tag *children[MAXTABLES];

    pthread_rwlock_t *bdb_lock; /* we need this to do safe upgrades.  fetch
                                   operations get a read lock, upgrade
                                   requires a write lock - this way we can
                                   close and re-open databases knowing that
                                   there are no cursors opened on them */
    signed char bdb_lock_desired; /* signal that long running operations
                                     like fast dump should GET OUT! so that
                                     we can upgrade/downgrade */

    void *usr_ptr;

    pthread_t bdb_lock_write_holder;
    thread_lock_info_type *bdb_lock_write_holder_ptr;
    char bdb_lock_write_idstr[80];

    int seed;
    unsigned int last_genid_epoch;
    pthread_mutex_t seed_lock;

    /* One of the BDBTYPE_ constants */
    int bdbtype;
    /* Lite databases have no rrn cache, freerecs files, ix# files */

    int pagesize_override; /* 0, or a power of 2 */

    size_t queue_item_sz; /* size of a queue record in bytes (including
                           * struct bdb_queue_header) */

    /* bit mask of which consumers want to consume new queue items */
    uint32_t active_consumers;

    unsigned long long master_cmpcontext;

    /* stuff for the genid->thread affinity logic */
    int maxthreadid;
    unsigned char stripe_pool[17];
    unsigned char stripe_pool_start;
    pthread_mutex_t last_dta_lk;
    int last_dta;

    /* when did we convert to blobstripe? */
    unsigned long long blobstripe_convert_genid;

    pthread_mutex_t pending_broadcast_lock;

    unsigned long long gblcontext;

    void (*signal_rtoff)(void);

    int checkpoint_start_time;

    hash_t *logical_transactions_hash;
    DB_LSN lwm; /* low watermark for logical transactions */

    /* chain all transactions */
    pthread_mutex_t translist_lk;
    LISTC_T(struct tran_tag) logical_transactions_list;

    /* for queues this points to extra stuff defined in queue.c */
    bdb_queue_priv *qpriv;

    void *temp_list;
    pthread_mutex_t temp_list_lock;
    comdb2_objpool_t temp_table_pool; /* pooled temptables */
    pthread_t priosqlthr;
    int haspriosqlthr;
    int temp_table_id;
    int num_temp_tables;
    DB_MPOOL_STAT *temp_stats;

    pthread_mutex_t id_lock;
    unsigned int id;

    pthread_mutex_t gblcontext_lock;
    pthread_mutex_t children_lock;

    FILE *bdblock_debug_fp;
    pthread_mutex_t bdblock_debug_lock;

    uint8_t version;

    /* access control */
    bdb_access_t *access;

    char *origname; /* name before new.name shenanigans */

    pthread_mutex_t exit_lock;

    signed char have_recnums; /* 1 if ANY index has recnums enabled */
    signed char exiting;
    signed char caught_up; /* if we passed the recovery phase */
    signed char isopen;
    signed char envonly;
    signed char need_to_downgrade_and_lose;
    signed char rep_trace;
    signed char check_for_isperm;
    signed char got_isperm;
    signed char berkdb_rep_startupdone;
    signed char rep_started;
    signed char master_handle;
    signed char sanc_ok;
    signed char ondisk_header;  /* boolean: give each record an ondisk
                                   header? */
    signed char compress;       /* boolean: compress data? */
    signed char compress_blobs; /* boolean: compress blobs? */
    signed char got_gblcontext;
    signed char need_to_upgrade;
    signed char in_recovery;
    signed char in_bdb_recovery;
    signed char low_headroom_count;
    signed char pending_seqnum_broadcast;

    int *coherent_state;
    uint64_t *master_lease;
    pthread_mutex_t master_lease_lk;

    signed char after_llmeta_init_done;
    pthread_mutex_t coherent_state_lock;
    signed char not_coherent; /* master sets this if it knows we're not
                                 coherent */
    int not_coherent_time;
    uint64_t *last_downgrade_time;

    /* old databases with datacopy don't have odh in index */
    signed char datacopy_odh;

    /* inplace updates setting */
    signed char inplace_updates;

    signed char instant_schema_change;
    signed char rep_handle_dead;

    /* keep this as an int, it's read locklessly */
    int passed_dbenv_open;

    /* These are only used in a parent bdb_state.  This is a linked list of
     * all the thread specific lock info structs.  This is here currently
     * just to make debugging lock issues abit easier. */
    pthread_mutex_t thread_lock_info_list_mutex;
    LISTC_T(thread_lock_info_type) thread_lock_info_list;

    /* cache the version_num for the data; this is used to detect dta
       changes */
    unsigned long long version_num;

    pthread_cond_t temptable_wait;
#ifdef DEBUG_TEMP_TABLES
    LISTC_T(struct temp_table) busy_temptables;
#endif
    char *recoverylsn;
    int disable_page_order_tablescan;

    pthread_mutex_t *blkseq_lk;
    DB_ENV **blkseq_env;
    DB **blkseq[2];
    time_t blkseq_last_roll_time;
    DB_LSN *blkseq_last_lsn[2];
    LISTC_T(struct seen_blkseq) blkseq_log_list[2];

    uint32_t genid_format;

    /* we keep a per bdb_state copy to enhance locality */
    unsigned int bmaszthresh;
    comdb2bma bma;

    pthread_mutex_t durable_lsn_lk;
    pthread_cond_t durable_lsn_wait;

    uint16_t *fld_hints;
};

/* define our net user types */
enum {
    USER_TYPE_BERKDB_REP = 1,
    USER_TYPE_BERKDB_NEWSEQ = 2,
    USER_TYPE_BERKDB_FILENUM = 3,
    USER_TYPE_TEST = 4,
    USER_TYPE_ADD = 5,
    USER_TYPE_DEL = 6,
    USER_TYPE_DECOM = 7,
    USER_TYPE_ADD_DUMMY = 8,
    USER_TYPE_REPTRC = 9,
    USER_TYPE_RECONNECT = 10,
    USER_TYPE_LSNCMP = 11,
    USER_TYPE_RESYNC
        = 12,
    USER_TYPE_DOWNGRADEANDLOSE = 13,
    USER_TYPE_INPROCMSG = 14,
    USER_TYPE_COMMITDELAYMORE = 15,
    USER_TYPE_COMMITDELAYNONE = 16,
    /* note: 17 is deliberately skipped in this numbering */
    USER_TYPE_MASTERCMPCONTEXTLIST = 18,
    USER_TYPE_GETCONTEXT = 19,
    USER_TYPE_HEREISCONTEXT = 20,
    USER_TYPE_TRANSFERMASTER = 21,
    USER_TYPE_GBLCONTEXT = 22,
    USER_TYPE_YOUARENOTCOHERENT = 23,
    USER_TYPE_YOUARECOHERENT = 24,
    /* the following take consecutive implicit values from 25 on */
    USER_TYPE_UDP_ACK,
    USER_TYPE_UDP_PING,
    USER_TYPE_UDP_TIMESTAMP,
    USER_TYPE_UDP_TIMESTAMP_ACK,
    USER_TYPE_UDP_PREFAULT,
    USER_TYPE_TCP_TIMESTAMP,
    USER_TYPE_TCP_TIMESTAMP_ACK,
    USER_TYPE_PING_TIMESTAMP,
    USER_TYPE_PING_TIMESTAMP_ACK,
    USER_TYPE_ANALYZED_TBL,
    USER_TYPE_COHERENCY_LEASE,
    USER_TYPE_PAGE_COMPACT,

    /* by hostname messages */
    USER_TYPE_DECOM_NAME,
    USER_TYPE_ADD_NAME,
    USER_TYPE_DEL_NAME,
    USER_TYPE_TRANSFERMASTER_NAME,
    USER_TYPE_DURABLE_LSN,
    USER_TYPE_REQ_START_LSN
};

void print(bdb_state_type *bdb_state, char *format, ...);

/* LSN + delta pair; size pinned by the compile-time assert below. */
typedef struct {
    DB_LSN lsn;
    int delta;
} lsn_cmp_type;
enum { BDB_LSN_CMP_TYPE_LEN = 8 + 4 };
BB_COMPILE_TIME_ASSERT(bdb_lsn_cmp_type,
                       sizeof(lsn_cmp_type) == BDB_LSN_CMP_TYPE_LEN);

/* Serialize an lsn_cmp_type into [p_buf, p_buf_end); returns the advanced
   buffer pointer. */
uint8_t *bdb_lsn_cmp_type_put(const lsn_cmp_type *p_lsn_cmp_type,
                              uint8_t *p_buf, const uint8_t *p_buf_end);
/* Deserialize; counterpart of bdb_lsn_cmp_type_put. */
const uint8_t *bdb_lsn_cmp_type_get(lsn_cmp_type *p_lsn_cmp_type,
                                    const uint8_t *p_buf,
                                    const uint8_t *p_buf_end);

/* Coherency lease payload; size pinned by the assert below. */
typedef struct colease {
    u_int64_t issue_time;
    u_int32_t lease_ms;
    u_int8_t fluff[4]; /* pads the struct out to COLEASE_TYPE_LEN bytes */
} colease_t;
enum { COLEASE_TYPE_LEN = 8 + 4 + 4 };
BB_COMPILE_TIME_ASSERT(colease_type_len,
                       sizeof(colease_t) == COLEASE_TYPE_LEN);
const uint8_t *colease_type_get(colease_t *p_colease_type,
                                const uint8_t *p_buf,
                                const uint8_t *p_buf_end);
uint8_t *colease_type_put(const colease_t *p_colease_type, uint8_t *p_buf,
                          uint8_t *p_buf_end);

/* Durable-LSN payload; size pinned by the assert below. */
typedef struct udp_durable_lsn_type {
    DB_LSN lsn;
    uint32_t generation;
} udp_durable_lsn_t;
enum { UDP_DURABLE_LSN_TYPE_LEN = sizeof(DB_LSN) + sizeof(uint32_t) };
BB_COMPILE_TIME_ASSERT(udp_durable_lsn_type,
                       sizeof(udp_durable_lsn_t) == UDP_DURABLE_LSN_TYPE_LEN);
uint8_t *
udp_durable_lsn_type_put(const udp_durable_lsn_t *p_udp_durable_lsn_type,
                         uint8_t *p_buf, uint8_t *p_buf_end);
const uint8_t *
udp_durable_lsn_type_get(udp_durable_lsn_t *p_udp_durable_lsn_type,
                         const uint8_t *p_buf, const uint8_t *p_buf_end);

/* Each data item fragment has this header. */
struct bdb_queue_header {
    /* zero based index of this fragment */
    bbuint16_t fragment_no;

    /* how many fragments make up this record - must be at least 1! */
    bbuint16_t num_fragments;

    /* genid of this item */
    bbuint32_t genid[2];

    /* the total size of this item */
    bbuint32_t total_sz;

    /* the size of this fragment in bytes */
    bbuint32_t fragment_sz;

    /* a bit is set for each consumer that has not yet consumed this
       record */
    bbuint32_t consumer_mask;

    /* this is a debug feature - prod build just writes zero here.
     * to make sure I reassemble all the fragments correctly. */
    bbuint32_t crc32;

    /* zero for now */
    bbuint32_t reserved;

    /*char data[1];*/
};
enum { QUEUE_HDR_LEN = 2 + 2 + 8 + 4 + 4 + 4 + 4 + 4 };
BB_COMPILE_TIME_ASSERT(bdb_queue_header_size,
                       sizeof(struct bdb_queue_header) == QUEUE_HDR_LEN);

/* Cursor movement requests for the internal fetch routines. */
enum {
    FETCH_INT_CUR = 0,
    FETCH_INT_NEXT = 1,
    FETCH_INT_PREV = 2,
    FETCH_INT_CUR_LASTDUPE = 3,
    FETCH_INT_CUR_BY_RECNUM = 4
};

extern pthread_key_t bdb_key;

char *bdb_strerror(int error);
char *bdb_trans(const char infile[], char outfile[]);

/* allocator wrappers */
void *mymalloc(size_t size);
void myfree(void *ptr);
void *myrealloc(void *ptr, size_t size);

void bdb_get_txn_stats(bdb_state_type *bdb_state, int *txn_commits);

int bdb_upgrade(bdb_state_type *bdb_state, int *done);
int bdb_downgrade(bdb_state_type *bdb_state, int *done);
int bdb_downgrade_noelect(bdb_state_type *bdb_state);
int get_seqnum(bdb_state_type *bdb_state, const char *host);
void bdb_set_key(bdb_state_type *bdb_state);
uint64_t subtract_lsn(bdb_state_type *bdb_state, DB_LSN *lsn1, DB_LSN *lsn2);
void get_my_lsn(bdb_state_type *bdb_state, DB_LSN *lsnout);
void rep_all_req(bdb_state_type *bdb_state);
void get_master_lsn(bdb_state_type
*bdb_state, DB_LSN *lsnout); void bdb_print_log_files(bdb_state_type *bdb_state); char *lsn_to_str(char lsn_str[], DB_LSN *lsn); int bdb_get_datafile_num_files(bdb_state_type *bdb_state, int dtanum); /* genid related utilities */ unsigned long long get_genid(bdb_state_type *bdb_state, unsigned int dtafile); DB *get_dbp_from_genid(bdb_state_type *bdb_state, int dtanum, unsigned long long genid, int *out_dtafile); unsigned long long set_participant_stripeid(bdb_state_type *bdb_state, int stripeid, unsigned long long genid); unsigned long long set_updateid(bdb_state_type *bdb_state, int updateid, unsigned long long genid); int get_participant_stripe_from_genid(bdb_state_type *bdb_state, unsigned long long genid); int get_updateid_from_genid(bdb_state_type *bdb_state, unsigned long long genid); int max_participant_stripeid(bdb_state_type *bdb_state); int max_updateid(bdb_state_type *bdb_state); unsigned long long get_search_genid(bdb_state_type *bdb_state, unsigned long long genid); unsigned long long get_search_genid_rowlocks(unsigned long long genid); int bdb_inplace_cmp_genids(bdb_state_type *bdb_state, unsigned long long g1, unsigned long long g2); /* prototype pinched from Berkeley DB internals. 
*/ int __rep_send_message(DB_ENV *, char *, u_int32_t, DB_LSN *, const DBT *, u_int32_t, void *); /* Error handling utilities */ int bdb_dbcp_close(DBC **dbcp_ptr, int *bdberr, const char *context_str); void bdb_cursor_error(bdb_state_type *bdb_state, DB_TXN *tid, int rc, int *bdberr, const char *context_str); void bdb_get_error(bdb_state_type *bdb_state, DB_TXN *tid, int rc, int not_found_rc, int *bdberr, const char *context_str); void bdb_c_get_error(bdb_state_type *bdb_state, DB_TXN *tid, DBC **dbcp, int rc, int not_found_rc, int *bdberr, const char *context_str); /* compression wrappers for I/O */ void bdb_maybe_compress_data(bdb_state_type *bdb_state, DBT *data, DBT *data2); void bdb_maybe_uncompress_data(bdb_state_type *bdb_state, DBT *data, DBT *data2); int bdb_cget_unpack(bdb_state_type *bdb_state, DBC *dbcp, DBT *key, DBT *data, uint8_t *ver, u_int32_t flags); int bdb_cget_unpack_blob(bdb_state_type *bdb_state, DBC *dbcp, DBT *key, DBT *data, uint8_t *ver, u_int32_t flags); int bdb_get_unpack_blob(bdb_state_type *bdb_state, DB *db, DB_TXN *tid, DBT *key, DBT *data, uint8_t *ver, u_int32_t flags); int bdb_get_unpack(bdb_state_type *bdb_state, DB *db, DB_TXN *tid, DBT *key, DBT *data, uint8_t *ver, u_int32_t flags); int bdb_put_pack(bdb_state_type *bdb_state, int is_blob, DB *db, DB_TXN *tid, DBT *key, DBT *data, u_int32_t flags); int bdb_cput_pack(bdb_state_type *bdb_state, int is_blob, DBC *dbcp, DBT *key, DBT *data, u_int32_t flags); int bdb_put(bdb_state_type *bdb_state, DB *db, DB_TXN *tid, DBT *key, DBT *data, u_int32_t flags); int bdb_cposition(bdb_state_type *bdb_state, DBC *dbcp, DBT *key, u_int32_t flags); int bdb_update_updateid(bdb_state_type *bdb_state, DBC *dbcp, unsigned long long oldgenid, unsigned long long newgenid); int bdb_cget(bdb_state_type *bdb_state, DBC *dbcp, DBT *key, DBT *data, u_int32_t flags); void init_odh(bdb_state_type *bdb_state, struct odh *odh, void *rec, size_t reclen, int dtanum); int bdb_pack(bdb_state_type *bdb_state, 
const struct odh *odh, void *to, size_t tolen, void **recptr, uint32_t *recsize, void **freeptr); int bdb_unpack(bdb_state_type *bdb_state, const void *from, size_t fromlen, void *to, size_t tolen, struct odh *odh, void **freeptr); int ip_updates_enabled_sc(bdb_state_type *bdb_state); int ip_updates_enabled(bdb_state_type *bdb_state); /* file.c */ void delete_log_files(bdb_state_type *bdb_state); void delete_log_files_list(bdb_state_type *bdb_state, char **list); void delete_log_files_chkpt(bdb_state_type *bdb_state); int bdb_checkpoint_list_init(); int bdb_checkpoint_list_push(DB_LSN lsn, DB_LSN ckp_lsn, int32_t timestamp); void bdb_checkpoint_list_get_ckplsn_before_lsn(DB_LSN lsn, DB_LSN *lsnout); /* rep.c */ int bdb_is_skip(bdb_state_type *bdb_state, int node); void bdb_set_skip(bdb_state_type *bdb_state, int node); void bdb_clear_skip(bdb_state_type *bdb_state, int node); /* remove all nodes from skip list */ void bdb_clear_skip_list(bdb_state_type *bdb_state); /* tran.c */ int bdb_tran_rep_handle_dead(bdb_state_type *bdb_state); /* useful debug stuff we've exposed in berkdb */ extern void __bb_dbreg_print_dblist(DB_ENV *dbenv, void (*prncallback)(void *userptr, const char *fmt, ...), void *userptr); #if 0 extern void __db_cprint(DB *db); #endif void bdb_queue_init_priv(bdb_state_type *bdb_state); unsigned long long bdb_get_gblcontext(bdb_state_type *bdb_state); int bdb_apprec(DB_ENV *dbenv, DBT *log_rec, DB_LSN *lsn, db_recops op); int bdb_rowlock_int(DB_ENV *dbenv, DB_TXN *txn, unsigned long long genid, int exclusive); int rep_caught_up(bdb_state_type *bdb_state); void call_for_election(bdb_state_type *bdb_state); int bdb_next_dtafile(bdb_state_type *bdb_state); int ll_key_add(bdb_state_type *bdb_state, unsigned long long genid, tran_type *tran, int ixnum, DBT *dbt_key, DBT *dbt_data); int ll_dta_add(bdb_state_type *bdb_state, unsigned long long genid, DB *dbp, tran_type *tran, int dtafile, int dtastripe, DBT *dbt_key, DBT *dbt_data, int flags); int 
ll_dta_upd(bdb_state_type *bdb_state, int rrn, unsigned long long oldgenid, unsigned long long *newgenid, DB *dbp, tran_type *tran, int dtafile, int dtastripe, int participantstripid, int use_new_genid, DBT *verify_dta, DBT *dta, DBT *old_dta_out); int ll_dta_upd_rowlocks(bdb_state_type *bdb_state, int rrn, unsigned long long oldgenid, unsigned long long *newgenid, DB *dbp, tran_type *tran, int dtafile, int dtastripe, int participantstripid, int use_new_genid, DBT *verify_dta, DBT *dta, DBT *old_dta_out, DBT *lock, DB_LOCK *lk, unsigned long long *pvgenid, DBT *pvlock, DB_LOCK *plk); int ll_dta_upd_blob(bdb_state_type *bdb_state, int rrn, unsigned long long oldgenid, unsigned long long newgenid, DB *dbp, tran_type *tran, int dtafile, int participantstripid, int use_new_genid, DBT *dta); int ll_dta_upd_blob_w_opt(bdb_state_type *bdb_state, int rrn, unsigned long long oldgenid, unsigned long long *newgenid, DB *dbp, tran_type *tran, int dtafile, int dtastripe, int participantstripid, int use_new_genid, DBT *verify_dta, DBT *dta, DBT *old_dta_out); int ll_key_upd(bdb_state_type *bdb_state, tran_type *tran, char *table_name, unsigned long long oldgenid, unsigned long long genid, void *key, int ixnum, int keylen, void *dta, int dtalen); int ll_key_upd_rowlocks(bdb_state_type *bdb_state, tran_type *tran, char *table_name, unsigned long long oldgenid, unsigned long long genid, void *key, int ixnum, int keylen, void *dta, int dtalen); int ll_key_del(bdb_state_type *bdb_state, tran_type *tran, int ixnum, void *key, int keylen, int rrn, unsigned long long genid, int *payloadsz); int ll_dta_del(bdb_state_type *bdb_state, tran_type *tran, int rrn, unsigned long long genid, DB *dbp, int dtafile, int dtastripe, DBT *dta_out); int ll_key_del_rowlocks(bdb_state_type *bdb_state, tran_type *tran, int ixnum, void *key, int keylen, int rrn, unsigned long long genid, int *payloadsz, unsigned long long *pvgenid, unsigned long long *nxgenid, DBT *pvlock, DB_LOCK *plk, DBT *nxlock, 
DB_LOCK *nlk); int ll_dta_del_rowlocks(bdb_state_type *bdb_state, tran_type *tran, int rrn, unsigned long long genid, DB *dbp, int dtafile, int dtastripe, DBT *dta_out, unsigned long long *pvgenid, DBT *lkname, DB_LOCK *prevlk); int ll_dta_upgrade(bdb_state_type *bdb_state, int rrn, unsigned long long genid, DB *dbp, tran_type *tran, int dtafile, int dtastripe, DBT *dta); int add_snapisol_logging(bdb_state_type *bdb_state); int phys_key_add(bdb_state_type *bdb_state, tran_type *tran, unsigned long long genid, int ixnum, DBT *dbt_key, DBT *dbt_data); int phys_dta_add(bdb_state_type *bdb_state, tran_type *tran, unsigned long long genid, DB *dbp, int dtafile, int dtastripe, DBT *dbt_key, DBT *dbt_data); int get_physical_transaction(bdb_state_type *bdb_state, tran_type *logical_tran, tran_type **outtran); int phys_dta_upd(bdb_state_type *bdb_state, int rrn, unsigned long long oldgenid, unsigned long long *newgenid, DB *dbp, tran_type *logical_tran, int dtafile, int dtastripe, DBT *verify_dta, DBT *dta); int phys_key_upd(bdb_state_type *bdb_state, tran_type *tran, char *table_name, unsigned long long oldgenid, unsigned long long genid, void *key, int ix, int keylen, void *dta, int dtalen, int llog_payload_len); int phys_rowlocks_log_bench_lk(bdb_state_type *bdb_state, tran_type *logical_tran, int op, int arg1, int arg2, void *payload, int paylen); int phys_key_del(bdb_state_type *bdb_state, tran_type *logical_tran, unsigned long long genid, int ixnum, DBT *key); int phys_dta_del(bdb_state_type *bdb_state, tran_type *logical_tran, int rrn, unsigned long long genid, DB *dbp, int dtafile, int dtastripe); int bdb_llog_add_dta_lk(bdb_state_type *bdb_state, tran_type *tran, unsigned long long genid, int dtafile, int dtastripe); int bdb_llog_del_dta_lk(bdb_state_type *bdb_state, tran_type *tran, unsigned long long genid, DBT *dbt_data, int dtafile, int dtastripe); int bdb_llog_upd_dta_lk(bdb_state_type *bdb_state, tran_type *tran, unsigned long long oldgenid, unsigned long 
long newgenid, int dtafile, int dtastripe, DBT *olddta); int bdb_llog_add_ix_lk(bdb_state_type *bdb_state, tran_type *tran, int ix, unsigned long long genid, DBT *key, int dtalen); int bdb_llog_del_ix_lk(bdb_state_type *bdb_state, tran_type *tran, int ixnum, unsigned long long genid, DBT *dbt_key, int payloadsz); int bdb_llog_upd_ix_lk(bdb_state_type *bdb_state, tran_type *tran, char *table_name, void *key, int keylen, int ix, int dtalen, unsigned long long oldgenid, unsigned long long newgenid); int bdb_llog_rowlocks_bench(bdb_state_type *bdb_state, tran_type *tran, int op, int arg1, int arg2, DBT *lock1, DBT *lock2, void *payload, int paylen); int bdb_llog_commit(bdb_state_type *bdb_state, tran_type *tran, int isabort); int bdb_save_row_int(bdb_state_type *bdb_state_in, DB_TXN *txnid, char table[], unsigned long long genid); int abort_logical_transaction(bdb_state_type *bdb_state, tran_type *tran, DB_LSN *lsn, int about_to_commit); int ll_rowlocks_bench(bdb_state_type *bdb_state, tran_type *tran, int op, int arg1, int arg2, void *payload, int paylen); int ll_checkpoint(bdb_state_type *bdb_state, int force); int bdb_llog_start(bdb_state_type *bdb_state, tran_type *tran, DB_TXN *txn); int bdb_run_logical_recovery(bdb_state_type *bdb_state, int locks_only); tran_type *bdb_tran_continue_logical(bdb_state_type *bdb_state, unsigned long long tranid, int trak, int *bdberr); tran_type *bdb_tran_start_logical_forward_roll(bdb_state_type *bdb_state, unsigned long long tranid, int trak, int *bdberr); tran_type *bdb_tran_start_logical(bdb_state_type *bdb_state, unsigned long long tranid, int trak, int *bdberr); tran_type *bdb_tran_start_logical_sc(bdb_state_type *bdb_state, unsigned long long tranid, int trak, int *bdberr); int ll_undo_add_ix_lk(bdb_state_type *bdb_state, tran_type *tran, char *table_name, int ixnum, void *key, int keylen, DB_LSN *undolsn); int ll_undo_add_dta_lk(bdb_state_type *bdb_state, tran_type *tran, char *table_name, unsigned long long genid, DB_LSN 
*undolsn, int dtafile, int dtastripe); int ll_undo_del_ix_lk(bdb_state_type *bdb_state, tran_type *tran, char *table_name, unsigned long long genid, int ixnum, DB_LSN *undolsn, void *key, int keylen, void *dta, int dtalen); int ll_undo_del_dta_lk(bdb_state_type *bdb_state, tran_type *tran, char *table_name, unsigned long long genid, DB_LSN *undolsn, int dtafile, int dtastripe, void *dta, int dtalen); int ll_undo_upd_dta_lk(bdb_state_type *bdb_state, tran_type *tran, char *table_name, unsigned long long oldgenid, unsigned long long newgenid, void *olddta, int olddta_len, int dtafile, int dtastripe, DB_LSN *undolsn); int ll_undo_upd_ix_lk(bdb_state_type *bdb_state, tran_type *tran, char *table_name, int ixnum, void *key, int keylen, void *dta, int dtalen, DB_LSN *undolsn, void *diff, int difflen, int suffix); int ll_undo_inplace_upd_dta_lk(bdb_state_type *bdb_state, tran_type *tran, char *table_name, unsigned long long oldgenid, unsigned long long newgenid, void *olddta, int olddta_len, int dtafile, int dtastripe, DB_LSN *undolsn); bdb_state_type *bdb_get_table_by_name(bdb_state_type *bdb_state, char *table); int bdb_llog_comprec(bdb_state_type *bdb_state, tran_type *tran, DB_LSN *lsn); DB *bdb_temp_table_gettbl(struct temp_table *tbl); void timeval_to_timespec(struct timeval *tv, struct timespec *ts); void add_millisecs_to_timespec(struct timespec *orig, int millisecs); int setup_waittime(struct timespec *waittime, int waitms); int bdb_keycontainsgenid(bdb_state_type *bdb_state, int ixnum); void send_filenum_to_all(bdb_state_type *bdb_state, int filenum, int nodelay); int bdb_get_file_lwm(bdb_state_type *bdb_state, tran_type *tran, DB_LSN *lsn, int *bdberr); int bdb_set_file_lwm(bdb_state_type *bdb_state, tran_type *tran, DB_LSN *lsn, int *bdberr); int bdb_delete_file_lwm(bdb_state_type *bdb_state, tran_type *tran, int *bdberr); int bdb_update_startlsn(struct tran_tag *intran, DB_LSN *firstlsn); int bdb_release_ltran_locks(bdb_state_type *bdb_state, struct tran_tag 
*ltran, int lockerid); /* Update the startlsn- you must be holding the translist_lk while calling this */ int bdb_update_startlsn_lk(bdb_state_type *bdb_state, struct tran_tag *intran, DB_LSN *firstlsn); tran_type *bdb_tran_begin_logical_int(bdb_state_type *bdb_state, unsigned long long tranid, int trak, int *bdberr); tran_type *bdb_tran_begin_logical_norowlocks_int(bdb_state_type *bdb_state, unsigned long long tranid, int trak, int *bdberr); tran_type *bdb_tran_begin_notxn_int(bdb_state_type *bdb_state, tran_type *parent, int *bdberr); tran_type *bdb_tran_begin_phys(bdb_state_type *bdb_state, tran_type *logical_tran); int bdb_tran_commit_phys(bdb_state_type *bdb_state, tran_type *tran); int bdb_tran_commit_phys_getlsn(bdb_state_type *bdb_state, tran_type *tran, DB_LSN *lsn); int bdb_tran_abort_phys(bdb_state_type *bdb_state, tran_type *tran); int bdb_tran_abort_phys_retry(bdb_state_type *bdb_state, tran_type *tran); /* for either logical or berk txns */ int bdb_tran_abort_int(bdb_state_type *bdb_state, tran_type *tran, int *bdberr, void *blkseq, int blklen, void *seqkey, int seqkeylen, int *priority); int ll_dta_del(bdb_state_type *bdb_state, tran_type *tran, int rrn, unsigned long long genid, DB *dbp, int dtafile, int dtastripe, DBT *dta_out); int form_stripelock_keyname(bdb_state_type *bdb_state, int stripe, char *keynamebuf, DBT *dbt_out); int form_rowlock_keyname(bdb_state_type *bdb_state, int ixnum, unsigned long long genid, char *keynamebuf, DBT *dbt_out); int form_keylock_keyname(bdb_state_type *bdb_state, int ixnum, void *key, int keylen, char *keynamebuf, DBT *dbt_out); void hexdumpdbt(DBT *dbt); void set_gblcontext(bdb_state_type *bdb_state, unsigned long long gblcontext); unsigned long long get_gblcontext(bdb_state_type *bdb_state); void bdb_bdblock_debug_init(bdb_state_type *bdb_state); /* berkdb creator function */ bdb_berkdb_t *bdb_berkdb_open(bdb_cursor_impl_t *cur, int type, int maxdata, int maxkey, int *bdberr); void 
bdb_update_ltran_lsns(bdb_state_type *bdb_state, DB_LSN regop_lsn, const void *args, unsigned int rectype); int update_shadows_beforecommit(bdb_state_type *bdb_state, DB_LSN *lsn, unsigned long long *commit_genid, int is_master); int timestamp_lsn_keycmp(void *_, int key1len, const void *key1, int key2len, const void *key2); /** * Return a cursor to a shadow file, either index or data * "create" indicate if the shadow file should be created (1) * or not (0) if it does not exist * Returned cursor is cached for further usage * */ tmpcursor_t *bdb_tran_open_shadow(bdb_state_type *bdb_state, int dbnum, tran_type *shadow_tran, int idx, int type, int create, int *bdberr); /** * Create the underlying shadow table as above, but do not return * a cursor. * */ void bdb_tran_open_shadow_nocursor(bdb_state_type *bdb_state, int dbnum, tran_type *shadow_tran, int idx, int type, int *bdberr); /** * Creates a shadow btree if needed (if there are backfill logs for it) * Returns the shadow btree if created/existing with the same semantics * as bdb_tran_open_shadow * */ tmpcursor_t *bdb_osql_open_backfilled_shadows(bdb_cursor_impl_t *cur, struct bdb_osql_trn *trn, int type, int *bdberr); /** * I need a temp table that does not jump at start after reaching * the end of file * */ int bdb_temp_table_next_norewind(bdb_state_type *bdb_state, struct temp_cursor *cursor, int *bdberr); int bdb_temp_table_prev_norewind(bdb_state_type *bdb_state, struct temp_cursor *cursor, int *bdberr); bdb_state_type *bdb_get_table_by_name_dbnum(bdb_state_type *bdb_state, char *table, int *dbnum); int bdb_get_lsn_lwm(bdb_state_type *bdb_state, DB_LSN *lsnout); void *bdb_cursor_dbcp(bdb_cursor_impl_t *cur); extern int gbl_temptable_pool_capacity; hash_t *bdb_temp_table_histhash_init(void); int bdb_temp_table_create_pool_wrapper(void **tblp, void *bdb_state_arg); int bdb_temp_table_destroy_pool_wrapper(void *tbl, void *bdb_state_arg); int bdb_temp_table_move(bdb_state_type *bdb_state, struct temp_cursor 
*cursor, int how, int *bdberr); int bdb_temp_table_keysize(struct temp_cursor *cursor); int bdb_temp_table_datasize(struct temp_cursor *cursor); void *bdb_temp_table_key(struct temp_cursor *cursor); void *bdb_temp_table_data(struct temp_cursor *cursor); int bdb_temp_table_stat(bdb_state_type *bdb_state, DB_MPOOL_STAT **gspp); int bdb_get_active_logical_transaction_lsns(bdb_state_type *bdb_state, DB_LSN **lsnout, int *numlsns, int *bdberr, tran_type *shadow_tran); unsigned long long get_lowest_genid_for_datafile(int file); int bdb_get_lid_from_cursortran(cursor_tran_t *curtran); DBC *get_cursor_for_cursortran_flags(cursor_tran_t *curtran, DB *db, u_int32_t flags, int *bdberr); int bdb_bdblock_debug_enabled(void); int bdb_reconstruct_add(bdb_state_type *state, DB_LSN *startlsn, void *key, int keylen, void *data, int datalen, int *p_outlen); int bdb_reconstruct_delete(bdb_state_type *state, DB_LSN *startlsn, int *page, int *index, void *key, int keylen, void *data, int datalen, int *outdatalen); int bdb_write_preamble(bdb_state_type *bdb_state, int *bdberr); int bdb_reconstruct_key_update(bdb_state_type *bdb_state, DB_LSN *startlsn, void **diff, int *offset, int *difflen); int bdb_reconstruct_inplace_update(bdb_state_type *bdb_state, DB_LSN *startlsn, void *allcd, int allcd_sz, int *offset, int *outlen, int *outpage, int *outidx); unsigned long long get_id(bdb_state_type *bdb_state); int bdb_get_active_stripe_int(bdb_state_type *bdb_state); void bdb_dump_active_locks(bdb_state_type *bdb_state, FILE *out); void bdb_dump_cursors(bdb_state_type *bdb_state, FILE *out); /* All flavors of rowlocks */ int bdb_lock_ix_value_write(bdb_state_type *bdb_state, tran_type *tran, int idx, DBT *dbt_key, DB_LOCK *lk, DBT *lkname); int bdb_lock_row_write_getlock(bdb_state_type *bdb_state, tran_type *tran, int idx, unsigned long long genid, DB_LOCK *dblk, DBT *lkname); int bdb_lock_row_write_getlock_fromlid(bdb_state_type *bdb_state, int lid, int idx, unsigned long long genid, DB_LOCK 
*lk, DBT *lkname); /* Minmax lock routines - these are locks on start/end of a file */ int bdb_lock_minmax(bdb_state_type *bdb_state, int ixnum, int stripe, int minmax, int how, DB_LOCK *dblk, DBT *lkname, int lid, int trylock); /* Get-lock protocol functions. These expect the cursor to be positioned. */ /* rowlocks have the fileid of the row baked into them */ int bdb_get_row_lock(bdb_state_type *bdb_state, int rowlock_lid, int idx, DBC *dbcp, unsigned long long genid, DB_LOCK *rlk, DBT *lkname, int how); int bdb_get_row_lock_pfunc(bdb_state_type *bdb_state, int rowlock_lid, int idx, DBC *dbcp, int (*pfunc)(void *), void *arg, unsigned long long genid, DB_LOCK *rlk, DBT *lkname, int how); int bdb_get_row_lock_minmaxlk(bdb_state_type *bdb_state, int rowlock_lid, DBC *dbcp, int idx, int stripe, int minmax, DB_LOCK *rlk, DBT *lkname, int how); int bdb_get_row_lock_minmaxlk_pfunc(bdb_state_type *bdb_state, int rowlock_lid, DBC *dbcp, int (*pfunc)(void *), void *arg, int idx, int stripe, int minmax, DB_LOCK *rlk, DBT *lkname, int how); int bdb_release_lock(bdb_state_type *bdb_state, DB_LOCK *rowlock); int bdb_release_row_lock(bdb_state_type *bdb_state, DB_LOCK *rowlock); int bdb_describe_lock_dbt(bdb_state_type *bdb_state, DBT *dbtlk, char *out, int outlen); int bdb_describe_lock(bdb_state_type *bdb_state, DB_LOCK *lk, char *out, int outlen); int bdb_lock_row_fromlid(bdb_state_type *bdb_state, int lid, int idx, unsigned long long genid, int how, DB_LOCK *dblk, DBT *lkname, int trylock); int bdb_lock_row_fromlid_int(bdb_state_type *bdb_state, int lid, int idx, unsigned long long genid, int how, DB_LOCK *dblk, DBT *lkname, int trylock, int flags); /* we use this structure to create a dummy cursor to be used for all * non-transactional cursors. 
it is defined below */ struct cursor_tran { unsigned int lockerid; int id; /* debugging */ }; void bdb_curtran_cursor_opened(cursor_tran_t *curtran); void bdb_curtran_cursor_closed(cursor_tran_t *curtran); int bdb_curtran_freed(cursor_tran_t *curtran); extern int __db_count_cursors(DB *db); extern int __dbenv_count_cursors_dbenv(DB_ENV *dbenv); int bdb_dump_log(DB_ENV *dbenv, DB_LSN *startlsn); int release_locks_for_logical_transaction_object(bdb_state_type *bdb_state, tran_type *tran, int *bdberr); extern int bdb_reconstruct_update(bdb_state_type *bdb_state, DB_LSN *startlsn, int *page, int *index, void *key, int keylen, void *data, int datalen); int tran_allocate_rlptr(tran_type *tran, DBT **ptr, DB_LOCK **lptr); int tran_deallocate_pop(tran_type *tran, int count); int tran_reset_rowlist(tran_type *tran); void unpack_index_odh(bdb_state_type *bdb_state, DBT *data, void *foundgenid, void *dta, int dtalen, int *reqdtalen, uint8_t *ver); int bdb_reopen_inline(bdb_state_type *); void bdb_setmaster(bdb_state_type *bdb_state, char *host); int __db_check_all_btree_cursors(DB *dbp, db_pgno_t pgno); void __db_err(const DB_ENV *dbenv, const char *fmt, ...); void call_for_election_and_lose(bdb_state_type *bdb_state); extern int gbl_sql_tranlevel_default; extern int gbl_sql_tranlevel_preserved; extern int gbl_rowlocks; extern int gbl_new_snapisol; extern int gbl_new_snapisol_asof; extern int gbl_new_snapisol_logging; extern int gbl_early; extern int gbl_udp; extern int gbl_prefault_udp; extern int gbl_verify_all_pools; typedef struct udppf_rq { bdb_state_type *bdb_state; unsigned int fileid; unsigned int pgno; } udppf_rq_t; void start_udp_reader(bdb_state_type *bdb_state); void *udpbackup_and_autoanalyze_thd(void *arg); int do_ack(bdb_state_type *bdb_state, DB_LSN permlsn, uint32_t generation); void berkdb_receive_rtn(void *ack_handle, void *usr_ptr, char *from_host, int usertype, void *dta, int dtalen, uint8_t is_tcp); void berkdb_receive_msg(void *ack_handle, void 
*usr_ptr, char *from_host, int usertype, void *dta, int dtalen, uint8_t is_tcp); void receive_coherency_lease(void *ack_handle, void *usr_ptr, char *from_host, int usertype, void *dta, int dtalen, uint8_t is_tcp); void receive_start_lsn_request(void *ack_handle, void *usr_ptr, char *from_host, int usertype, void *dta, int dtalen, uint8_t is_tcp); void receive_durable_lsn(void *ack_handle, void *usr_ptr, char *from_host, int usertype, void *dta, int dtalen, uint8_t is_tcp); uint8_t *rep_berkdb_seqnum_type_put(const seqnum_type *p_seqnum_type, uint8_t *p_buf, const uint8_t *p_buf_end); uint8_t *rep_udp_filepage_type_put(const filepage_type *p_filepage_type, uint8_t *p_buf, const uint8_t *p_buf_end); void poke_updateid(void *buf, int updateid); void bdb_genid_sanity_check(bdb_state_type *bdb_state, unsigned long long genid, int stripe); /* Request on the wire */ typedef struct pgcomp_snd { int32_t id; uint32_t size; /* payload */ } pgcomp_snd_t; enum { BDB_PGCOMP_SND_TYPE_LEN = 4 + 4 }; BB_COMPILE_TIME_ASSERT(bdb_pgcomp_snd_type, sizeof(pgcomp_snd_t) == BDB_PGCOMP_SND_TYPE_LEN); const uint8_t *pgcomp_snd_type_get(pgcomp_snd_t *p_snd, const uint8_t *p_buf, const uint8_t *p_buf_end); uint8_t *pgcomp_snd_type_put(const pgcomp_snd_t *p_snd, uint8_t *p_buf, const uint8_t *p_buf_end, const void *data); /* Page compact request on receiver's end */ typedef struct pgcomp_rcv { bdb_state_type *bdb_state; int32_t id; uint32_t size; char data[1]; } pgcomp_rcv_t; int enqueue_pg_compact_work(bdb_state_type *bdb_state, int32_t fileid, uint32_t size, const void *data); void add_dummy(bdb_state_type *); int bdb_add_dummy_llmeta(void); int bdb_have_ipu(bdb_state_type *bdb_state); typedef struct ack_info_t ack_info; void handle_tcp_timestamp(bdb_state_type *, ack_info *, char *to); void handle_tcp_timestamp_ack(bdb_state_type *, ack_info *); void handle_ping_timestamp(bdb_state_type *, ack_info *, char *to); unsigned long long bdb_logical_tranid(void *tran); int 
bdb_lite_list_records(bdb_state_type *bdb_state, int (*userfunc)(bdb_state_type *bdb_state, void *key, int keylen, void *data, int datalen, int *bdberr), int *bdberr); int bdb_osql_cache_table_versions(bdb_state_type *bdb_state, tran_type *tran, int trak, int *bdberr); int bdb_temp_table_destroy_lru(struct temp_table *tbl, bdb_state_type *bdb_state, int *last, int *bdberr); void wait_for_sc_to_stop(void); void bdb_temp_table_init(bdb_state_type *bdb_state); int is_incoherent(bdb_state_type *bdb_state, const char *host); int berkdb_start_logical(DB_ENV *dbenv, void *state, uint64_t ltranid, DB_LSN *lsn); int berkdb_commit_logical(DB_ENV *dbenv, void *state, uint64_t ltranid, DB_LSN *lsn); void send_coherency_leases(bdb_state_type *bdb_state, int lease_time, int *do_add); void udp_send_durable_lsn(bdb_state_type *bdb_state, DB_LSN *lsn, uint32_t gen); int bdb_durable_block(bdb_state_type *bdb_state, DB_LSN *commit_lsn, uint32_t original_gen, int wait_durable); int has_low_headroom(const char *path, int threshold, int debug); #endif /* __bdb_int_h__ */
35.203822
81
0.665641
[ "object" ]
aeb017dba5d8247d57525f4a30aa03ce2ff7952a
18,382
h
C
ONElib.h
richarddurbin/phynder
48b184f76c67985ef019ce199e1a1a7dafdc51a5
[ "MIT" ]
7
2019-12-20T16:52:50.000Z
2020-04-08T14:49:06.000Z
ONElib.h
richarddurbin/phynder
48b184f76c67985ef019ce199e1a1a7dafdc51a5
[ "MIT" ]
null
null
null
ONElib.h
richarddurbin/phynder
48b184f76c67985ef019ce199e1a1a7dafdc51a5
[ "MIT" ]
null
null
null
/****************************************************************************************** * * File: ONElib.h * Header for ONE file reading and writing * * Author: Richard Durbin (rd109@cam.ac.uk) * Copyright (C) Richard Durbin, Cambridge University, 2019 * * HISTORY: * Last edited: May 16 19:07 2020 (rd109) * * Dec 27 09:46 2019 (gene): style edits * * Created: Sat Feb 23 10:12:43 2019 (rd109) * *****************************************************************************************/ #ifndef ONE_DEFINED #define ONE_DEFINED #include <stdio.h> // for FILE etc. #include <inttypes.h> // for standard size int types and their PRI print macros #include <stdbool.h> // for standard bool types #include <limits.h> // for INT_MAX etc. #include <pthread.h> /*********************************************************************************** * * DATA TYPES * **********************************************************************************/ // Basic Types #ifndef U8_DEFINED #define U8_DEFINED typedef int64_t I64; typedef unsigned char U8; #endif // U8_DEFINED typedef enum { oneINT = 1, oneREAL, oneCHAR, oneSTRING, oneINT_LIST, oneREAL_LIST, oneSTRING_LIST, oneDNA } OneType; extern char* oneTypeString[] ; // = { 0, "INT", "REAL", "CHAR", "STRING", "INT_LIST", "REAL_LIST", "STRING_LIST", "DNA" } ; typedef union { I64 i; double r; char c; I64 len; // For lists : top 8 bits encode excess bytes, low 56 length } OneField; typedef struct { char *program; char *version; char *command; char *date; } OneProvenance; typedef struct { char *filename; I64 count; } OneReference; typedef struct { I64 count; I64 max; I64 total; I64 groupCount; I64 groupTotal; } OneCounts; // OneCodecs are a private package for binary one file compression typedef void OneCodec; // forward declaration of opaque type for compression codecs // DNAcodec is a special pre-existing compressor one should use for DNA. // It compresses every base to 2-bits, where any non-ACGT letter is // effectively converted to an A. 
Compression is case insensitive, // but decompression always delivers lower-case. extern OneCodec *DNAcodec; // Record for a particular line type. There is at most one list element. typedef struct { OneCounts accum; // counts read or written to this moment OneCounts given; // counts read from header I64 gCount; // used internally to calculate groupCount and groupTotal I64 gTotal; I64 oCount; // # of objects in prefix before first group (if any) I64 oTotal; // + of objects in prefix (these 2 are for thread parallel apps) int nField; // number of fields OneType *fieldType; // type of each field int listEltSize; // size of list field elements (if present, else 0) int listField; // field index of list char *comment; // the comment on the definition line in the schema bool isUserBuf; // flag for whether buffer is owned by user I64 bufSize; // system buffer and size if not user supplied void *buffer; OneCodec *fieldCodec; // compression codecs and flags OneCodec *listCodec; bool isUseFieldCodec; // on once enough data collected to train associated codec bool isUseListCodec; char binaryTypePack; // binary code for line type, bit 8 set. 
// bit 0: fields compressed // bit 1: list compressed I64 fieldTack; // accumulated training data for this threads fieldCodec (master) I64 listTack; // accumulated training data for this threads codeCodec (master) } OneInfo; // the schema type - the first record is the header spec, then a linked list of primary classes typedef struct OneSchema { char primary[4] ; int nSecondary ; char **secondary ; OneInfo *info[128] ; int nFieldMax ; char objectType ; char groupType ; struct OneSchema *nxt ; } OneSchema ; typedef struct OneHeaderText { char *text ; struct OneHeaderText *nxt ; } OneHeaderText ; // The main OneFile type - this is the primary handle used by the end user typedef struct { // this field may be set by the user bool isCheckString; // set if want to validate string char by char // these fields may be read by user - but don't change them! char fileType[4]; char subType[4]; char lineType; // current lineType char objectType; // line designation character for primary objects char groupType; // line designation character for groups (optional) I64 line; // current line number I64 byte; // current byte position when writing binary I64 object; // current object - incremented when object line read I64 group; // current group - incremented when group line read OneProvenance *provenance; // if non-zero then count['!'] entries OneReference *reference; // if non-zero then count['<'] entries OneReference *deferred; // if non-zero then count['>'] entries OneField *field; // used to hold the current line - accessed by macros OneInfo *info[128]; // all the per-linetype information I64 codecTrainingSize; // amount of data to see before building codec // fields below here are private to the package FILE *f; bool isWrite; // true if open for writing bool isHeaderOut; // true if header already written bool isBinary; // true if writing a binary file bool inGroup; // set once inside a group bool isLastLineBinary; // needed to deal with newlines on ascii files bool isIndexIn; // 
index read in bool isBig; // are we on a big-endian machine? char lineBuf[128]; // working buffers char numberBuf[32]; int nFieldMax; I64 codecBufSize; char *codecBuf; I64 linePos; // current line position OneHeaderText *headerText; // arbitrary descriptive text that goes with the header char binaryTypeUnpack[256]; // invert binary line code to ASCII line character. int share; // index if slave of threaded write, +nthreads > 0 if master int isFinal; // oneFinalizeCounts has been called on file pthread_mutex_t fieldLock; // Mutexs to protect training accumumulation stats when threadded pthread_mutex_t listLock; } OneFile; // the footer will be in the concatenated result. /*********************************************************************************** * * ROUTINES FOR READING & WRITING ONE FILES IN BOTH ASCII & BINARY (TRANSPARENTLY) * **********************************************************************************/ // CREATING AND DESTROYING SCHEMAS OneSchema *oneSchemaCreateFromFile (char *path) ; OneSchema *oneSchemaCreateFromText (char *text) ; // These functions create a schema handle that can be used to open One-code data files // for reading and writing. A schema file is itself a One-code file, consisting of // a set of objects, one per primary file type. Valid lines in this file are: // P <primary file type> // a string of length 3 // S <secondary file type> // a string of length 3 - any number of these // D <char> <field_list> // definition of line with uncompressed fields // C <char> <field_list> // definition of line with compressed fields // <char> must be a lower or upper case letter. Maximum one lower case letter // determines the group type. The first upper case letter definition determines // the objects in this file type. // <field_list> is a list of field types from: // CHAR, INT, REAL, STRING, INT_LIST, REAL_LIST, STRING_LIST, DNA // By convention comments on each line explain the definition. 
// Example, with lists and strings preceded by their length in OneCode style // P 3 seq this is a sequence file // D S 1 3 DNA the DNA sequence - each S line starts an object // D Q 1 6 STRING the phred encoded quality score + ASCII 33 // C N 4 4 REAL 4 REAL 4 REAL 4 REAL signal to noise ratio in A, C, G, T channels // D g 2 3 INT 6 STRING group designator: number of objects, name // The ...FromText() alternative writes the text to a temp file and reads it with // oneSchemaCreateFromFile(). This allows code to set the schema. // Internally a schema is a linked list of OneSchema objects, with the first holding // the (hard-coded) schema for the header and footer, and the remainder each // corresponding to one primary file type. void oneSchemaDestroy (OneSchema *schema) ; // READING ONE FILES: OneFile *oneFileOpenRead (const char *path, OneSchema *schema, char *type, int nthreads) ; // Open ONE file 'path', either binary or ascii encoded, for reading. // If the file doesn't have a header, then 'type' must be specified, // otherwise, if 'type' is non-zero it must match the header type. // All header information (if present) is read. // 'schema' is also optional. If it is NULL then the file must contain its own schema. // If 'schema' is present then it must support 'type', and if the file contains its // own schema, then that must be a subset of the one for this type in 'schema'. // If nthreads > 1 then nthreadds OneFiles are generated as an array and the pointer // to the first, called the master, is returned. The other nthreads-1 files are // called slaves. The package routines are aware of when a OneFile argument is a // slave or master in a parallel group. The master recieves provenance, counts, etc. // The slaves only read data and have the virture of sharing indices and codecs with // the master if relevant. bool oneFileCheckSchema (OneFile *vf, char *textSchema) ; // Checks if file schema is consistent with text schema. Mismatches are reported to stderr. 
// Filetype and all linetypes in text must match. File schema can contain additional linetypes. // e.g. if (! oneFileCheckSchema (vf, "P 3 seq\nD S 1 3 DNA\nD Q 1 6 STRING\nD P 0\n")) die () ; // This is provided to enable a program to ensure that its assumptions about data layout // are satisfied. char oneReadLine (OneFile *vf); // Read the next ONE formatted line returning the line type of the line, or 0 // if at the end of the data section. The content macros immediately below are // used to access the information of the line most recently read. #define oneInt(vf,x) ((vf)->field[x].i) #define oneReal(vf,x) ((vf)->field[x].r) #define oneChar(vf,x) ((vf)->field[x].c) #define _LF(vf) ((vf)->info[(int)(vf)->lineType]->listField) #define oneLen(vf) ((vf)->field[_LF(vf)].len & 0xffffffffffffffll) #define oneString(vf) (char *) ((vf)->info[(int) (vf)->lineType]->buffer) #define oneIntList(vf) (I64 *) ((vf)->info[(int) (vf)->lineType]->buffer) #define oneRealList(vf) (double *) ((vf)->info[(int) (vf)->lineType]->buffer) #define oneNextString(vf,s) (s + strlen(s) + 1) // Access field information. The index x of a list object is not required as there is // only one list per line, stored in ->buffer. // A "string list" is implicitly supported, get the first string with oneString, and // subsequent strings sequentially with oneNextString, e.g.: // // char *s = oneString(vf); // for (i = 0; i < oneLen(vf); i++) // { // do something with i'th string // s = oneNextString(vf,s); // } char *oneReadComment (OneFile *vf); // Can be called after oneReadLine() to read any optional comment text after the fixed fields. // Returns NULL if there is no comment. // WRITING ONE FILES: OneFile *oneFileOpenWriteNew (const char *path, OneSchema *schema, char *type, bool isBinary, int nthreads); OneFile *oneFileOpenWriteFrom (const char *path, OneFile *vfIn, bool isBinary, int nthreads); // Create a new oneFile that will be written to 'path'. 
For the 'New' variant supply // the file type, subtype (if non-zero), and whether it should be binary or ASCII. // For the 'From' variant, specify binary or ASCII, schema and all other header // information is inherited from 'vfIn', where the count stats are from vfIn's // accumulation (assumes vfIn has been fully read or written) if 'useAccum is true, // and from vfIn's header otherwise. // If nthreads > 1 then nthreads OneFiles are generated as an array and the pointer // to the first, called the master, is returned. The other nthreads-1 files are // called slaves. The package routines are aware of when a OneFile argument is a // slave or master in a parallel group. The slaves are expected to only write data // lines, with the master adding provenance, producing the header, and then some // segment of the initial data lines. Upon close the final result is effectively // the concatenation of the master, followed by the output of each slave in sequence. bool oneInheritProvenance (OneFile *vf, OneFile *source); bool oneInheritReference (OneFile *vf, OneFile *source); bool oneInheritDeferred (OneFile *vf, OneFile *source); // Add all provenance/reference/deferred entries in source to header of vf. Must be // called before call to oneWriteHeader. bool oneAddProvenance (OneFile *vf, char *prog, char *version, char *command, char *dateTime); bool oneAddReference (OneFile *vf, char *filename, I64 count); bool oneAddDeferred (OneFile *vf, char *filename); // Append provenance/reference/deferred to header information. Must be called before // call to oneWriteHeader. Current data & time filled in if 'dateTime' == NULL. void oneWriteHeader (OneFile *vf); // Write out the header for file. For ASCII output, if you want the header to contain // count information then you must create and fill the relevant OneCounts objects before // calling this. For binary output, the counts will be accumulated and output in a // footer upon oneClose. 
void oneWriteLine (OneFile *vf, char lineType, I64 listLen, void *listBuf); // Set up a line for output just as it would be returned by oneReadLine and then call // this routine to output the line (ASCII or binary). // Use the macros above on the l.h.s. of assignments to fill fields (e.g. oneInt(vf,2) = 3). // For lists, give the length in the listLen argument, and either place the list data in your // own buffer and give it as listBuf, or put in the line's buffer and set listBuf == NULL. void oneWriteComment (OneFile *vf, char *comment); // Adds a comment to the current line. Need to use this not fprintf() so as to keep the // index correct in binary mode. // CLOSING FILES (FOR BOTH READ & WRITE) void oneFileClose (OneFile *vf); // Close vf (opened either for reading or writing). Finalizes counts, merges theaded files, // and writes footer if binary. Frees all non-user memory associated with vf. // GOTO & BUFFER MANAGEMENT void oneUserBuffer (OneFile *vf, char lineType, void *buffer); // A buffer is used to capture the list element of each line type that has one. // This routine allows you to reassign the buffer to one you've allocated, or // to revert to a default system buffer if 'buffer' = NULL. The previous buffer // (if any) is freed. The user must ensure that a buffer they supply is large // enough. BTW, this buffer is overwritten with each new line read of the given type. bool oneGotoObject (OneFile *vf, I64 i); // Goto i'th object in the file. This only works on binary files, which have an index. I64 oneGotoGroup (OneFile *vf, I64 i); // Goto the first object in group i. Return the size (in objects) of the group, or 0 // if an error (i out of range or vf has not group type). Only works for binary files. 
/*********************************************************************************** * * A BIT ABOUT THE FORMAT OF BINARY FILES * **********************************************************************************/ // <bin file> <- <ASCII Prolog> <$-line> <binary data> <footer> <^-line> <footer-size:int64> // // '$'-line flags file is binary and gives endian // The data block ends with a blank line consisting of '\n' // // EWM: Removed '-' line, simply write off_t to footer start // // <ASCII Prolog> <- <'1'-line> [<'2'-line>] ( <'!'-line> | <'<'-line> | <'>'-line> )* // // The ASCII prolog contains the type, subtype, provenance, reference, and deferred lines // in the ASCII format. The ONE count statistic lines for each data line type are found // in the footer along with binary ';' and ':' lines that encode their compressors as // needed. The footer also contains binary '&' and '*' lines that encode the object index // and group indices, respectively. // // <Binary line> <- <Binary line code + tags> <fields> [<list data>] // // Line codes are >= 128 for binary encoded lines. The low two order bits of these are flags, // so each binary-encoded line type has 4 codes and a table maps these to the ASCII code. // Bit 0 indicates if the fields of the line type are compressed, and Bit 1 indicates if // the list data (if present) is compressed. // // If a field is a list, then the field array element for that field is the list's length // where the low 56 bits encode length, and the high 8 bits encode the # of high-order // 0-bytes in every list element if an INT_LIST (0 otherwise). #endif // ONE_DEFINED /******************* end of file **************/
46.302267
100
0.631596
[ "object" ]
aeb0e3744b5e569a9515b014ce7112d49dc54f30
2,879
h
C
pytorch/torch/csrc/jit/tensorexpr/loopnest.h
zhou3968322/dl-code-read
aca204a986dabe2755becff0f42de1082299d791
[ "MIT" ]
null
null
null
pytorch/torch/csrc/jit/tensorexpr/loopnest.h
zhou3968322/dl-code-read
aca204a986dabe2755becff0f42de1082299d791
[ "MIT" ]
null
null
null
pytorch/torch/csrc/jit/tensorexpr/loopnest.h
zhou3968322/dl-code-read
aca204a986dabe2755becff0f42de1082299d791
[ "MIT" ]
null
null
null
#pragma once #include <unordered_map> #include <unordered_set> #include <vector> #include <torch/csrc/WindowsTorchApiMacro.h> namespace torch { namespace jit { namespace tensorexpr { class Expr; class Var; class Buf; class Tensor; class Function; class Stmt; class For; class Block; class Store; class Dtype; class TORCH_API LoopNest { public: LoopNest(const std::vector<Tensor*>& output_tensors); Stmt* root_stmt() const { return root_stmt_; } std::vector<For*> getLoopStmtsFor(Tensor*) const; Stmt* getLoopBodyFor(Tensor*) const; bool hasLoopBodyFor(Tensor*) const; void vectorize(Stmt*); void computeInline(Stmt* s); void computeInlineWithRandom(Stmt* s); void prepareForCodegen(); void splitWithTail(For* f, int factor, For** outer, For** inner, For** tail); void splitWithMask(For* f, int factor, For** outer, For** inner); void reorderAxis(For* a, For* b); static void unroll(For* f, Stmt** unrolled); void setGPUBlockIndex(For* f, int idx); void setGPUThreadIndex(For* f, int idx); // Insert a temporary computation of statement S in the scope of loop AT. // S is assumed to be a Store or a Block containing a Store. Along with the // computation itself, this transformation inserts Alloc/Free statements for // the temporary buffer used in the computation. void computeAt(Stmt* s, For* at); void rfactor( const Expr* f, const Var* reduction_var, Block* insertion_point = nullptr /* optional */); private: std::vector<Tensor*> findAllNeededTensors( const std::vector<Tensor*>& tensors); Stmt* lowerToStmt(Tensor* t); Stmt* insertAllocFree(Stmt* stmt); std::unordered_set<Function*> inlined_functions_; std::unordered_set<Function*> inlined_random_functions_; std::unordered_map<Tensor*, Stmt*> tensor_to_stmt_; std::unordered_map<Stmt*, Tensor*> stmt_to_tensor_; Stmt* root_stmt_; std::unordered_set<Tensor*> output_tensors_; std::unordered_set<Tensor*> intermediate_tensors_; std::vector<const Buf*> temp_bufs_; // Holds the initializer Expr of buffers that have been initialized. 
std::unordered_map<const Buf*, const Expr*> buf_initializers_; }; TORCH_API Stmt* FlattenIndexes(Stmt* s); // TODO: Revisit this once we decide on how dependencies analysis should look // like. Maybe we would choose to use a different API and BufUse would be // removed, or if we decide to keep it we need to properly document its API. struct BufUse { Stmt* s; bool isStore; }; /* * Returns a map ( Buf -> uses of this Buf), uses are represented as vectors of * BufUse elements, which are Stmt* and a bool isStore flag. The order of uses * in the vectors reflects the order in which the uses appear in the given * statement. */ std::unordered_map<const Buf*, std::vector<BufUse>> findUses(Stmt* s); } // namespace tensorexpr } // namespace jit } // namespace torch
29.680412
79
0.727336
[ "vector" ]
aeb1598ceb92363816e38308002d00a28b5d2558
16,833
h
C
electron/common/DragAction.h
wenfeifei/miniblink49
2ed562ff70130485148d94b0e5f4c343da0c2ba4
[ "Apache-2.0" ]
5,964
2016-09-27T03:46:29.000Z
2022-03-31T16:25:27.000Z
electron/common/DragAction.h
w4454962/miniblink49
b294b6eacb3333659bf7b94d670d96edeeba14c0
[ "Apache-2.0" ]
459
2016-09-29T00:51:38.000Z
2022-03-07T14:37:46.000Z
electron/common/DragAction.h
w4454962/miniblink49
b294b6eacb3333659bf7b94d670d96edeeba14c0
[ "Apache-2.0" ]
1,006
2016-09-27T05:17:27.000Z
2022-03-30T02:46:51.000Z
#ifndef common_DragAction_h #define common_DragAction_h #include "content/ui/WebDropSource.h" #include "content/ui/WCDataObject.h" #include "base/COMPtr.h" #include "base/strings/string_util.h" #include <shobjidl.h> #include <shlguid.h> #include <ShellAPI.h> #include <shlobj.h> namespace atom { class DragAction : public IDropTarget { public: DragAction(wkeWebView webview, HWND viewWindow, int id) { m_id = id; m_refCount = 0; m_lastDropEffect = 0; m_mask = wkeWebDragOperationEvery; m_webview = webview; m_viewWindow = viewWindow; } static FORMATETC* getPlainTextWFormatType() { static FORMATETC textFormat = { CF_UNICODETEXT, 0, DVASPECT_CONTENT, -1, TYMED_HGLOBAL }; return &textFormat; } static FORMATETC* getPlainTextFormatType() { static FORMATETC textFormat = { CF_UNICODETEXT, 0, DVASPECT_CONTENT, -1, TYMED_HGLOBAL }; return &textFormat; } static wkeMemBuf* getPlainText(IDataObject* dataObject) { STGMEDIUM store; wkeMemBuf* text = nullptr; if (SUCCEEDED(dataObject->GetData(getPlainTextWFormatType(), &store))) { // Unicode text wchar_t* data = static_cast<wchar_t*>(::GlobalLock(store.hGlobal)); //text = wkeCreateStringW(data, wcslen(data)); std::string dataUtf8 = base::WideToUTF8(base::string16(data)); text = wkeCreateMemBuf(nullptr, (void*)dataUtf8.c_str(), dataUtf8.size()); GlobalUnlock(store.hGlobal); ReleaseStgMedium(&store); } else if (SUCCEEDED(dataObject->GetData(getPlainTextFormatType(), &store))) { // ASCII text char* data = static_cast<char*>(GlobalLock(store.hGlobal)); // text = wkeCreateStringW(L"", 0); // wkeSetString(text, data, strlen(data)); text = wkeCreateMemBuf(nullptr, (void*)data, strlen(data)); ::GlobalUnlock(store.hGlobal); ReleaseStgMedium(&store); } else { // FIXME: Originally, we called getURL() here because dragging and dropping files doesn't // populate the drag with text data. Per https://bugs.webkit.org/show_bug.cgi?id=38826, this // is undesirable, so maybe this line can be removed. 
text = nullptr; // wkeCreateStringW(L"", 0); // getURL(dataObject, nullptr); } return text; } static bool containsPlainText(IDataObject* pDataObject) { if (pDataObject) { HRESULT hr1 = pDataObject->QueryGetData(getPlainTextWFormatType()); HRESULT hr2 = pDataObject->QueryGetData(getPlainTextFormatType()); if (hr1 == S_OK || hr2 == S_OK) return true; } return false; } static FORMATETC* cfHDropFormat() { static FORMATETC urlFormat = { CF_HDROP, 0, DVASPECT_CONTENT, -1, TYMED_HGLOBAL }; return &urlFormat; } static bool containsFiles(IDataObject* pDataObject) { if (pDataObject) { HRESULT hr = pDataObject->QueryGetData(cfHDropFormat()); return hr == S_OK; } return false; } static DWORD dragOperationToDragCursor(wkeWebDragOperation op) { DWORD res = DROPEFFECT_NONE; if (op & wkeWebDragOperationCopy) res = DROPEFFECT_COPY; else if (op & wkeWebDragOperationLink) res = DROPEFFECT_LINK; else if (op & wkeWebDragOperationMove) res = DROPEFFECT_MOVE; else if (op & wkeWebDragOperationGeneric) res = DROPEFFECT_MOVE; //This appears to be the Firefox behaviour return res; } static void initWkeWebDragDataItem(wkeWebDragData::Item* item) { item->storageType = wkeWebDragData::Item::StorageTypeString; item->stringType = nullptr; // wkeCreateStringW(L"", 0); item->stringData = nullptr; // wkeCreateStringW(L"", 0); item->filenameData = nullptr; // wkeCreateStringW(L"", 0); item->displayNameData = nullptr; // wkeCreateStringW(L"", 0); item->binaryData = nullptr; item->title = nullptr; // wkeCreateStringW(L"", 0); item->fileSystemURL = nullptr; // wkeCreateStringW(L"", 0); item->fileSystemFileSize = 0; item->baseURL = nullptr; // wkeCreateStringW(L"", 0); } static void releaseWkeWebDragData(wkeWebDragData* data) { wkeFreeMemBuf(data->m_filesystemId); for (int i = 0; i < data->m_itemListLength; ++i) { wkeWebDragData::Item* item = &data->m_itemList[i]; wkeFreeMemBuf(item->stringType); wkeFreeMemBuf(item->stringData); wkeFreeMemBuf(item->filenameData); wkeFreeMemBuf(item->displayNameData); 
wkeFreeMemBuf(item->binaryData); wkeFreeMemBuf(item->title); wkeFreeMemBuf(item->fileSystemURL); wkeFreeMemBuf(item->baseURL); } } void onStartDragging( wkeWebView webView, void* param, wkeWebFrameHandle frame, const wkeWebDragData* wkeDragData, wkeWebDragOperationsMask mask, const void* image, const wkePoint* dragImageOffset ) { HRESULT hr = E_NOTIMPL; DWORD okEffect = draggingSourceOperationMaskToDragCursors(mask); DWORD effect = DROPEFFECT_NONE; //We liberally protect everything, to protect against a load occurring mid-drag COMPtr<IDragSourceHelper> helper; COMPtr<IDropSource> source; if ((content::WebDropSource::createInstance(&source)) < 0) return; content::WCDataObject* dataObjectPtr = nullptr; content::WCDataObject::createInstance(&dataObjectPtr); if (wkeDragData) { wkeWebDragData::Item* items = wkeDragData->m_itemList; for (int i = 0; i < wkeDragData->m_itemListLength; ++i) { wkeWebDragData::Item* it = &items[i]; if (wkeWebDragData::Item::StorageTypeString == it->storageType) { std::string type = " "; if (it->stringType && it->stringType->data && it->stringType->length != 0) type = std::string((const char*)it->stringType->data, it->stringType->length); std::string data = " "; if (it->stringData && it->stringData->data && it->stringData->length != 0) data = std::string((const char*)it->stringData->data, it->stringData->length); dataObjectPtr->writeString(type, data); } } } m_dragData = dataObjectPtr; hr = ::DoDragDrop(m_dragData.get(), source.get(), okEffect, &effect); POINT* screenPoint = new POINT(); ::GetCursorPos(screenPoint); POINT* clientPoint = new POINT(); *clientPoint = *screenPoint; ::ScreenToClient(m_viewWindow, clientPoint); wkeWebDragOperation operation = wkeWebDragOperationNone; if (hr == DRAGDROP_S_DROP) { if (effect & DROPEFFECT_COPY) operation = wkeWebDragOperationCopy; else if (effect & DROPEFFECT_LINK) operation = wkeWebDragOperationLink; else if (effect & DROPEFFECT_MOVE) operation = wkeWebDragOperationMove; } int id = m_id; wkeWebView 
webview = m_webview; ThreadCall::callBlinkThreadSync([id, webview, screenPoint, clientPoint, operation] { if (IdLiveDetect::get()->isLive(id)) wkeDragTargetEnd(webview, clientPoint, screenPoint, operation); delete screenPoint; delete clientPoint; }); hr = S_OK; } static wkeWebDragData* dropDataToWebDragData(IDataObject* pDataObject) { wkeWebDragData* result = new wkeWebDragData(); result->m_filesystemId = nullptr; result->m_itemListLength = 0; if (containsFiles(pDataObject)) { STGMEDIUM medium; if ((pDataObject->GetData(cfHDropFormat(), &medium)) < 0) return result; HDROP hDrop = static_cast<HDROP>(GlobalLock(medium.hGlobal)); int count = ::DragQueryFile(hDrop, 0xFFFFFFFF, NULL, 0); result->m_itemList = new wkeWebDragData::Item[count]; for (int i = 0; i < count; i++) { initWkeWebDragDataItem(&result->m_itemList[i]); int pathlength = ::DragQueryFile(hDrop, i, NULL, 0) + 1; if (pathlength >= MAX_PATH || pathlength <= 1) continue; std::vector<wchar_t> fileName; fileName.resize(pathlength); ::DragQueryFile(hDrop, i, &(fileName.at(0)), pathlength); result->m_itemList[i].storageType = wkeWebDragData::Item::StorageTypeFileSystemFile; std::string fileSystemURL = base::WideToUTF8(base::string16(&fileName.at(0), pathlength)); result->m_itemList[i].fileSystemURL = wkeCreateMemBuf(nullptr, (void*)fileSystemURL.c_str(), fileSystemURL.size()); result->m_itemListLength++; } ::DragFinish(hDrop); } else if (containsPlainText(pDataObject)) { result->m_itemList = new wkeWebDragData::Item(); initWkeWebDragDataItem(result->m_itemList); result->m_itemList->storageType = wkeWebDragData::Item::StorageTypeString; result->m_itemList->stringType = wkeCreateMemBuf(nullptr, "text/plain", strlen("text/plain")); result->m_itemList->stringData = getPlainText(pDataObject); result->m_itemListLength = 1; } return result; } static DWORD draggingSourceOperationMaskToDragCursors(wkeWebDragOperationsMask op) { DWORD result = DROPEFFECT_NONE; if (op == wkeWebDragOperationEvery) return DROPEFFECT_COPY | 
DROPEFFECT_LINK | DROPEFFECT_MOVE; if (op & wkeWebDragOperationCopy) result |= DROPEFFECT_COPY; if (op & wkeWebDragOperationLink) result |= DROPEFFECT_LINK; if (op & wkeWebDragOperationMove) result |= DROPEFFECT_MOVE; if (op & wkeWebDragOperationGeneric) result |= DROPEFFECT_MOVE; return result; } static wkeWebDragOperation keyStateToDragOperation(DWORD grfKeyState) { // Conforms to Microsoft's key combinations as documented for // IDropTarget::DragOver. Note, grfKeyState is the current // state of the keyboard modifier keys on the keyboard. See: // <http://msdn.microsoft.com/en-us/library/ms680129(VS.85).aspx>. wkeWebDragOperation operation = wkeWebDragOperationNone; // m_page->dragController().sourceDragOperation(); if ((grfKeyState & (MK_CONTROL | MK_SHIFT)) == (MK_CONTROL | MK_SHIFT)) operation = wkeWebDragOperationLink; else if ((grfKeyState & MK_CONTROL) == MK_CONTROL) operation = wkeWebDragOperationCopy; else if ((grfKeyState & MK_SHIFT) == MK_SHIFT) operation = wkeWebDragOperationGeneric; return operation; } // IDropTarget impl HRESULT __stdcall DragEnter(IDataObject* pDataObject, DWORD grfKeyState, POINTL pt, DWORD* pdwEffect) { if (!m_webview) return S_OK; m_dragData = nullptr; if (!m_dropTargetHelper) ::CoCreateInstance(CLSID_DragDropHelper, 0, CLSCTX_INPROC_SERVER, IID_IDropTargetHelper, (void**)&m_dropTargetHelper); if (m_dropTargetHelper) m_dropTargetHelper->DragEnter(m_viewWindow, pDataObject, (POINT*)&pt, *pdwEffect); POINT* screenPoint = new POINT(); ::GetCursorPos(screenPoint); POINT* clientPoint = new POINT(); *clientPoint = *screenPoint; ::ScreenToClient(m_viewWindow, clientPoint); int id = m_id; wkeWebView webview = m_webview; wkeWebDragData* data = dropDataToWebDragData(pDataObject); ThreadCall::callBlinkThreadAsync([data, id, webview, screenPoint, clientPoint, grfKeyState] { if (IdLiveDetect::get()->isLive(id)) { wkeWebDragOperation op = wkeDragTargetDragEnter(webview, data, clientPoint, screenPoint, keyStateToDragOperation(grfKeyState), 
keyStateToDragOperation(grfKeyState)); } releaseWkeWebDragData(data); delete screenPoint; delete clientPoint; }); *pdwEffect = DROPEFFECT_MOVE; //dragOperationToDragCursor(op); m_lastDropEffect = *pdwEffect; m_dragData = pDataObject; return S_OK; } HRESULT __stdcall DragOver(DWORD grfKeyState, POINTL pt, DWORD* pdwEffect) { if (!m_webview) return S_OK; if (m_dropTargetHelper) m_dropTargetHelper->DragOver((POINT*)&pt, *pdwEffect); if (m_dragData) { POINT* screenPoint = new POINT(); ::GetCursorPos(screenPoint); POINT* clientPoint = new POINT(); *clientPoint = *screenPoint; ::ScreenToClient(m_viewWindow, clientPoint); int id = m_id; wkeWebView webview = m_webview; ThreadCall::callBlinkThreadAsync([id, webview, screenPoint, clientPoint, grfKeyState] { if (IdLiveDetect::get()->isLive(id)) { wkeWebDragOperation op = wkeDragTargetDragOver(webview, clientPoint, screenPoint, wkeWebDragOperationEvery, keyStateToDragOperation(grfKeyState)); } delete screenPoint; delete clientPoint; }); *pdwEffect = DROPEFFECT_MOVE; //dragOperationToDragCursor(op); } else *pdwEffect = DROPEFFECT_NONE; m_lastDropEffect = *pdwEffect; return S_OK; } HRESULT __stdcall DragLeave() { if (m_dropTargetHelper) m_dropTargetHelper->DragLeave(); if (m_dragData) { int id = m_id; wkeWebView webview = m_webview; ThreadCall::callBlinkThreadAsync([id, webview] { if (!IdLiveDetect::get()->isLive(id) || !webview) return; wkeDragTargetDragLeave(webview); }); m_dragData = nullptr; } return S_OK; } HRESULT __stdcall Drop(IDataObject* pDataObject, DWORD grfKeyState, POINTL pt, DWORD* pdwEffect) { OutputDebugStringA("Drop\n"); if (!m_webview) return S_OK; if (m_dropTargetHelper) m_dropTargetHelper->Drop(pDataObject, (POINT*)&pt, *pdwEffect); m_dragData = 0; *pdwEffect = m_lastDropEffect; POINT* screenPoint = new POINT(); ::GetCursorPos(screenPoint); POINT* clientPoint = new POINT(); *clientPoint = *screenPoint; ::ScreenToClient(m_viewWindow, clientPoint); int id = m_id; wkeWebView webview = m_webview; 
ThreadCall::callBlinkThreadAsync([id, webview, screenPoint, clientPoint, grfKeyState] { if (IdLiveDetect::get()->isLive(id) && webview) wkeDragTargetDrop(webview, clientPoint, screenPoint, keyStateToDragOperation(grfKeyState)); delete screenPoint; delete clientPoint; }); return S_OK; } HRESULT __stdcall QueryInterface(REFIID riid, void** ppvObject) { if (!ppvObject) return E_POINTER; *ppvObject = nullptr; if (IsEqualGUID(riid, IID_IDropTarget)) *ppvObject = static_cast<IDropTarget*>(this); else return E_NOINTERFACE; AddRef(); return S_OK; } ULONG __stdcall AddRef() { return ++m_refCount; } ULONG __stdcall Release() { return --m_refCount; } ULONG getRefCount() const { return m_refCount; } private: long m_refCount; int m_id; wkeWebView m_webview; HWND m_viewWindow; DWORD m_lastDropEffect; wkeWebDragOperationsMask m_mask; COMPtr<IDataObject> m_dragData; COMPtr<IDropTargetHelper> m_dropTargetHelper; }; } #endif // common_DragAction_h
36.753275
132
0.589794
[ "vector" ]
aeb2e0cfcf03873431b5c28957aaef5d13463d25
6,727
c
C
slider.c
BilkentCompGen/mam
5d935c8f2d5c2d78639fbfd7c44a30172132076d
[ "BSD-3-Clause" ]
null
null
null
slider.c
BilkentCompGen/mam
5d935c8f2d5c2d78639fbfd7c44a30172132076d
[ "BSD-3-Clause" ]
null
null
null
slider.c
BilkentCompGen/mam
5d935c8f2d5c2d78639fbfd7c44a30172132076d
[ "BSD-3-Clause" ]
null
null
null
/* MaM : Multiple alignment Manipulator Implemented by: Can ALKAN & Eray TUZUN [ calkan@gmail.com ] [ eraytuzun@gmail.com ] Last Update: Oct 18, 2005 Summary: parsimony score Summary: config file, run_prog (Oct 18, 2005) */ #include <stdio.h> #include <string.h> #include <ctype.h> #include <stdlib.h> #include <unistd.h> #include "main.h" #define pairwise 0 #define complete 1 int *flag; float slide(int, char **, int); float divergence(char *, char *, int); void SkipNlines(int n,FILE *in); int exonloc(int); FILE *gfopen(char *, char *); void plotout(char *,int,char *,char *,char *); position *slidepos; int posno=0; int SLIDE_WIDTH; int WINDOW_SIZE; void slider(char *arg1, char *arg2, char *arg3, char *arg4, char *arg5){ //argv[1]; alignment file //argv[2]; exonlocation file //argv[3]; P/C pairwise or complete deletion or parsimony score //argv[4]; Slide width //argv[5]; Window size int iterator; //To get the begin&end locations of exonfile int i=0; int k=0; FILE *unique; FILE *repeats; FILE *exon; float stddif; //error rate flag = (int *) malloc(maxLen * sizeof (int)); repeats = gfopen("repeats","w"); //exonfile unique = gfopen("unique","w"); //non-exon file SLIDE_WIDTH=atoi(arg4); WINDOW_SIZE=atoi(arg5); exon=gfopen(arg2,"r"); //to be changed slidepos = (position *) malloc(sizeof(position) * arraysize); while(fscanf(exon, "%d", &iterator) >0){ slidepos[posno].begin=iterator; fscanf(exon,"%d",&iterator); slidepos[posno++].end=iterator; } for (i=0;i<maxLen;i++) //Initialize flag[i]=1; for (i=0;i<seqTot;i++){ for (k=0; k<maxLen; k++){ if ((seqs[i][k]=='-') && !(strcmp(arg3,"C"))) //If complete deletion requested flag[k] = 0; else if (!strcmp(arg3, "S")) // if parsimony score flag[k] = 2; } } for(i=0;i<strlen(seqs[0]);i+=SLIDE_WIDTH){ printf("\rSliding Windows %f%%",(((float)(i+1)/(float)(strlen(seqs[0])))*100)); stddif = slide(i, seqs, seqTot); if (exonloc(i)){ fprintf(repeats, "%d %f\n", i,stddif); fprintf(unique, "%d %f\n", i,0.0); } else{ fprintf(repeats, "%d %f\n", 
i,0.0); fprintf(unique, "%d %f\n", i,stddif); } } fclose(repeats); fclose(unique); fclose(exon); plotout("plotfile",0,arg1,arg2,arg3); run_prog(PATH_GNUPLOT, "-persist plotfile"); /* system("gnuplot -persist plotfile"); */ }//slider int exonloc(int position) { int j; for (j=0;j<posno;j++) if ((position<slidepos[j].end) && (position>slidepos[j].begin)) return 1; return 0; } //exonloc float slide(int startpos, char **seqs, int seqtot){ int i, j; float r; float totalscore=0.0; float avgscore=0.0; int a,t,g,c,n; int max; int whichmax; //n:0, a:1 c:2 g:3 t:4 int parscore=0; if (flag[0] == 2){ // parsimony score for (j=startpos;j<startpos+WINDOW_SIZE;j++){ a = t = g = c = n = 0; for (i=0;i<seqtot;i++){ if (tolower(seqs[i][j]) == '-') n++; else if (tolower(seqs[i][j]) == 'a') a++; else if (tolower(seqs[i][j]) == 'c') c++; else if (tolower(seqs[i][j]) == 'g') g++; else if (tolower(seqs[i][j]) == 't') t++; } max = n; whichmax=0; if (a>max) {max=a; whichmax=1;} if (c>max) {max=c; whichmax=2;} if (g>max) {max=g; whichmax=3;} if (t>max) {max=t; whichmax=4;} switch(whichmax){ case 0: parscore=a+c+g+t; break; case 1: parscore=n+c+g+t; break; case 2: parscore=a+n+g+t; break; case 3: parscore=a+c+n+t; break; case 4: parscore=a+c+g+n; break; } totalscore+=(float)parscore/(float)WINDOW_SIZE; } avgscore=(float)totalscore/(float)seqtot; } else{ for (i=0;i<seqtot;i++){ for (j=0;j<i;j++){ r=divergence(seqs[i],seqs[j],startpos); totalscore+=r; } //inner for } //outer for avgscore=((float)totalscore/(float)((seqtot-1)*seqtot*2)); } return avgscore; } // slide float divergence(char *S, char *T, int startpos){ //Flag=0 pairwise deletion //Flag=1 complete deletion if there is a '-' in one of the S[i] we discard the whole i column. 
//Flag=2 parsimony score int i; int div=0; int length=0; float percentage; int endpos=strlen(seqs[0]); if ((startpos+WINDOW_SIZE)<endpos) endpos=startpos+WINDOW_SIZE; for (i=startpos;i<endpos;i++){ if (flag[i] == 1) //Else it is a complete deletion dont count this position in calculations { // If S[i]=='-' or T[i]=='-' we discard the position. if (S[i] != T[i] && S[i]!='-' && T[i]!='-') { div++; length++; } if (S[i]==T[i] && S[i]!='-' && T[i]!='-') length++; } } if (div==0) percentage=0.0; else percentage = ((float)div/(float)length); return percentage; } // divergence void SkipNlines(int n,FILE *in){ char ch; int linecount=0; //Skips first n lines while(fscanf(in,"%c",&ch) > 0){ if (ch=='\n') linecount++; if (linecount==n) break; } } // skipNlines FILE *gfopen(char *fname, char *mode){ //Gracefully file open, gives an error message if it can't open else returns file pointer FILE *fp; if ((fp=fopen(fname,mode))==0){ printf("Cannot open %s \n",fname); die(); } return fp; } // gfopen void plotout(char *plotfile,int printflag,char *alnname,char *tablename, char *porc){ FILE *plot; char psfile[200]; plot=gfopen(plotfile,"w"); //gnuplot file //Plot information comes here sprintf(psfile,"%s.%s",alnname,tablename); if (porc[0]=='S') fprintf(plot,"set title \"DivergenceRate Graph with \'%s\' cDNA file with parsimony score SlideWidth: \'%d\' WindowWidth: \'%d\' \" \n",tablename,SLIDE_WIDTH, WINDOW_SIZE); else fprintf(plot,"set title \"DivergenceRate Graph with \'%s\' cDNA file with \'%s\' deletion SlideWidth: \'%d\' WindowWidth: \'%d\' \" \n",tablename,porc,SLIDE_WIDTH, WINDOW_SIZE); fprintf(plot,"set xlabel \"LengthofAlignment\" \n"); fprintf(plot,"set ylabel \"DivergenceRate\" \n"); fprintf(plot,"plot \"unique\" with impulses, \"repeats\" with boxes\n"); fprintf(plot,"set terminal postscript color\n"); fprintf(plot,"set output \"%s.ps\"\n",psfile); fprintf(plot,"replot"); fclose(plot); } // plotout /* set palette set palette { { gray | color } { gamma <gamma> } { 
rgbformulae <r>,<g>,<b> | defined { ( <gray1> <color1> {, <grayN> <colorN>}... ) } | file '<filename>' {datafile-modifiers} | functions <R>,<G>,<B> } { model { RGB | HSV | CMY | YIQ | XYZ } } set palette model CMY */
24.641026
183
0.582132
[ "model" ]
aeb4f7542ba36ff3cb45e76e4c839f7985cf6ad6
2,246
h
C
src/pointcloudToLod/tileToLod.h
ProjSEED/lodToolkit
562ef77ea2b25ef1cccbe73fbd2fbae232137882
[ "MIT" ]
60
2020-10-19T08:13:10.000Z
2022-03-18T16:24:59.000Z
src/pointcloudToLod/tileToLod.h
yuancaimaiyi/lodToolkit
d840c18426c843a862cda41681006c4d1888696b
[ "MIT" ]
19
2020-10-19T02:49:28.000Z
2022-01-10T06:33:54.000Z
src/pointcloudToLod/tileToLod.h
yuancaimaiyi/lodToolkit
d840c18426c843a862cda41681006c4d1888696b
[ "MIT" ]
21
2020-10-27T04:59:18.000Z
2022-03-25T07:11:51.000Z
#pragma once #include "pointCI.h" #include <osg/BoundingBox> #include <osg/ref_ptr> #include <osg/Geode> #include <osg/Geometry> #include <osg/PagedLOD> #include <osg/ProxyNode> #include <osg/Vec3> #include <osg/Vec4> #include <osg/MatrixTransform> #include <osg/LineWidth> #include <osg/Point> #include <osgDB/ReaderWriter> #include <osgDB/WriteFile> #include <osgDB/FileUtils> namespace seed { namespace io { class PointVisitor; enum AxisType { X = 0, Y, Z }; struct AxisInfo { int aixType; double max; double min; }; enum ColorMode { Debug = 0, RGB = 1, IntensityGrey = 2, IntensityBlueWhiteRed = 3, IntensityHeightBlend = 4 }; enum ExportMode { OSGB = 0, _3MX = 1 }; class TileToLOD { public: TileToLOD(unsigned int maxTreeLevel, unsigned int maxPointNumPerOneNode, double lodRatio, float pointSize, osg::BoundingBox boundingBoxGlobal, ColorMode colorMode) { _maxTreeLevel = maxTreeLevel; _maxPointNumPerOneNode = maxPointNumPerOneNode; _lodRatio = lodRatio; _pointSize = pointSize; _boundingBoxGlobal = boundingBoxGlobal; _colorMode = colorMode; CreateColorBar(); } void CreateColorBar(); bool Generate(const std::vector<PointCI> *pointSet, const std::string& saveFilePath, const std::string& strBlock, ExportMode exportMode, osg::BoundingBox& boundingBoxLevel0); protected: unsigned int _maxTreeLevel; unsigned int _maxPointNumPerOneNode; double _lodRatio; float _pointSize; osg::BoundingBox _boundingBoxGlobal; ColorMode _colorMode; osg::Vec4 _colorBar[256]; AxisInfo FindMaxAxis(osg::BoundingBox boundingBox, osg::BoundingBox& boundingBoxLeft, osg::BoundingBox& boundingBoxRight); bool BuildNode(const std::vector<PointCI> *pointSet, std::vector<unsigned int> &pointIndex, osg::BoundingBox boundingBox, osg::BoundingBox boundingBoxLevel0, const std::string& saveFilePath, const std::string& strBlock, unsigned int level, unsigned int childNo, ExportMode exportMode); osg::Geode *MakeNodeGeode(const std::vector<PointCI> *pointSet, std::vector<unsigned int> &pointIndex, ExportMode 
exportMode); }; }; };
20.990654
126
0.703028
[ "geometry", "vector" ]
aeb7e9b1a03d580d66279051174308657e581152
1,716
h
C
Implementations/content/graphs (12)/Fundamentals/ManhattanMST.h
xennygrimmato/USACO
9378d6be8db82ef111c49b1fff3b6827c577561c
[ "MIT" ]
null
null
null
Implementations/content/graphs (12)/Fundamentals/ManhattanMST.h
xennygrimmato/USACO
9378d6be8db82ef111c49b1fff3b6827c577561c
[ "MIT" ]
null
null
null
Implementations/content/graphs (12)/Fundamentals/ManhattanMST.h
xennygrimmato/USACO
9378d6be8db82ef111c49b1fff3b6827c577561c
[ "MIT" ]
1
2020-03-11T05:02:25.000Z
2020-03-11T05:02:25.000Z
/** * Description: Compute minimum spanning tree of points where edges are manhattan distances * Time: O(N\log N) * Source: Rezwan Arefin * Verification: * https://open.kattis.com/problems/gridmst * CSA 84 The Sprawl * TC 760 ComponentsForever */ #include "MST (7.6).h" int N; vector<array<int,3>> cur; vector<pair<ll,pi>> ed; vi ind; struct { map<int,pi> m; void upd(int a, pi b) { auto it = m.lb(a); if (it != m.end() && it->s <= b) return; m[a] = b; it = m.find(a); while (it != m.begin() && prev(it)->s >= b) m.erase(prev(it)); } pi query(int y) { // over all a > y // get min possible value of b auto it = m.ub(y); if (it == m.end()) return {2*MOD,2*MOD}; return it->s; } } S; void solve() { sort(all(ind),[](int a, int b) { return cur[a][0] > cur[b][0]; }); S.m.clear(); int nex = 0; trav(x,ind) { // cur[x][0] <= ?, cur[x][1] < ? while (nex < N && cur[ind[nex]][0] >= cur[x][0]) { int b = ind[nex++]; S.upd(cur[b][1],{cur[b][2],b}); } pi t = S.query(cur[x][1]); if (t.s != 2*MOD) ed.pb({(ll)t.f-cur[x][2],{x,t.s}}); } } ll mst(vpi v) { N = sz(v); cur.rsz(N); ed.clear(); ind.clear(); F0R(i,N) ind.pb(i); sort(all(ind),[&v](int a, int b) { return v[a] < v[b]; }); F0R(i,N-1) if (v[ind[i]] == v[ind[i+1]]) ed.pb({0,{ind[i],ind[i+1]}}); F0R(i,2) { // ok to consider just two quadrants? F0R(i,N) { auto a = v[i]; cur[i][2] = a.f+a.s; } F0R(i,N) { // first octant auto a = v[i]; cur[i][0] = a.f-a.s; cur[i][1] = a.s; } solve(); F0R(i,N) { // second octant auto a = v[i]; cur[i][0] = a.f; cur[i][1] = a.s-a.f; } solve(); trav(a,v) a = {a.s,-a.f}; // rotate 90 degrees, repeat } return kruskal(N,ed); }
23.189189
92
0.5169
[ "vector" ]
aeb8e1423d28b044c555ee3bc03f82feef0c013b
99,123
c
C
Rtl/Rtl.c
tpn/dictionary
309b3731f79612f3e287c7ef2261f60e5c953704
[ "MIT" ]
2
2019-08-30T15:55:00.000Z
2020-12-27T08:35:04.000Z
Rtl/Rtl.c
tpn/dictionary
309b3731f79612f3e287c7ef2261f60e5c953704
[ "MIT" ]
null
null
null
Rtl/Rtl.c
tpn/dictionary
309b3731f79612f3e287c7ef2261f60e5c953704
[ "MIT" ]
4
2018-06-04T09:20:18.000Z
2021-11-01T14:54:31.000Z
/*++ Copyright (c) 2016 Trent Nelson <trent@trent.me> Module Name: Rtl.c Abstract: This module provides implementations for most Rtl (Run-time Library) routines. --*/ #include "stdafx.h" // // Keep Cu.h out of the pre-compiled header for now whilst it's in constant // fluctuation. // #include "Cu.h" // // Temp hack: need to include RtlConstants.c directly. // #include "RtlConstants.c" static PRTL_COMPARE_STRING _RtlCompareString = NULL; INIT_ONCE InitOnceSystemTimerFunction = INIT_ONCE_STATIC_INIT; PVECTORED_EXCEPTION_HANDLER VectoredExceptionHandler = NULL; INIT_ONCE InitOnceCSpecificHandler = INIT_ONCE_STATIC_INIT; CONST static UNICODE_STRING ExtendedLengthVolumePrefixW = \ RTL_CONSTANT_STRING(L"\\\\?\\"); CONST static STRING ExtendedLengthVolumePrefixA = \ RTL_CONSTANT_STRING("\\\\?\\"); // // As we don't link to the CRT, we don't get a __C_specific_handler entry, // which the linker will complain about as soon as we use __try/__except. // What we do is define a __C_specific_handler_impl pointer to the original // function (that lives in ntdll), then implement our own function by the // same name that calls the underlying impl pointer. In order to do this // we have to disable some compiler/linker warnings regarding mismatched // stuff. 
// static P__C_SPECIFIC_HANDLER __C_specific_handler_impl = NULL; #pragma warning(push) #pragma warning(disable: 4028 4273 28251) EXCEPTION_DISPOSITION __cdecl __C_specific_handler( PEXCEPTION_RECORD ExceptionRecord, ULONG_PTR Frame, PCONTEXT Context, struct _DISPATCHER_CONTEXT *Dispatch ) { return __C_specific_handler_impl(ExceptionRecord, Frame, Context, Dispatch); } #pragma warning(pop) _Use_decl_annotations_ PVOID CopyToMemoryMappedMemory( PRTL Rtl, PVOID Destination, LPCVOID Source, SIZE_T Size ) { // // Writing to memory mapped memory could raise a STATUS_IN_PAGE_ERROR // if there has been an issue with the backing store (such as memory // mapping a file on a network drive, then having the network fail, // or running out of disk space on the volume). Catch such exceptions // and return NULL. // __try { return Rtl->RtlCopyMemory(Destination, Source, Size); } __except (GetExceptionCode() == STATUS_IN_PAGE_ERROR ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) { return NULL; } } BOOL TestExceptionHandler(VOID) { // // Try assigning '1' to the memory address 0x10. // __try { (*(volatile *)(PCHAR)10) = '1'; } __except (GetExceptionCode() == STATUS_ACCESS_VIOLATION ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) { return TRUE; } // // This should be unreachable. 
// return FALSE; } _Use_decl_annotations_ BOOL PrefaultPages( PVOID Address, ULONG NumberOfPages ) { ULONG Index; PCHAR Pointer = Address; TRY_MAPPED_MEMORY_OP { for (Index = 0; Index < NumberOfPages; Index++) { PrefaultPage(Pointer); Pointer += PAGE_SIZE; } } CATCH_STATUS_IN_PAGE_ERROR { return FALSE; } return TRUE; } FIND_AND_REPLACE_BYTE FindAndReplaceByte; _Use_decl_annotations_ ULONGLONG RtlFindAndReplaceByte( ULONGLONG SizeOfBufferInBytes, PBYTE Buffer, BYTE Find, BYTE Replace ) { BYTE Byte; ULONG Mask; ULONG Count; ULONGLONG Total; ULONGLONG Index; ULONGLONG TrailingBytes; ULONGLONG NumberOfYmmWords; YMMWORD Chunk; YMMWORD Found; YMMWORD Replaced; YMMWORD FindYmm; YMMWORD ReplaceYmm; PYMMWORD BufferYmm; PBYTE Dest; PBYTE TrailingBuffer; TrailingBytes = SizeOfBufferInBytes % sizeof(YMMWORD); NumberOfYmmWords = SizeOfBufferInBytes >> 5; FindYmm = _mm256_broadcastb_epi8(_mm_set1_epi8(Find)); ReplaceYmm = _mm256_broadcastb_epi8(_mm_set1_epi8(Replace)); BufferYmm = (PYMMWORD)Buffer; Total = 0; if (NumberOfYmmWords) { for (Index = 0; Index < NumberOfYmmWords; Index++) { // // Load a 32 byte chunk of the input buffer. // Chunk = _mm256_load_si256(BufferYmm + Index); // // Intersect the buffer with the character to find. // Found = _mm256_cmpeq_epi8(Chunk, FindYmm); // // Create a mask and then do a popcount to determine how many // bytes were matched. // Mask = _mm256_movemask_epi8(Found); Count = __popcnt(Mask); if (Count != 0) { // // Blend the chunk with replacement characters via the mask we // just generated. // Replaced = _mm256_blendv_epi8(Chunk, ReplaceYmm, Found); // // Store this copy back in memory. // _mm256_store_si256(BufferYmm + Index, Replaced); // // Update the total count. 
// Total += Count; } } } if (TrailingBytes) { TrailingBuffer = Buffer + (SizeOfBufferInBytes - TrailingBytes); for (Index = 0; Index < TrailingBytes; Index++) { Dest = TrailingBuffer + Index; Byte = *Dest; if (Byte == Find) { *Dest = Replace; ++Total; } } } return Total; } BOOL LoadShlwapiFunctions( _In_ HMODULE ShlwapiModule, _In_ PSHLWAPI_FUNCTIONS ShlwapiFunctions ) { if (!ARGUMENT_PRESENT(ShlwapiModule)) { return FALSE; } if (!ARGUMENT_PRESENT(ShlwapiFunctions)) { return FALSE; } #define TRY_RESOLVE_SHLWAPI_FUNCTION(Type, Name) ( \ ShlwapiFunctions->##Name = (Type)( \ GetProcAddress( \ ShlwapiModule, \ #Name \ ) \ ) \ ) #define RESOLVE_SHLWAPI_FUNCTION(Type, Name) \ if (!TRY_RESOLVE_SHLWAPI_FUNCTION(Type, Name)) { \ OutputDebugStringA("Failed to resolve Shlwapi!" #Name "\n"); \ return FALSE; \ } RESOLVE_SHLWAPI_FUNCTION(PPATH_CANONICALIZEA, PathCanonicalizeA); return TRUE; } RTL_API BOOL LoadShlwapi(PRTL Rtl) { if (!ARGUMENT_PRESENT(Rtl)) { return FALSE; } if (Rtl->ShlwapiModule) { return TRUE; } if (!(Rtl->ShlwapiModule = LoadLibraryA("shlwapi"))) { return FALSE; } return LoadShlwapiFunctions(Rtl->ShlwapiModule, &Rtl->ShlwapiFunctions); } RTL_API BOOL LoadDbgHelp(PRTL Rtl) { if (!ARGUMENT_PRESENT(Rtl)) { return FALSE; } if (Rtl->DbgHelpModule) { return TRUE; } if (!(Rtl->DbgHelpModule = LoadLibraryA("dbghelp"))) { OutputDebugStringA("Rtl: Failed to load dbghelp."); return FALSE; } return ResolveDbgHelpFunctions(Rtl, Rtl->DbgHelpModule, &Rtl->Dbg); } RTL_API BOOL InitializeCom( _In_ PRTL Rtl ) { if (!ARGUMENT_PRESENT(Rtl)) { return FALSE; } if (Rtl->Flags.ComInitialized) { return TRUE; } if (!(Rtl->Ole32Module = LoadLibraryA("ole32"))) { OutputDebugStringA("Rtl: Failed to load ole32."); return FALSE; } if (!(Rtl->CoInitializeEx = (PCO_INITIALIZE_EX) GetProcAddress(Rtl->Ole32Module, "CoInitializeEx"))) { OutputDebugStringA("Failed to resolve CoInitializeEx.\n"); return FALSE; } Rtl->Flags.ComInitialized = TRUE; return TRUE; } RTL_API BOOL LoadDbgEng( _In_ PRTL 
Rtl ) { PCSZ Path = ( "C:\\Program Files (x86)\\Windows Kits\\" "10\\Debuggers\\x64\\dbgeng.dll" ); if (!ARGUMENT_PRESENT(Rtl)) { return FALSE; } if (Rtl->DbgEngModule) { return TRUE; } if (!Rtl->InitializeCom(Rtl)) { return FALSE; } if (!(Rtl->DbgEngModule = LoadLibraryA(Path))) { OutputDebugStringA("Rtl: Failed to load dbgeng."); return FALSE; } else if (!(Rtl->DbgEngModule = LoadLibraryA("dbgeng"))) { OutputDebugStringA("Rtl: Failed to load dbgeng."); return FALSE; } if (!(Rtl->DebugCreate = (PDEBUG_CREATE) GetProcAddress(Rtl->DbgEngModule, "DebugCreate"))) { OutputDebugStringA("DbgEng: failed to resolve 'DebugCreate'"); return FALSE; } return TRUE; } _Use_decl_annotations_ BOOL ResolveNvcudaFunctions( PRTL Rtl, HMODULE NvcudaModule, PCU_FUNCTIONS CuFunctions ) { BOOL Success; ULONG NumberOfResolvedSymbols; ULONG ExpectedNumberOfResolvedSymbols; PULONG_PTR Functions = (PULONG_PTR)CuFunctions; #ifdef Names #undef Names #endif #define Names CuFunctionNames ULONG BitmapBuffer[(ALIGN_UP(ARRAYSIZE(Names), sizeof(ULONG) << 3) >> 5)+1]; RTL_BITMAP FailedBitmap = { ARRAYSIZE(Names)+1, (PULONG)&BitmapBuffer }; ExpectedNumberOfResolvedSymbols = ARRAYSIZE(Names); Success = LoadSymbols( Names, ARRAYSIZE(Names), Functions, sizeof(*CuFunctions) / sizeof(ULONG_PTR), NvcudaModule, &FailedBitmap, TRUE, &NumberOfResolvedSymbols ); if (!Success) { __debugbreak(); } if (ExpectedNumberOfResolvedSymbols != NumberOfResolvedSymbols) { PCSTR FirstFailedSymbolName; ULONG FirstFailedSymbol; ULONG NumberOfFailedSymbols; NumberOfFailedSymbols = Rtl->RtlNumberOfSetBits(&FailedBitmap); FirstFailedSymbol = Rtl->RtlFindSetBits(&FailedBitmap, 1, 0); FirstFailedSymbolName = Names[FirstFailedSymbol-1]; __debugbreak(); } #undef Names return TRUE; } RTL_API BOOL GetCu( PRTL Rtl, PCU *CuPointer ) { BOOL Success; PCU Cu; CU_RESULT Result; if (!Rtl->Flags.NvcudaInitialized) { Rtl->NvcudaModule = LoadLibraryA("nvcuda"); if (!Rtl->NvcudaModule) { return FALSE; } Cu = Rtl->Cu = 
(PCU)HeapAlloc(Rtl->HeapHandle, 0, sizeof(*Rtl->Cu)); if (!Cu) { return FALSE; } ZeroStructPointer(Cu); Cu->SizeOfStruct = sizeof(*Cu); Cu->NumberOfFunctions = sizeof(CU_FUNCTIONS) / sizeof(ULONG_PTR); Success = ResolveNvcudaFunctions(Rtl, Rtl->NvcudaModule, &Cu->Functions); if (!Success) { goto Error; } Result = Cu->Init(0); if (CU_FAILED(Result)) { goto Error; } Rtl->Flags.NvcudaInitialized = TRUE; } *CuPointer = Rtl->Cu; return TRUE; Error: HeapFree(Rtl->HeapHandle, 0, Cu); Rtl->Cu = NULL; return FALSE; } BOOL CALLBACK SetCSpecificHandlerCallback( PINIT_ONCE InitOnce, PVOID Parameter, PVOID *lpContext ) { PROC Handler; HMODULE Module; BOOL Success = FALSE; Module = (HMODULE)Parameter; if (Handler = GetProcAddress(Module, "__C_specific_handler")) { __C_specific_handler_impl = (P__C_SPECIFIC_HANDLER)Handler; Success = TRUE; } return Success; } BOOL SetCSpecificHandler( _In_ HMODULE Module ) { BOOL Status; Status = InitOnceExecuteOnce( &InitOnceCSpecificHandler, SetCSpecificHandlerCallback, Module, NULL ); return Status; } _Success_(return != 0) BOOL CALLBACK GetSystemTimerFunctionCallback( _Inout_ PINIT_ONCE InitOnce, _Inout_ PVOID Parameter, _Inout_opt_ PVOID *lpContext ) { HMODULE Module; FARPROC Proc; static SYSTEM_TIMER_FUNCTION SystemTimerFunction = { 0 }; if (!lpContext) { return FALSE; } Module = GetModuleHandle(TEXT("kernel32")); if (!Module || Module == INVALID_HANDLE_VALUE) { return FALSE; } Proc = GetProcAddress(Module, "GetSystemTimePreciseAsFileTime"); if (Proc) { SystemTimerFunction.GetSystemTimePreciseAsFileTime = ( (PGETSYSTEMTIMEPRECISEASFILETIME)Proc ); } else { Module = LoadLibrary(TEXT("ntdll")); if (!Module || Module == INVALID_HANDLE_VALUE) { return FALSE; } Proc = GetProcAddress(Module, "NtQuerySystemTime"); if (!Proc) { return FALSE; } SystemTimerFunction.NtQuerySystemTime = (PNTQUERYSYSTEMTIME)Proc; } *((PPSYSTEM_TIMER_FUNCTION)lpContext) = &SystemTimerFunction; return TRUE; } PSYSTEM_TIMER_FUNCTION GetSystemTimerFunction( VOID ) { BOOL 
Status; PSYSTEM_TIMER_FUNCTION SystemTimerFunction; Status = InitOnceExecuteOnce( &InitOnceSystemTimerFunction, GetSystemTimerFunctionCallback, NULL, (LPVOID *)&SystemTimerFunction ); if (!Status) { return NULL; } else { return SystemTimerFunction; } } _Check_return_ BOOL CallSystemTimer( _Out_ PFILETIME SystemTime, _Inout_opt_ PPSYSTEM_TIMER_FUNCTION ppSystemTimerFunction ) { PSYSTEM_TIMER_FUNCTION SystemTimerFunction = NULL; if (ppSystemTimerFunction) { if (*ppSystemTimerFunction) { SystemTimerFunction = *ppSystemTimerFunction; } else { SystemTimerFunction = GetSystemTimerFunction(); *ppSystemTimerFunction = SystemTimerFunction; } } else { SystemTimerFunction = GetSystemTimerFunction(); } if (!SystemTimerFunction) { return FALSE; } if (SystemTimerFunction->GetSystemTimePreciseAsFileTime) { SystemTimerFunction->GetSystemTimePreciseAsFileTime(SystemTime); } else if (SystemTimerFunction->NtQuerySystemTime) { BOOL Success = SystemTimerFunction->NtQuerySystemTime( (PLARGE_INTEGER)SystemTime ); if (!Success) { return FALSE; } } else { return FALSE; } return TRUE; } BOOL FindCharsInUnicodeString( _In_ PRTL Rtl, _In_ PCUNICODE_STRING String, _In_ WCHAR CharToFind, _Inout_ PRTL_BITMAP Bitmap, _In_ BOOL Reverse ) { USHORT Index; USHORT NumberOfCharacters = String->Length >> 1; ULONG Bit; WCHAR Char; // // We use two loop implementations in order to avoid an additional // branch during the loop (after we find a character match). 
// if (Reverse) { for (Index = 0; Index < NumberOfCharacters; Index++) { Char = String->Buffer[Index]; if (Char == CharToFind) { Bit = NumberOfCharacters - Index; FastSetBit(Bitmap, Bit); } } } else { for (Index = 0; Index < NumberOfCharacters; Index++) { Char = String->Buffer[Index]; if (Char == CharToFind) { FastSetBit(Bitmap, Index); } } } return TRUE; } BOOL FindCharsInString( _In_ PRTL Rtl, _In_ PCSTRING String, _In_ CHAR CharToFind, _Inout_ PRTL_BITMAP Bitmap, _In_ BOOL Reverse ) { USHORT Index; USHORT NumberOfCharacters = String->Length; ULONG Bit; CHAR Char; PRTL_SET_BIT RtlSetBit = Rtl->RtlSetBit; // // We use two loop implementations in order to avoid an additional // branch during the loop (after we find a character match). // if (Reverse) { for (Index = 0; Index < NumberOfCharacters; Index++) { Char = String->Buffer[Index]; if (Char == CharToFind) { Bit = NumberOfCharacters - Index; FastSetBit(Bitmap, Bit); } } } else { for (Index = 0; Index < NumberOfCharacters; Index++) { Char = String->Buffer[Index]; if (Char == CharToFind) { FastSetBit(Bitmap, Index); } } } return TRUE; } _Check_return_ BOOL CreateBitmapIndexForUnicodeString( _In_ PRTL Rtl, _In_ PCUNICODE_STRING String, _In_ WCHAR Char, _Inout_ PHANDLE HeapHandlePointer, _Inout_ PPRTL_BITMAP BitmapPointer, _In_ BOOL Reverse, _In_opt_ PFIND_CHARS_IN_UNICODE_STRING FindCharsFunction ) /*++ Routine Description: This is a helper function that simplifies creating bitmap indexes for UNICODE_STRING structures. The routine will use the user-supplied bitmap if it is big enough (governed by the SizeOfBitMap field). If it isn't, a new buffer will be allocated. If no bitmap is provided at all, the entire structure plus the bitmap buffer space will be allocated from the heap. Typically, callers would provide their own pointer to a stack-allocated RTL_BITMAP struct if they only need the bitmap for the scope of their function call. 
For longer-lived bitmaps, a pointer to a NULL pointer would be provided, indicating that the entire structure should be heap allocated. Caller is responsible for freeing either the entire RTL_BITMAP or the underlying Bitmap->Buffer if a heap allocation took place. Arguments: Rtl - Supplies the pointer to the RTL structure (mandatory). String - Supplies the UNICODE_STRING structure to create the bitmap index for (mandatory). Char - Supplies the character to create the bitmap index for. This is passed directly to FindCharsInUnicodeString(). HeapHandlePointer - Supplies a pointer to the underlying heap handle to use for allocation. If a heap allocation is required and this pointer points to a NULL value, the default process heap handle will be used (obtained via GetProcessHeap()), and the pointed-to location will be updated with the handle value. (The caller will need this in order to perform the subsequent HeapFree() of the relevant structure.) BitmapPointer - Supplies a pointer to a PRTL_BITMAP structure. If the pointed-to location is NULL, additional space for the RTL_BITMAP structure will be allocated on top of the bitmap buffer space, and the pointed-to location will be updated with the resulting address. If the pointed-to location is non-NULL and the SizeOfBitMap field is greater than or equal to the required bitmap size, the bitmap will be used directly and no heap allocations will take place. The SizeOfBitMap field in this example will be altered to match the required size. If a heap allocation takes place, user is responsible for cleaning it up (i.e. either freeing the entire PRTL_BITMAP struct returned, or just the Bitmap->Buffer, depending on usage). The macro MAYBE_FREE_BITMAP_BUFFER() should be used for this. (See Examples.) Reverse - Supplies a boolean flag indicating the bitmap index should be created in reverse. This is passed to FindCharsInUnicodeString(). 
FindCharsInUnicodeString - Supplies an optional pointer to a function that conforms to the PFIND_CHARS_IN_UNICODE_STRING signature. Return Value: TRUE on success, FALSE on error. Examples: A stack-allocated bitmap structure and 256-byte buffer: CHAR StackBitmapBuffer[256]; RTL_BITMAP Bitmap = { 32 << 3, (PULONG)&StackBitmapBuffer }; PRTL_BITMAP BitmapPointer = &Bitmap; HANDLE HeapHandle; BOOL Success = CreateBitmapIndexForUnicodeString(Rtl, String, L'\\', &HeapHandle, &BitmapPointer, FALSE); ... MAYBE_FREE_BITMAP_BUFFER(BitmapPointer, StackBitmapBuffer); return; --*/ { USHORT NumberOfCharacters; USHORT AlignedNumberOfCharacters; SIZE_T BitmapBufferSizeInBytes; BOOL UpdateBitmapPointer; BOOL UpdateHeapHandlePointer; BOOL Success; HANDLE HeapHandle = NULL; PRTL_BITMAP Bitmap = NULL; PFIND_CHARS_IN_UNICODE_STRING FindChars; // // Verify arguments. // if (!ARGUMENT_PRESENT(Rtl)) { return FALSE; } if (!ARGUMENT_PRESENT(String)) { return FALSE; } if (!ARGUMENT_PRESENT(HeapHandlePointer)) { return FALSE; } if (!ARGUMENT_PRESENT(BitmapPointer)) { return FALSE; } // // Resolve the number of characters, then make sure it's aligned to the // platform's pointer size. // NumberOfCharacters = String->Length >> 1; AlignedNumberOfCharacters = ( ALIGN_UP_USHORT_TO_POINTER_SIZE( NumberOfCharacters ) ); BitmapBufferSizeInBytes = AlignedNumberOfCharacters >> 3; // // If *BitmapPointer is non-NULL, see if it's big enough to hold the bitmap. // if (*BitmapPointer) { if ((*BitmapPointer)->SizeOfBitMap >= AlignedNumberOfCharacters) { // // The user-provided bitmap is big enough. Jump straight to the // starting point. // Bitmap = *BitmapPointer; UpdateHeapHandlePointer = FALSE; UpdateBitmapPointer = FALSE; goto Start; } } if (!*HeapHandlePointer) { // // If the pointer to the heap handle to use is NULL, default to using // the default process heap via GetProcessHeap(). 
Note that we also // assign back to the user's pointer, such that they get a copy of the // heap handle that was used for allocation. // HeapHandle = GetProcessHeap(); if (!HeapHandle) { return FALSE; } UpdateHeapHandlePointer = TRUE; } else { // // Use the handle the user provided. // HeapHandle = *HeapHandlePointer; UpdateHeapHandlePointer = FALSE; } if (!*BitmapPointer) { // // If the pointer to the PRTL_BITMAP structure is NULL, the caller // wants us to allocate the space for the RTL_BITMAP structure as // well. // SIZE_T AllocationSize = BitmapBufferSizeInBytes + sizeof(RTL_BITMAP); Bitmap = (PRTL_BITMAP)HeapAlloc(HeapHandle, 0, AllocationSize); if (!Bitmap) { return FALSE; } // // Point the bitmap buffer to the end of the RTL_BITMAP struct. // Bitmap->Buffer = (PULONG)( RtlOffsetToPointer( Bitmap, sizeof(RTL_BITMAP) ) ); // // Make a note that we need to update the user's bitmap pointer. // UpdateBitmapPointer = TRUE; } else { // // The user has provided an existing PRTL_BITMAP structure, so we // only need to allocate memory for the actual underlying bitmap // buffer. // Bitmap = *BitmapPointer; Bitmap->Buffer = (PULONG)( HeapAlloc( HeapHandle, 0, BitmapBufferSizeInBytes ) ); if (!Bitmap->Buffer) { return FALSE; } // // Make a note that we do *not* need to update the user's bitmap // pointer. // UpdateBitmapPointer = FALSE; } Start: // // There will be one bit per character. // Bitmap->SizeOfBitMap = AlignedNumberOfCharacters; if (!Bitmap->Buffer) { __debugbreak(); } // // Clear all bits in the bitmap. // Rtl->RtlClearAllBits(Bitmap); // // Fill in the bitmap index. // FindChars = FindCharsFunction; if (!FindChars) { FindChars = FindCharsInUnicodeString; } Success = FindChars(Rtl, String, Char, Bitmap, Reverse); if (!Success && HeapHandle) { // // HeapHandle will only be set if we had to do heap allocations. // if (UpdateBitmapPointer) { // // Free the entire structure. // HeapFree(HeapHandle, 0, Bitmap); } else { // // Free just the buffer. 
// HeapFree(HeapHandle, 0, Bitmap->Buffer); } return FALSE; } // // Update caller's pointers if applicable, then return successfully. // if (UpdateBitmapPointer) { *BitmapPointer = Bitmap; } if (UpdateHeapHandlePointer) { *HeapHandlePointer = HeapHandle; } return TRUE; } _Check_return_ BOOL CreateBitmapIndexForString( _In_ PRTL Rtl, _In_ PCSTRING String, _In_ CHAR Char, _Inout_ PHANDLE HeapHandlePointer, _Inout_ PPRTL_BITMAP BitmapPointer, _In_ BOOL Reverse, _In_opt_ PFIND_CHARS_IN_STRING FindCharsFunction ) /*++ Routine Description: This is a helper function that simplifies creating bitmap indexes for STRING structures. The routine will use the user-supplied bitmap if it is big enough (governed by the SizeOfBitMap field). If it isn't, a new buffer will be allocated. If no bitmap is provided at all, the entire structure plus the bitmap buffer space will be allocated from the heap. Typically, callers would provide their own pointer to a stack-allocated RTL_BITMAP struct if they only need the bitmap for the scope of their function call. For longer-lived bitmaps, a pointer to a NULL pointer would be provided, indicating that the entire structure should be heap allocated. Caller is responsible for freeing either the entire RTL_BITMAP or the underlying Bitmap->Buffer if a heap allocation took place. Arguments: Rtl - Supplies the pointer to the RTL structure (mandatory). String - Supplies the STRING structure to create the bitmap index for. Char - Supplies the character to create the bitmap index for. This is passed directly to FindCharsInString(). HeapHandlePointer - Supplies a pointer to the underlying heap handle to use for allocation. If a heap allocation is required and this pointer points to a NULL value, the default process heap handle will be used (obtained via GetProcessHeap()), and the pointed-to location will be updated with the handle value. (The caller will need this in order to perform the subsequent HeapFree() of the relevant structure.) 
BitmapPointer - Supplies a pointer to a PRTL_BITMAP structure. If the pointed-to location is NULL, additional space for the RTL_BITMAP structure will be allocated on top of the bitmap buffer space, and the pointed-to location will be updated with the resulting address. If the pointed-to location is non-NULL and the SizeOfBitMap field is greater than or equal to the required bitmap size, the bitmap will be used directly and no heap allocations will take place. The SizeOfBitMap field in this example will be altered to match the required size. If a heap allocation takes place, user is responsible for cleaning it up (i.e. either freeing the entire PRTL_BITMAP struct returned, or just the Bitmap->Buffer, depending on usage). The macro MAYBE_FREE_BITMAP_BUFFER() should be used for this. (See Examples.) Reverse - Supplies a boolean flag indicating the bitmap index should be created in reverse. This is passed to FindCharsInString(). FindCharsInString - Supplies an optional pointer to a function that conforms to the PFIND_CHARS_IN_STRING signature. Return Value: TRUE on success, FALSE on error. Examples: A stack-allocated bitmap structure and buffer: CHAR StackBitmapBuffer[256]; RTL_BITMAP Bitmap = { 32 << 3, (PULONG)&StackBitmapBuffer }; PRTL_BITMAP BitmapPointer = &Bitmap; HANDLE HeapHandle; BOOL Success = CreateBitmapIndexForString(Rtl, String, '\\', &HeapHandle, &BitmapPointer, FALSE); ... MAYBE_FREE_BITMAP_BUFFER(BitmapPointer, StackBitmapBuffer); return; --*/ { USHORT NumberOfCharacters; USHORT AlignedNumberOfCharacters; SIZE_T BitmapBufferSizeInBytes; BOOL UpdateBitmapPointer; BOOL UpdateHeapHandlePointer; BOOL Success; HANDLE HeapHandle = NULL; PRTL_BITMAP Bitmap = NULL; PFIND_CHARS_IN_STRING FindChars; // // Verify arguments. 
// if (!ARGUMENT_PRESENT(Rtl)) { return FALSE; } if (!ARGUMENT_PRESENT(String)) { return FALSE; } if (!ARGUMENT_PRESENT(HeapHandlePointer)) { return FALSE; } if (!ARGUMENT_PRESENT(BitmapPointer)) { return FALSE; } // // Resolve the number of characters, then make sure it's aligned to the // platform's pointer size. // NumberOfCharacters = String->Length; AlignedNumberOfCharacters = ( ALIGN_UP_USHORT_TO_POINTER_SIZE( NumberOfCharacters ) ); BitmapBufferSizeInBytes = AlignedNumberOfCharacters >> 3; // // If *BitmapPointer is non-NULL, see if it's big enough to hold the bitmap. // if (*BitmapPointer) { if ((*BitmapPointer)->SizeOfBitMap >= AlignedNumberOfCharacters) { // // The user-provided bitmap is big enough. Jump straight to the // starting point. // Bitmap = *BitmapPointer; UpdateHeapHandlePointer = FALSE; UpdateBitmapPointer = FALSE; goto Start; } } if (!*HeapHandlePointer) { // // If the pointer to the heap handle to use is NULL, default to using // the default process heap via GetProcessHeap(). Note that we also // assign back to the user's pointer, such that they get a copy of the // heap handle that was used for allocation. // HeapHandle = GetProcessHeap(); if (!HeapHandle) { return FALSE; } UpdateHeapHandlePointer = TRUE; } else { // // Use the handle the user provided. // HeapHandle = *HeapHandlePointer; UpdateHeapHandlePointer = FALSE; } if (!*BitmapPointer) { // // If the pointer to the PRTL_BITMAP structure is NULL, the caller // wants us to allocate the space for the RTL_BITMAP structure as // well. // SIZE_T AllocationSize = BitmapBufferSizeInBytes + sizeof(RTL_BITMAP); Bitmap = (PRTL_BITMAP)HeapAlloc(HeapHandle, 0, AllocationSize); if (!Bitmap) { return FALSE; } // // Point the bitmap buffer to the end of the RTL_BITMAP struct. // Bitmap->Buffer = (PULONG)( RtlOffsetToPointer( Bitmap, sizeof(RTL_BITMAP) ) ); // // Make a note that we need to update the user's bitmap pointer. 
// UpdateBitmapPointer = TRUE; } else { // // The user has provided an existing PRTL_BITMAP structure, so we // only need to allocate memory for the actual underlying bitmap // buffer. // Bitmap = *BitmapPointer; Bitmap->Buffer = (PULONG)( HeapAlloc( HeapHandle, HEAP_ZERO_MEMORY, BitmapBufferSizeInBytes ) ); if (!Bitmap->Buffer) { return FALSE; } // // Make a note that we do *not* need to update the user's bitmap // pointer. // UpdateBitmapPointer = FALSE; } Start: if (!Bitmap->Buffer) { __debugbreak(); } // // Clear the bitmap. We use SecureZeroMemory() instead of RtlClearAllBits() // as the latter is dependent upon the SizeOfBitMap field, which a) isn't // set here, and b) will be set to NumberOfCharacters when it is set, which // may be less than AlignedNumberOfCharacters, which means some trailing // bits could be non-zero if we are re-using the caller's stack-allocated // bitmap buffer. // SecureZeroMemory(Bitmap->Buffer, BitmapBufferSizeInBytes); // // There will be one bit per character. // // Bitmap->SizeOfBitMap = NumberOfCharacters; // // Fill in the bitmap index. // FindChars = FindCharsFunction; if (!FindChars) { FindChars = FindCharsInString; } Success = FindChars(Rtl, String, Char, Bitmap, Reverse); if (!Success && HeapHandle) { // // HeapHandle will only be set if we had to do heap allocations. // if (UpdateBitmapPointer) { // // Free the entire structure. // HeapFree(HeapHandle, 0, Bitmap); } else { // // Free just the buffer. // HeapFree(HeapHandle, 0, Bitmap->Buffer); } return FALSE; } // // Update caller's pointers if applicable, then return successfully. 
// if (UpdateBitmapPointer) { *BitmapPointer = Bitmap; } if (UpdateHeapHandlePointer) { *HeapHandlePointer = HeapHandle; } return TRUE; } _Check_return_ BOOL FilesExistW( _In_ PRTL Rtl, _In_ PUNICODE_STRING Directory, _In_ USHORT NumberOfFilenames, _In_ PPUNICODE_STRING Filenames, _Out_ PBOOL Exists, _Out_opt_ PUSHORT WhichIndex, _Out_opt_ PPUNICODE_STRING WhichFilename ) { USHORT Index; PWCHAR HeapBuffer; ULONG CombinedSizeInBytes; USHORT DirectoryLength; USHORT MaxFilenameLength = 0; UNICODE_STRING Path; PUNICODE_STRING Filename; DWORD Attributes; BOOL Success = FALSE; HANDLE HeapHandle = NULL; WCHAR StackBuffer[_MAX_PATH]; // // Validate arguments. // if (!ARGUMENT_PRESENT(Rtl)) { return FALSE; } if (!ARGUMENT_PRESENT(Directory)) { return FALSE; } if (NumberOfFilenames == 0) { return FALSE; } if (!ARGUMENT_PRESENT(Filenames) || !ARGUMENT_PRESENT(Filenames[0])) { return FALSE; } for (Index = 0; Index < NumberOfFilenames; Index++) { BOOL SanityCheck; Filename = Filenames[Index]; // // Quick sanity check that the Filename pointer in the array // entry is non-NULL, the Length member is greater than 0, // and the buffer has a non-NULL value. // SanityCheck = ( Filename && Filename->Length > 0 && Filename->Buffer != NULL ); if (!SanityCheck) { __debugbreak(); } // // Update our local maximum filename length variable if applicable. // if (Filename->Length > MaxFilenameLength) { MaxFilenameLength = Filename->Length; } } // // See if the combined size of the extended volume prefix ("\\?\"), // directory, joining backslash, maximum filename length and terminating // NUL is less than or equal to _MAX_PATH. If it is, we can use the // stack-allocated Path buffer above; if not, allocate a new buffer from // the default heap. 
// CombinedSizeInBytes = ( ExtendedLengthVolumePrefixW.Length + Directory->Length + sizeof(WCHAR) + // joining backslash MaxFilenameLength + sizeof(WCHAR) // terminating NUL ); // // Point Path->Buffer at the stack or heap buffer depending on the // combined size. // if (CombinedSizeInBytes <= _MAX_PATH) { // // We can use our stack buffer. // Path.Buffer = &StackBuffer[0]; } else if (CombinedSizeInBytes > MAX_USTRING) { goto Error; } else { // // The combined size exceeds _MAX_PATH so allocate the required memory // from the heap. // HeapHandle = GetProcessHeap(); if (!HeapHandle) { return FALSE; } HeapBuffer = (PWCHAR)HeapAlloc(HeapHandle, 0, CombinedSizeInBytes); if (!HeapBuffer) { return FALSE; } Path.Buffer = HeapBuffer; } Path.Length = 0; Path.MaximumLength = (USHORT)CombinedSizeInBytes; // // Copy the volume prefix, then append the directory and joining backslash. // Rtl->RtlCopyUnicodeString(&Path, &ExtendedLengthVolumePrefixW); if (FAILED(Rtl->RtlAppendUnicodeStringToString(&Path, Directory)) || !AppendUnicodeCharToUnicodeString(&Path, L'\\')) { goto Error; } // // Make a note of the length at this point as we'll need to revert to it // after each unsuccessful file test. // DirectoryLength = Path.Length; // // Enumerate over the array of filenames and look for the first one that // exists. // for (Index = 0; Index < NumberOfFilenames; Index++) { Filename = Filenames[Index]; // // We've already validated our lengths, so these should never fail. // if (FAILED(Rtl->RtlAppendUnicodeStringToString(&Path, Filename)) || !AppendUnicodeCharToUnicodeString(&Path, L'\0')) { goto Error; } // // We successfully constructed the path, so we can now look up the file // attributes. // Attributes = GetFileAttributesW(Path.Buffer); if (Attributes == INVALID_FILE_ATTRIBUTES || (Attributes & FILE_ATTRIBUTE_DIRECTORY)) { // // File doesn't exist or is a directory. Reset the path length // and continue. // Path.Length = DirectoryLength; continue; } // // Success! 
File exists and *isn't* a directory. We're done. // Success = TRUE; break; } if (!Success) { *Exists = FALSE; // // The files didn't exist, but no error occurred, so we return success. // Success = TRUE; } else { *Exists = TRUE; // // Update the user's pointers if applicable. // if (ARGUMENT_PRESENT(WhichIndex)) { *WhichIndex = Index; } if (ARGUMENT_PRESENT(WhichFilename)) { *WhichFilename = Filename; } } // // Intentional follow-on to "Error"; Success code will be set // appropriately by this stage. // Error: if (HeapHandle) { HeapFree(HeapHandle, 0, Path.Buffer); } return Success; } _Check_return_ BOOL FilesExistExW( _In_ PRTL Rtl, _In_ PUNICODE_STRING Directory, _In_ USHORT NumberOfFilenames, _In_ PPUNICODE_STRING Filenames, _Out_ PBOOL Exists, _Out_opt_ PUSHORT WhichIndex, _Out_opt_ PPUNICODE_STRING WhichFilename, _In_ PALLOCATOR Allocator ) { USHORT Index; PWCHAR HeapBuffer; ULONG CombinedSizeInBytes; USHORT DirectoryLength; USHORT MaxFilenameLength = 0; UNICODE_STRING Path; PUNICODE_STRING Filename; DWORD Attributes; BOOL Success = FALSE; HANDLE HeapHandle = NULL; WCHAR StackBuffer[_MAX_PATH]; // // Validate arguments. // if (!ARGUMENT_PRESENT(Rtl)) { return FALSE; } if (!ARGUMENT_PRESENT(Directory)) { return FALSE; } if (NumberOfFilenames == 0) { return FALSE; } if (!ARGUMENT_PRESENT(Filenames) || !ARGUMENT_PRESENT(Filenames[0])) { return FALSE; } for (Index = 0; Index < NumberOfFilenames; Index++) { BOOL SanityCheck; Filename = Filenames[Index]; // // Quick sanity check that the Filename pointer in the array // entry is non-NULL, the Length member is greater than 0, // and the buffer has a non-NULL value. // SanityCheck = ( Filename && Filename->Length > 0 && Filename->Buffer != NULL ); if (!SanityCheck) { __debugbreak(); } // // Update our local maximum filename length variable if applicable. 
// if (Filename->Length > MaxFilenameLength) { MaxFilenameLength = Filename->Length; } } // // See if the combined size of the extended volume prefix ("\\?\"), // directory, joining backslash, maximum filename length and terminating // NUL is less than or equal to _MAX_PATH. If it is, we can use the // stack-allocated Path buffer above; if not, allocate a new buffer from // the default heap. // CombinedSizeInBytes = ( ExtendedLengthVolumePrefixW.Length + Directory->Length + sizeof(WCHAR) + // joining backslash MaxFilenameLength + sizeof(WCHAR) // terminating NUL ); // // Point Path->Buffer at the stack or heap buffer depending on the // combined size. // if (CombinedSizeInBytes <= _MAX_PATH) { // // We can use our stack buffer. // Path.Buffer = &StackBuffer[0]; } else if (CombinedSizeInBytes > MAX_USTRING) { goto Error; } else { // // The combined size exceeds _MAX_PATH so allocate the required memory. // HeapBuffer = (PWCHAR)( Allocator->Calloc( Allocator->Context, 1, CombinedSizeInBytes ) ); if (!HeapBuffer) { return FALSE; } Path.Buffer = HeapBuffer; } Path.Length = 0; Path.MaximumLength = (USHORT)CombinedSizeInBytes; // // Copy the volume prefix, then append the directory and joining backslash. // Rtl->RtlCopyUnicodeString(&Path, &ExtendedLengthVolumePrefixW); if (FAILED(Rtl->RtlAppendUnicodeStringToString(&Path, Directory)) || !AppendUnicodeCharToUnicodeString(&Path, L'\\')) { goto Error; } // // Make a note of the length at this point as we'll need to revert to it // after each unsuccessful file test. // DirectoryLength = Path.Length; // // Enumerate over the array of filenames and look for the first one that // exists. // for (Index = 0; Index < NumberOfFilenames; Index++) { Filename = Filenames[Index]; // // We've already validated our lengths, so these should never fail. 
// if (FAILED(Rtl->RtlAppendUnicodeStringToString(&Path, Filename)) || !AppendUnicodeCharToUnicodeString(&Path, L'\0')) { goto Error; } // // We successfully constructed the path, so we can now look up the file // attributes. // Attributes = GetFileAttributesW(Path.Buffer); if (Attributes == INVALID_FILE_ATTRIBUTES || (Attributes & FILE_ATTRIBUTE_DIRECTORY)) { // // File doesn't exist or is a directory. Reset the path length // and continue. // Path.Length = DirectoryLength; continue; } // // Success! File exists and *isn't* a directory. We're done. // Success = TRUE; break; } if (!Success) { *Exists = FALSE; // // The files didn't exist, but no error occurred, so we return success. // Success = TRUE; } else { *Exists = TRUE; // // Update the user's pointers if applicable. // if (ARGUMENT_PRESENT(WhichIndex)) { *WhichIndex = Index; } if (ARGUMENT_PRESENT(WhichFilename)) { *WhichFilename = Filename; } } // // Intentional follow-on to "Error"; Success code will be set // appropriately by this stage. // Error: if (HeapBuffer) { Allocator->Free(Allocator->Context, HeapBuffer); } return Success; } _Success_(return != 0) _Check_return_ BOOL FilesExistA( _In_ PRTL Rtl, _In_ PSTRING Directory, _In_ USHORT NumberOfFilenames, _In_ PPSTRING Filenames, _Out_ PBOOL Exists, _Out_opt_ PUSHORT WhichIndex, _Out_opt_ PPSTRING WhichFilename ) { USHORT Index; PCHAR HeapBuffer; ULONG CombinedSizeInBytes; USHORT DirectoryLength; USHORT MaxFilenameLength = 0; STRING Path; PSTRING Filename; DWORD Attributes; BOOL Success = FALSE; HANDLE HeapHandle = NULL; CHAR StackBuffer[_MAX_PATH]; // // Validate arguments. 
// if (!ARGUMENT_PRESENT(Rtl)) { return FALSE; } if (!ARGUMENT_PRESENT(Directory)) { return FALSE; } if (NumberOfFilenames == 0) { return FALSE; } if (!ARGUMENT_PRESENT(Filenames) || !ARGUMENT_PRESENT(Filenames[0])) { return FALSE; } for (Index = 0; Index < NumberOfFilenames; Index++) { BOOL SanityCheck; Filename = Filenames[Index]; // // Quick sanity check that the Filename pointer in the array // entry is non-NULL, the Length member is greater than 0, // and the buffer has a non-NULL value. // SanityCheck = ( Filename && Filename->Length > 0 && Filename->Buffer != NULL ); if (!SanityCheck) { __debugbreak(); } if (Filename->Length > MaxFilenameLength) { MaxFilenameLength = Filename->Length; } } // // See if the combined size of the extended volume prefix ("\\?\"), // directory, joining backslash, maximum filename length and terminating // NUL is less than or equal to _MAX_PATH. If it is, we can use the // stack-allocated Path buffer above; if not, allocate a new buffer from // the default heap. // CombinedSizeInBytes = ( ExtendedLengthVolumePrefixA.Length + Directory->Length + sizeof(CHAR) + // joining backslash MaxFilenameLength + sizeof(CHAR) // terminating NUL ); // // Point Path->Buffer at the stack or heap buffer depending on the // combined size. // if (CombinedSizeInBytes <= _MAX_PATH) { // // We can use our stack buffer. // Path.Buffer = &StackBuffer[0]; } else if (CombinedSizeInBytes > MAX_STRING) { goto Error; } else { // // The combined size exceeds _MAX_PATH so allocate the required memory // from the heap. // HeapHandle = GetProcessHeap(); if (!HeapHandle) { return FALSE; } HeapBuffer = (PCHAR)HeapAlloc(HeapHandle, 0, CombinedSizeInBytes); if (!HeapBuffer) { return FALSE; } Path.Buffer = HeapBuffer; } Path.Length = 0; Path.MaximumLength = (USHORT)CombinedSizeInBytes; // // Copy the volume prefix, then append the directory and joining backslash. 
// if (!CopyString(&Path, &ExtendedLengthVolumePrefixA)) { goto Error; } if (!AppendStringAndCharToString(&Path, Directory, '\\')) { goto Error; } // // Make a note of the length at this point as we'll need to revert to it // after each unsuccessful file test. // DirectoryLength = Path.Length; // // Enumerate over the array of filenames and look for the first one that // exists. // for (Index = 0; Index < NumberOfFilenames; Index++) { Filename = Filenames[Index]; // // We've already validated our lengths, so these should never fail. // if (!AppendStringAndCharToString(&Path, Filename, '\0')) { goto Error; } // // We successfully constructed the path, so we can now look up the file // attributes. // Attributes = GetFileAttributesA(Path.Buffer); if (Attributes == INVALID_FILE_ATTRIBUTES || (Attributes & FILE_ATTRIBUTE_DIRECTORY)) { // // File doesn't exist or is a directory. Reset the path length // and continue. // Path.Length = DirectoryLength; continue; } // // Success! File exists and *isn't* a directory. We're done. // Success = TRUE; break; } if (!Success) { *Exists = FALSE; // // The files didn't exist, but no error occurred, so we return success. // Success = TRUE; } else { *Exists = TRUE; // // Update the user's pointers if applicable. // if (ARGUMENT_PRESENT(WhichIndex)) { *WhichIndex = Index; } if (ARGUMENT_PRESENT(WhichFilename)) { *WhichFilename = Filename; } } // // Intentional follow-on to "Error"; Success code will be set // appropriately by this stage. 
// Error: if (HeapHandle) { HeapFree(HeapHandle, 0, Path.Buffer); } return Success; } _Success_(return != 0) BOOL CreateUnicodeString( _In_ PRTL Rtl, _In_ PCUNICODE_STRING Source, _Out_ PPUNICODE_STRING Destination, _In_ PALLOCATION_ROUTINE AllocationRoutine, _In_ PVOID AllocationContext ) { if (!ARGUMENT_PRESENT(Rtl)) { return FALSE; } if (!ARGUMENT_PRESENT(Source)) { return FALSE; } if (!ARGUMENT_PRESENT(Destination)) { return FALSE; } if (!ARGUMENT_PRESENT(AllocationRoutine)) { return FALSE; } return CreateUnicodeStringInline(Rtl, Source, Destination, AllocationRoutine, AllocationContext); } _Check_return_ BOOL LoadRtlSymbols(_Inout_ PRTL Rtl) { if (!(Rtl->Kernel32Module = LoadLibraryA("kernel32"))) { return FALSE; } if (!(Rtl->KernelBaseModule = LoadLibraryA("kernelbase"))) { return FALSE; } if (!(Rtl->NtdllModule = LoadLibraryA("ntdll"))) { return FALSE; } if (!(Rtl->NtosKrnlModule = LoadLibraryA("ntoskrnl.exe"))) { return FALSE; } Rtl->GetSystemTimePreciseAsFileTime = (PGETSYSTEMTIMEPRECISEASFILETIME) GetProcAddress(Rtl->Kernel32Module, "GetSystemTimePreciseAsFileTime"); Rtl->NtQuerySystemTime = (PNTQUERYSYSTEMTIME) GetProcAddress(Rtl->NtdllModule, "NtQuerySystemTime"); if (Rtl->GetSystemTimePreciseAsFileTime) { Rtl->SystemTimerFunction.GetSystemTimePreciseAsFileTime = Rtl->GetSystemTimePreciseAsFileTime; } else if (Rtl->NtQuerySystemTime) { Rtl->SystemTimerFunction.NtQuerySystemTime = Rtl->NtQuerySystemTime; } else { return FALSE; } if (!ResolveRtlFunctions(Rtl)) { return FALSE; } // // This is a hack; we need RtlCompareString() from within PCRTCOMPARE-type // functions passed to bsearch/qsort. 
// _RtlCompareString = Rtl->RtlCompareString; return TRUE; } _Use_decl_annotations_ BOOL ResolveRtlFunctions( PRTL Rtl ) { BOOL Success; ULONG NumberOfResolvedSymbols; ULONG ExpectedNumberOfResolvedSymbols; PULONG_PTR Functions = (PULONG_PTR)&Rtl->RtlFunctions; HMODULE Modules[] = { Rtl->KernelBaseModule, Rtl->Kernel32Module, Rtl->NtdllModule, Rtl->NtosKrnlModule, }; // // Temp hack in lieu of proper refactoring. // #ifdef Names #undef Names #endif #define Names RtlFunctionNames ULONG BitmapBuffer[(ALIGN_UP(ARRAYSIZE(Names), sizeof(ULONG) << 3) >> 5)+1]; RTL_BITMAP FailedBitmap = { ARRAYSIZE(Names)+1, (PULONG)&BitmapBuffer }; ExpectedNumberOfResolvedSymbols = ARRAYSIZE(Names); Success = LoadSymbolsFromMultipleModules( Names, ARRAYSIZE(Names), Functions, sizeof(Rtl->RtlFunctions) / sizeof(ULONG_PTR), Modules, ARRAYSIZE(Modules), &FailedBitmap, FALSE, &NumberOfResolvedSymbols ); if (!Success) { __debugbreak(); } if (ExpectedNumberOfResolvedSymbols != NumberOfResolvedSymbols) { PCSTR FirstFailedSymbolName; ULONG FirstFailedSymbol; ULONG NumberOfFailedSymbols; NumberOfFailedSymbols = Rtl->RtlNumberOfSetBits(&FailedBitmap); FirstFailedSymbol = Rtl->RtlFindSetBits(&FailedBitmap, 1, 0); FirstFailedSymbolName = Names[FirstFailedSymbol-1]; __debugbreak(); } #undef Names return TRUE; } _Use_decl_annotations_ BOOL ResolveRtlExFunctions( PRTL Rtl, HMODULE RtlExModule, PRTLEXFUNCTIONS RtlExFunctions ) { BOOL Success; ULONG NumberOfResolvedSymbols; ULONG ExpectedNumberOfResolvedSymbols; PULONG_PTR Functions = (PULONG_PTR)RtlExFunctions; #ifdef Names #undef Names #endif #define Names RtlExFunctionNames ULONG BitmapBuffer[(ALIGN_UP(ARRAYSIZE(Names), sizeof(ULONG) << 3) >> 5)+1]; RTL_BITMAP FailedBitmap = { ARRAYSIZE(Names)+1, (PULONG)&BitmapBuffer }; ExpectedNumberOfResolvedSymbols = ARRAYSIZE(Names); Success = LoadSymbols( Names, ARRAYSIZE(Names), Functions, sizeof(*RtlExFunctions) / sizeof(ULONG_PTR), RtlExModule, &FailedBitmap, TRUE, &NumberOfResolvedSymbols ); if (!Success) { 
__debugbreak();
    }

    if (ExpectedNumberOfResolvedSymbols != NumberOfResolvedSymbols) {

        //
        // One or more symbols could not be resolved.  The locals below are
        // populated purely to aid debugging (FirstFailedSymbolName can be
        // inspected at the debugbreak); the function still returns TRUE.
        //

        PCSTR FirstFailedSymbolName;
        ULONG FirstFailedSymbol;
        ULONG NumberOfFailedSymbols;

        NumberOfFailedSymbols = Rtl->RtlNumberOfSetBits(&FailedBitmap);
        FirstFailedSymbol = Rtl->RtlFindSetBits(&FailedBitmap, 1, 0);
        FirstFailedSymbolName = Names[FirstFailedSymbol-1];
        __debugbreak();
    }

#undef Names

    return TRUE;
}

//
// ResolveDbgHelpFunctions(): resolves the function names listed in
// DbgHelpFunctionNames from DbgHelpModule into the function-pointer slots of
// the DBG structure via LoadSymbols().  Bits set in FailedBitmap identify
// symbols that failed to resolve (1-based).  Note that resolution failures
// only trigger a __debugbreak(); the function returns TRUE on every visible
// path.
//

_Check_return_
BOOL
ResolveDbgHelpFunctions(
    _In_ PRTL Rtl,
    _In_ HMODULE DbgHelpModule,
    _In_ PDBG Dbg
    )
{
    BOOL Success;
    ULONG NumberOfResolvedSymbols;
    ULONG ExpectedNumberOfResolvedSymbols;

    //
    // Treat the DBG structure as a flat array of function pointers that
    // LoadSymbols() fills in positionally against the Names array.
    //

    PULONG_PTR Functions = (PULONG_PTR)Dbg;

#ifdef Names
#undef Names
#endif
#define Names DbgHelpFunctionNames

    //
    // End of auto-generated section.
    //

    //
    // One bit per symbol (rounded up to a ULONG boundary, plus one spare
    // ULONG); the bitmap is sized ARRAYSIZE(Names)+1 as failed-symbol
    // indices are recorded 1-based.
    //

    ULONG BitmapBuffer[(ALIGN_UP(ARRAYSIZE(Names), sizeof(ULONG) << 3) >> 5)+1];
    RTL_BITMAP FailedBitmap = { ARRAYSIZE(Names)+1, (PULONG)&BitmapBuffer };

    ExpectedNumberOfResolvedSymbols = ARRAYSIZE(Names);

    Success = LoadSymbols(
        Names,
        ARRAYSIZE(Names),
        Functions,
        sizeof(*Dbg) / sizeof(ULONG_PTR),
        DbgHelpModule,
        &FailedBitmap,
        TRUE,
        &NumberOfResolvedSymbols
    );

    if (!Success) {
        __debugbreak();
    }

    if (ExpectedNumberOfResolvedSymbols != NumberOfResolvedSymbols) {

        //
        // Debug-only diagnostics, as above; TRUE is still returned.
        //

        PCSTR FirstFailedSymbolName;
        ULONG FirstFailedSymbol;
        ULONG NumberOfFailedSymbols;

        NumberOfFailedSymbols = Rtl->RtlNumberOfSetBits(&FailedBitmap);
        FirstFailedSymbol = Rtl->RtlFindSetBits(&FailedBitmap, 1, 0);
        FirstFailedSymbolName = Names[FirstFailedSymbol-1];
        __debugbreak();
    }

#undef Names

    return TRUE;
}

//
// RtlCheckBit(): returns the value of bit BitPosition in the bitmap buffer.
// Implemented directly via BitTest/BitTest64 intrinsics rather than the
// SizeOfBitMap-aware NT helper; no bounds checking is performed against
// BitMapHeader->SizeOfBitMap.
//

RTL_API
BOOLEAN
RtlCheckBit(
    _In_ PRTL_BITMAP BitMapHeader,
    _In_ ULONG BitPosition
    )
{
#ifdef _M_AMD64
    return BitTest64((LONG64 const *)BitMapHeader->Buffer, (LONG64)BitPosition);
#else
    return BitTest((LONG const *)BitMapHeader->Buffer, (LONG)BitPosition);
#endif
}

//
// Functions for Splay Macros
//

/*
RTL_API
VOID
RtlInitializeSplayLinks(
    _Out_ PRTL_SPLAY_LINKS Links
    )
{
    Links->Parent = Links;
    Links->LeftChild = NULL;
    Links->RightChild = NULL;
}

RTL_API
PRTL_SPLAY_LINKS
RtlParent(_In_
PRTL_SPLAY_LINKS Links) { return Links->Parent; } RTL_API PRTL_SPLAY_LINKS RtlLeftChild(_In_ PRTL_SPLAY_LINKS Links) { return Links->LeftChild; } RTL_API PRTL_SPLAY_LINKS RtlRightChild(_In_ PRTL_SPLAY_LINKS Links) { return Links->RightChild; } RTL_API BOOLEAN RtlIsRoot(_In_ PRTL_SPLAY_LINKS Links) { return (RtlParent(Links) == Links); } RTL_API BOOLEAN RtlIsLeftChild(_In_ PRTL_SPLAY_LINKS Links) { return (RtlLeftChild(RtlParent(Links)) == Links); } RTL_API BOOLEAN RtlIsRightChild(_In_ PRTL_SPLAY_LINKS Links) { return (RtlRightChild(RtlParent(Links)) == Links); } RTL_API VOID RtlInsertAsLeftChild ( _Inout_ PRTL_SPLAY_LINKS ParentLinks, _Inout_ PRTL_SPLAY_LINKS ChildLinks ) { ParentLinks->LeftChild = ChildLinks; ChildLinks->Parent = ParentLinks; } RTL_API VOID RtlInsertAsRightChild ( _Inout_ PRTL_SPLAY_LINKS ParentLinks, _Inout_ PRTL_SPLAY_LINKS ChildLinks ) { ParentLinks->RightChild = ChildLinks; ChildLinks->Parent = ParentLinks; } */ RTL_API LONG CompareStringCaseInsensitive( _In_ PCSTRING String1, _In_ PCSTRING String2 ) { return _RtlCompareString(String1, String2, FALSE); } _Check_return_ BOOL LoadRtlExSymbols( _In_opt_ HMODULE RtlExModule, _Inout_ PRTL Rtl ) { HMODULE Module; if (!Rtl) { return FALSE; } if (RtlExModule) { Module = RtlExModule; } else { LPCTSTR Target = (LPCTSTR)ResolveRtlExFunctions; DWORD Flags = ( GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT ); if (!GetModuleHandleEx(Flags, Target, &Module)) { return FALSE; } if (!Module) { return FALSE; } } if (!ResolveRtlExFunctions(Rtl, Module, &Rtl->RtlExFunctions)) { return FALSE; } return TRUE; } _Check_return_ _Success_(return != 0) BOOL InitializeWindowsDirectories( _In_ PRTL Rtl ) { PWSTR Dest; ULONG_INTEGER SizeInBytesExcludingNull; ULONG_INTEGER SizeInBytesIncludingNull; ULONG_INTEGER LengthInCharsExcludingNull; ULONG_INTEGER LengthInCharsIncludingNull; PUNICODE_STRING WindowsDirectory; PUNICODE_STRING WindowsSxSDirectory; PUNICODE_STRING 
WindowsSystemDirectory; const UNICODE_STRING WinSxS = RTL_CONSTANT_STRING(L"\\WinSxS"); // // Initialize aliases. // WindowsDirectory = &Rtl->WindowsDirectory; LengthInCharsIncludingNull.LongPart = GetWindowsDirectory(NULL, 0); if (!LengthInCharsIncludingNull.LongPart) { Rtl->LastError = GetLastError(); return FALSE; } // // Sanity check the size isn't above MAX_USHORT. // SizeInBytesIncludingNull.LongPart = ( LengthInCharsIncludingNull.LongPart << 1 ); if (SizeInBytesIncludingNull.HighPart) { __debugbreak(); return FALSE; } // // Allocate space for the buffer. // WindowsDirectory->Buffer = (PWSTR)( HeapAlloc( Rtl->HeapHandle, 0, SizeInBytesIncludingNull.LongPart ) ); if (!WindowsDirectory->Buffer) { Rtl->LastError = GetLastError(); return FALSE; } // // Initialize lengths. // SizeInBytesExcludingNull.LongPart = ( SizeInBytesIncludingNull.LongPart - sizeof(WCHAR) ); WindowsDirectory->Length = SizeInBytesExcludingNull.LowPart; WindowsDirectory->MaximumLength = SizeInBytesIncludingNull.LowPart; // // Call GetWindowsDirectory() again with the newly allocated buffer. // LengthInCharsExcludingNull.LongPart = ( GetWindowsDirectory( WindowsDirectory->Buffer, LengthInCharsIncludingNull.LongPart ) ); if (LengthInCharsExcludingNull.LongPart + 1 != LengthInCharsIncludingNull.LongPart) { Rtl->LastError = GetLastError(); return FALSE; } // // Now process WinSxS directory. // WindowsSxSDirectory = &Rtl->WindowsSxSDirectory; // // Add the length of the "\\WinSxS" suffix. Use WinSxS.Length as we've // already accounted for the trailing NULL. // LengthInCharsIncludingNull.LongPart += WinSxS.Length >> 1; // // Convert into size in bytes. // SizeInBytesIncludingNull.LongPart = ( LengthInCharsIncludingNull.LongPart << 1 ); // // Sanity check the size isn't above MAX_USHORT. // if (SizeInBytesIncludingNull.HighPart) { __debugbreak(); return FALSE; } // // Allocate space for the buffer. 
// WindowsSxSDirectory->Buffer = (PWSTR)( HeapAlloc( Rtl->HeapHandle, 0, SizeInBytesIncludingNull.LongPart ) ); if (!WindowsSxSDirectory->Buffer) { Rtl->LastError = GetLastError(); return FALSE; } // // Initialize lengths. // SizeInBytesExcludingNull.LongPart = ( SizeInBytesIncludingNull.LongPart - sizeof(WCHAR) ); WindowsSxSDirectory->Length = SizeInBytesExcludingNull.LowPart; WindowsSxSDirectory->MaximumLength = SizeInBytesIncludingNull.LowPart; // // Copy the Windows directory prefix over, excluding the terminating NULL. // Dest = WindowsSxSDirectory->Buffer; __movsw((PWORD)Dest, (PWORD)WindowsDirectory->Buffer, WindowsDirectory->Length >> 1); // // Copy the "\\WinSxS" suffix. // Dest += (WindowsDirectory->Length >> 1); __movsw((PWORD)Dest, (PWORD)WinSxS.Buffer, WinSxS.Length >> 1); // // Add terminating NULL. // Dest += (WinSxS.Length >> 1); *Dest = L'\0'; // // Sanity check things are where they should be. // if (WindowsSxSDirectory->Buffer[WindowsDirectory->Length >> 1] != L'\\') { __debugbreak(); } if (WindowsSxSDirectory->Buffer[WindowsSxSDirectory->Length>>1] != L'\0') { __debugbreak(); } // // Now do the Windows system directory. // WindowsSystemDirectory = &Rtl->WindowsSystemDirectory; LengthInCharsIncludingNull.LongPart = GetSystemDirectory(NULL, 0); if (!LengthInCharsIncludingNull.LongPart) { Rtl->LastError = GetLastError(); return FALSE; } // // Sanity check the size isn't above MAX_USHORT. // SizeInBytesIncludingNull.LongPart = ( LengthInCharsIncludingNull.LongPart << 1 ); if (SizeInBytesIncludingNull.HighPart) { __debugbreak(); return FALSE; } // // Allocate space for the buffer. // WindowsSystemDirectory->Buffer = (PWSTR)( HeapAlloc( Rtl->HeapHandle, 0, SizeInBytesIncludingNull.LongPart ) ); if (!WindowsSystemDirectory->Buffer) { Rtl->LastError = GetLastError(); return FALSE; } // // Initialize lengths. 
// SizeInBytesExcludingNull.LongPart = ( SizeInBytesIncludingNull.LongPart - sizeof(WCHAR) ); WindowsSystemDirectory->Length = SizeInBytesExcludingNull.LowPart; WindowsSystemDirectory->MaximumLength = SizeInBytesIncludingNull.LowPart; // // Call GetSystemDirectory() again with the newly allocated buffer. // LengthInCharsExcludingNull.LongPart = ( GetSystemDirectory( WindowsSystemDirectory->Buffer, LengthInCharsIncludingNull.LongPart ) ); if (LengthInCharsExcludingNull.LongPart + 1 != LengthInCharsIncludingNull.LongPart) { Rtl->LastError = GetLastError(); return FALSE; } return TRUE; } RTL_API CRYPT_GEN_RANDOM RtlCryptGenRandom; _Use_decl_annotations_ BOOL RtlCryptGenRandom( PRTL Rtl, ULONG SizeOfBufferInBytes, PBYTE Buffer ) { if (!ARGUMENT_PRESENT(Rtl)) { return FALSE; } if (!ARGUMENT_PRESENT(Rtl->CryptProv)) { return FALSE; } if (!ARGUMENT_PRESENT(Buffer)) { return FALSE; } if (!CryptGenRandom(Rtl->CryptProv, SizeOfBufferInBytes, Buffer)) { Rtl->LastError = GetLastError(); return FALSE; } return TRUE; } RTL_API RTL_SET_DLL_PATH RtlpSetDllPath; _Use_decl_annotations_ BOOL RtlpSetDllPath( PRTL Rtl, PALLOCATOR Allocator, PCUNICODE_STRING Path ) { return AllocateAndCopyUnicodeString(Allocator, Path, &Rtl->RtlDllPath); } RTL_API RTL_SET_INJECTION_THUNK_DLL_PATH RtlpSetInjectionThunkDllPath; _Use_decl_annotations_ BOOL RtlpSetInjectionThunkDllPath( PRTL Rtl, PALLOCATOR Allocator, PCUNICODE_STRING Path ) { return AllocateAndCopyUnicodeString(Allocator, Path, &Rtl->InjectionThunkDllPath); } BOOL InitCrypt32(PRTL Rtl) { if (Rtl->Flags.Crypt32Initialized) { return TRUE; } Rtl->Crypt32Module = LoadLibraryA("crypt32"); if (!Rtl->Crypt32Module) { __debugbreak(); return FALSE; } Rtl->CryptBinaryToStringA = (PCRYPT_BINARY_TO_STRING_A)( GetProcAddress(Rtl->Crypt32Module, "CryptBinaryToStringA") ); if (!Rtl->CryptBinaryToStringA) { __debugbreak(); return FALSE; } Rtl->CryptBinaryToStringW = (PCRYPT_BINARY_TO_STRING_W)( GetProcAddress(Rtl->Crypt32Module, "CryptBinaryToStringW") ); 
if (!Rtl->CryptBinaryToStringW) { __debugbreak(); return FALSE; } Rtl->Flags.Crypt32Initialized = TRUE; return TRUE; } RTL_API RTL_CREATE_NAMED_EVENT RtlpCreateNamedEvent; _Use_decl_annotations_ BOOL RtlpCreateNamedEvent( PRTL Rtl, PALLOCATOR Allocator, PHANDLE HandlePointer, LPSECURITY_ATTRIBUTES EventAttributes, BOOL ManualReset, BOOL InitialState, PCUNICODE_STRING Prefix, PCUNICODE_STRING Suffix, PUNICODE_STRING EventName ) { BOOL Success; HRESULT Result; ULONG CryptFlags; USHORT BytesRemaining; HANDLE Handle; const UNICODE_STRING Local = RTL_CONSTANT_STRING(L"Local\\"); BYTE LocalBuffer[64]; WCHAR WideBase64Buffer[86]; ULONG WideBase64BufferLengthInChars = ARRAYSIZE(WideBase64Buffer); ULONG WideBase64BufferSizeInBytes = sizeof(WideBase64Buffer); // // Clear the caller's pointer up-front. // *HandlePointer = NULL; if (!Rtl->Flags.Crypt32Initialized) { if (!InitCrypt32(Rtl)) { return FALSE; } } if (EventName->Length != 0) { __debugbreak(); return FALSE; } Result = Rtl->RtlAppendUnicodeStringToString(EventName, &Local); if (FAILED(Result)) { __debugbreak(); return FALSE; } if (ARGUMENT_PRESENT(Prefix)) { Result = Rtl->RtlAppendUnicodeStringToString(EventName, Prefix); if (FAILED(Result)) { __debugbreak(); return FALSE; } } BytesRemaining = ( (EventName->MaximumLength - sizeof(WCHAR)) - EventName->Length ); if (ARGUMENT_PRESENT(Suffix)) { BytesRemaining -= Suffix->Length; } if (BytesRemaining <= 7) { __debugbreak(); return FALSE; } // // Cap the size to the number of bytes remaining. // if (BytesRemaining >= WideBase64BufferSizeInBytes) { BytesRemaining = (USHORT)WideBase64BufferSizeInBytes; } // // (I'm being lazy; just generate 64 bytes of random data into the local // buffer instead of fiddling with exact lengths and whatnot.) 
// Success = Rtl->CryptGenRandom(Rtl, sizeof(LocalBuffer)-1, (PBYTE)&LocalBuffer); if (!Success) { __debugbreak(); return FALSE; } CryptFlags = CRYPT_STRING_BASE64 | CRYPT_STRING_NOCRLF; Success = Rtl->CryptBinaryToStringW((const PBYTE)&LocalBuffer, sizeof(LocalBuffer)-1, CryptFlags, (LPWSTR)&WideBase64Buffer, &WideBase64BufferLengthInChars); if (!Success) { Rtl->LastError = GetLastError(); __debugbreak(); return FALSE; } // // Forcibly NULL-terminate the wide character buffer based on our number of // bytes remaining. // WideBase64Buffer[(BytesRemaining >> 1)] = L'\0'; // // Copy the random data over. // Result = Rtl->RtlAppendUnicodeToString(EventName, WideBase64Buffer); if (FAILED(Result)) { __debugbreak(); return FALSE; } // // If there was a suffix, copy that over. // if (ARGUMENT_PRESENT(Suffix)) { Result = Rtl->RtlAppendUnicodeStringToString(EventName, Suffix); if (FAILED(Result)) { __debugbreak(); return FALSE; } } // // Invariant checks. // if (EventName->Length >= EventName->MaximumLength) { __debugbreak(); return FALSE; } // // NULL-terminate the Unicode string. // EventName->Buffer[(EventName->Length >> 1)] = L'\0'; // // Now create the event. // Handle = Rtl->CreateEventW(EventAttributes, ManualReset, InitialState, EventName->Buffer); if (!Handle || Handle == INVALID_HANDLE_VALUE) { __debugbreak(); return FALSE; } // // Update the caller's pointer and return TRUE. 
// *HandlePointer = Handle; return TRUE; } _Use_decl_annotations_ BOOL CreateRandomObjectNames( PRTL Rtl, PALLOCATOR TemporaryAllocator, PALLOCATOR WideBufferAllocator, USHORT NumberOfNames, USHORT LengthOfNameInChars, PUNICODE_STRING NamespacePrefix, PPUNICODE_STRING NamesArrayPointer, PPUNICODE_STRING PrefixArrayPointer, PULONG SizeOfWideBufferInBytes, PPWSTR WideBufferPointer ) /*++ Routine Description: This routine writes Base64-encoded random data to an existing buffer in a format suitable for subsequent use of UNICODE_STRING-based system names for things like events or shared memory handles. Arguments: Rtl - Supplies a pointer to an initialized RTL structure. If the crypto subsystem hasn't yet been initialized, this routine will also initialize it. TemporaryAllocator - Supplies a pointer to an initialized ALLOCATOR struct that this routine will use for temporary allocations. (Any temporarily allocated memory will be freed before the routine returns, regardless of success/error.) WideBufferAllocator - Supplies a pointer to an initialized ALLOCATOR struct that this routine will use to allocate the final wide character buffer that contains the base64-encoded random data. This data will then have the object namespace+prefix and trailing NULL characters overlaid on top of it. (That is, the UNICODE_STRING structures pointed to by the NamesArray will have their Buffer addresses point within this buffer space.) The caller is responsible for freeing this address (which will be received via the output param WideBufferPointer). NumberOfNames - Supplies the number of names that will be carved out of the provided WideBuffer by the caller. This parameter is used in concert with the LengthOfNameInChars parameter to ensure the buffer is laid out in the correct format. LengthOfNameInChars - Supplies the desired length of each name string in characters. 
This length is assumed to include the trailing NULL and the prefix -- that is, the space required to contain the prefix and trailing NULL will be subtracted from this parameter. For optimal layout, this parameter should be a power of 2 -- with 64 and 128 being good default values. NamespacePrefix - Optionally supplies a pointer to a UNICODE_STRING to use as the namespace (prefix) for each string. If NULL, this value defaults L"Local\\". (If L"Global\\" is used, the caller is responsible for ensuring the SeCreateGlobalPrivilege privilege is enabled.) NamesArrayPointer - Supplies a pointer to the first element of an array of of pointers to UNICODE_STRING structures that will be filled out with the details of the corresponding object name. Sufficient space should be allocated such that the array contains sizeof(UNICODE_STRING) * NumberOfNames in space. PrefixArrayPointer - Optionally supplies a pointer to the first element of an array of pointers to UNICODE_STRING structures which can be used to further customize the name of the object after the namespace but before the random character data. If a NULL pointer resides at a given array element, it is assumed no prefix is desired for this element. SizeOfWideBufferInBytes - Receives the size in bytes of the buffer allocated to store the object names' wide character data. WideBufferPointer - Receives the base address of the object names wide char buffer. Return Value: TRUE on success, FALSE on error. 
--*/ { BOOL Success; USHORT Index; USHORT Count; LONG PrefixLengthInChars; LONG NumberOfWideBase64CharsToCopy; LONG CharsRemaining; LONG CharsUsed; LONG RandomCharsUsed; LONG FinalCharCount; ULONG CryptFlags; ULONG SizeOfBinaryBufferInBytes; ULONG SizeOfWideBase64BufferInBytes = 0; ULONG OldLengthOfWideBase64BufferInChars; ULONG LengthOfWideBase64BufferInChars; PBYTE BinaryBuffer; PWCHAR Dest; PWCHAR WideBase64Buffer = NULL; PUNICODE_STRING String; PUNICODE_STRING Prefix; PPUNICODE_STRING Prefixes; PUNICODE_STRING Namespace; UNICODE_STRING LocalNamespace = RTL_CONSTANT_STRING(L"Local\\"); // // Validate arguments. // if (ARGUMENT_PRESENT(NamespacePrefix)) { if (!IsValidUnicodeStringWithMinimumLengthInChars(NamespacePrefix,7)) { return FALSE; } Namespace = NamespacePrefix; } else { Namespace = &LocalNamespace; } // // Namespace length should be (far) less than the desired name length. // if (Namespace->Length >= (LengthOfNameInChars << 1)) { __debugbreak(); return FALSE; } // // If the namespace ends with a trailing NULL, omit it by reducing the // length by one wide character. Then, verify the final character is a // slash. // if (Namespace->Buffer[(Namespace->Length >> 1) - 1] == L'\0') { Namespace->Length -= sizeof(WCHAR); } if (Namespace->Buffer[(Namespace->Length >> 1) - 1] != L'\\') { __debugbreak(); return FALSE; } if (ARGUMENT_PRESENT(PrefixArrayPointer)) { Prefixes = PrefixArrayPointer; } else { Prefixes = NULL; } // // Make sure the crypto subsystem is available. // if (!Rtl->Flags.Crypt32Initialized) { if (!InitCrypt32(Rtl)) { return FALSE; } } // // Allocate a buffer for the initial binary data; we generate more random // data than we need here, but it's easier than trying to get everything // exact up-front (i.e. base64->binary size conversions). // // N.B. We use Allocator->Malloc() instead of the Calloc() here as the // existing memory data will contribute as a seed value. 
// SizeOfBinaryBufferInBytes = NumberOfNames * LengthOfNameInChars; BinaryBuffer = (PBYTE)( TemporaryAllocator->Malloc( TemporaryAllocator->Context, SizeOfBinaryBufferInBytes ) ); if (!BinaryBuffer) { return FALSE; } // // Allocate a wide character buffer for the base64-encoded binary data that // is double the length of the binary buffer -- this is simpler than trying // to get the exact conversion right. // SizeOfWideBase64BufferInBytes = ( NumberOfNames * LengthOfNameInChars * sizeof(WCHAR) * 2 ); WideBase64Buffer = (PWCHAR)( WideBufferAllocator->Calloc( WideBufferAllocator->Context, 1, SizeOfWideBase64BufferInBytes ) ); if (!WideBase64Buffer) { goto Error; } // // We successfully allocated space for our two buffers. Fill the first one // with random data now. // Success = Rtl->CryptGenRandom(Rtl, SizeOfBinaryBufferInBytes, (PBYTE)BinaryBuffer); if (!Success) { Rtl->LastError = GetLastError(); __debugbreak(); goto Error; } // // Convert the entire binary data buffer into base64-encoded wide character // representation. We calculate the number of wide characters the buffer // can receive by shifting the byte size right by one, then copy that value // into a second variable that CryptBinaryToStringW() can overwrite with // the actual length converted. // OldLengthOfWideBase64BufferInChars = SizeOfWideBase64BufferInBytes >> 1; LengthOfWideBase64BufferInChars = OldLengthOfWideBase64BufferInChars; CryptFlags = CRYPT_STRING_BASE64 | CRYPT_STRING_NOCRLF; Success = Rtl->CryptBinaryToStringW(BinaryBuffer, SizeOfBinaryBufferInBytes, CryptFlags, WideBase64Buffer, &LengthOfWideBase64BufferInChars); if (!Success) { Rtl->LastError = GetLastError(); __debugbreak(); goto Error; } // // Conversion of the binary data into base64-encoded data was successful, // so we can free the binary buffer now. 
// TemporaryAllocator->FreePointer( TemporaryAllocator->Context, &BinaryBuffer ); // // Loop through the array of pointers to UNICODE_STRING structures and fill // each one out, adding the namespace and prefixes accordingly. // RandomCharsUsed = 0; for (Index = 0; Index < NumberOfNames; Index++) { // // Resolve the next unicode string pointer. // String = *(NamesArrayPointer + Index); // // Reset counters. CharsUsed has one subtracted from it in addition // to the namespace length to account for the trailing NULL. // CharsUsed = (Namespace->Length >> 1) + 1; CharsRemaining = (LONG)LengthOfNameInChars - CharsUsed; if (Prefixes && ((Prefix = *(Prefixes + Index)) != NULL)) { // // Omit any trailing NULLs from the custom prefix provided by the // caller, then subtract the prefix length from the remaining bytes // and verify we've got a sensible number left. // PrefixLengthInChars = Prefix->Length >> 1; if (Prefix->Buffer[PrefixLengthInChars - 1] == L'\0') { PrefixLengthInChars -= 1; } CharsUsed += PrefixLengthInChars; CharsRemaining -= PrefixLengthInChars; } else { Prefix = NULL; PrefixLengthInChars = 0; } if (CharsRemaining <= 0) { __debugbreak(); goto Error; } // // Final sanity check that the lengths add up. // NumberOfWideBase64CharsToCopy = CharsRemaining; FinalCharCount = ( (Namespace->Length >> 1) + PrefixLengthInChars + NumberOfWideBase64CharsToCopy + 1 ); if (FinalCharCount != LengthOfNameInChars) { __debugbreak(); goto Error; } // // Everything checks out, fill out the unicode string details and copy // the relevant parts over: namespace, optional prefix and then finally // the random characters. // String->Length = (USHORT)(FinalCharCount << 1) - sizeof(WCHAR); String->MaximumLength = String->Length + sizeof(WCHAR); Dest = String->Buffer = (WideBase64Buffer + RandomCharsUsed); // // Copy the namespace and optionally prefix into the initial part of // the random character data buffer, then NULL terminate the name and // update counters. 
// Count = Namespace->Length >> 1; __movsw(Dest, Namespace->Buffer, Count); Dest += Count; RandomCharsUsed += Count; if (Prefix) { Count = (USHORT)PrefixLengthInChars; __movsw(Dest, Prefix->Buffer, Count); Dest += Count; RandomCharsUsed += Count; } Count = (USHORT)NumberOfWideBase64CharsToCopy + 1; Dest += Count - 1; *Dest = L'\0'; RandomCharsUsed += Count; } // // We're done, indicate success and finish up. // Success = TRUE; goto End; Error: Success = FALSE; // // Intentional follow-on to End. // End: if (BinaryBuffer) { TemporaryAllocator->FreePointer( TemporaryAllocator->Context, &BinaryBuffer ); } *WideBufferPointer = WideBase64Buffer; *SizeOfWideBufferInBytes = SizeOfWideBase64BufferInBytes; return Success; } _Use_decl_annotations_ BOOL CreateSingleRandomObjectName( PRTL Rtl, PALLOCATOR TemporaryAllocator, PALLOCATOR WideBufferAllocator, PCUNICODE_STRING Prefix, PUNICODE_STRING Name ) /*++ Routine Description: This is a convenience routine that simplifies the task of creating a single, optionally-prefixed, random object name. Behind the scenes, it calls the procedure Rtl->CreateRandomObjectNames(). Arguments: Rtl - Supplies a pointer to an initialized RTL structure. If the crypto subsystem hasn't yet been initialized, this routine will also initialize it. TemporaryAllocator - Supplies a pointer to an initialized ALLOCATOR struct that this routine will use for temporary allocations. (Any temporarily allocated memory will be freed before the routine returns, regardless of success/error.) WideBufferAllocator - Supplies a pointer to an initialized ALLOCATOR struct that this routine will use to allocate the final wide character buffer that contains the base64-encoded random data. This data will then have the prefix and trailing NULL characters overlaid on top of it. (That is, the UNICODE_STRING structure pointed to by the Name parameter will have its Buffer address point within this buffer.) The caller is responsible for freeing this address (Name->Buffer). 
Prefix - Optionally supplies the address of a UNICODE_STRING structure to use as the prefix for the object name. This will be appended after the namespace name and before the random base64-encoded data. Name - Supplies the address of a UNICODE_STRING structure that will receive the details of the newly-created object name. The caller is responsible for freeing the address at Name->Buffer via Allocator. Return Value: TRUE on success, FALSE on error. --*/ { BOOL Success; ULONG SizeOfBuffer; PWSTR WideBuffer; PUNICODE_STRING Names[1]; PCUNICODE_STRING Prefixes[1]; // // Validate arguments. // if (!ARGUMENT_PRESENT(Name)) { return FALSE; } // // Initialize the arrays. // Names[0] = Name; Prefixes[0] = Prefix; Success = Rtl->CreateRandomObjectNames(Rtl, TemporaryAllocator, WideBufferAllocator, 1, 64, NULL, (PPUNICODE_STRING)&Names, (PPUNICODE_STRING)&Prefixes, &SizeOfBuffer, &WideBuffer); return Success; } BOOL InitializeTsx(PRTL Rtl) { GUARDED_LIST List; LIST_ENTRY Entry; InitializeGuardedListHead(&List); InitializeListHead(&Entry); Rtl->Flags.TsxAvailable = TRUE; TRY_AVX { InsertTailGuardedListTsxInline(&List, &Entry); } CATCH_EXCEPTION_ILLEGAL_INSTRUCTION { Rtl->Flags.TsxAvailable = FALSE; } return TRUE; } VIRTUAL_ALLOC RtlpTryLargePageVirtualAlloc; _Use_decl_annotations_ LPVOID RtlpLargePageVirtualAlloc( LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect ) { return VirtualAlloc(lpAddress, max(dwSize, GetLargePageMinimum()), flAllocationType | MEM_LARGE_PAGES, flProtect); } VIRTUAL_ALLOC_EX RtlpTryLargePageVirtualAllocEx; _Use_decl_annotations_ LPVOID RtlpLargePageVirtualAllocEx( HANDLE hProcess, LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect ) { return VirtualAllocEx(hProcess, lpAddress, max(dwSize, GetLargePageMinimum()), flAllocationType | MEM_LARGE_PAGES, flProtect); } BOOL InitializeLargePages(PRTL Rtl) { Rtl->Flags.IsLargePageEnabled = Rtl->EnableLockMemoryPrivilege(); Rtl->LargePageMinimum = 
GetLargePageMinimum(); if (Rtl->Flags.IsLargePageEnabled) { Rtl->TryLargePageVirtualAlloc = RtlpLargePageVirtualAlloc; Rtl->TryLargePageVirtualAllocEx = RtlpLargePageVirtualAllocEx; } else { Rtl->TryLargePageVirtualAlloc = VirtualAlloc; Rtl->TryLargePageVirtualAllocEx = VirtualAllocEx; } return TRUE; } PVOID TryMapViewOfFileNuma2( PRTL Rtl, HANDLE FileMappingHandle, HANDLE ProcessHandle, ULONG64 Offset, PVOID BaseAddress, SIZE_T ViewSize, ULONG AllocationType, ULONG PageProtection, ULONG PreferredNode ) { LARGE_INTEGER FileOffset; if (!Rtl->MapViewOfFileNuma2) { goto Fallback; } AllocationType = FilterLargePageFlags(Rtl, AllocationType); return Rtl->MapViewOfFileNuma2(FileMappingHandle, ProcessHandle, Offset, BaseAddress, ViewSize, AllocationType, PageProtection, PreferredNode); Fallback: FileOffset.QuadPart = Offset; return Rtl->MapViewOfFileExNuma(FileMappingHandle, PageProtection, FileOffset.HighPart, FileOffset.LowPart, ViewSize, BaseAddress, PreferredNode); } RTL_API PROBE_FOR_READ ProbeForRead; _Use_decl_annotations_ BOOL ProbeForRead( PRTL Rtl, PVOID Address, SIZE_T NumberOfBytes, PULONG NumberOfValidPages ) { BOOL Success; ULONG Index; ULONG ValidPages; ULONG NumberOfPages; PBYTE Byte; PBYTE Buffer; SIZE_T PageAlignedSize; ValidPages = 0; PageAlignedSize = ROUND_TO_PAGES(NumberOfBytes); NumberOfPages = (ULONG)(PageAlignedSize >> PAGE_SHIFT); Buffer = (PBYTE)Address; Success = TRUE; TRY_PROBE_MEMORY { for (Index = 0; Index < NumberOfPages; Index++) { Byte = Buffer + (Index * (1 << PAGE_SHIFT)); PrefaultPage(Byte); ValidPages++; } } CATCH_STATUS_IN_PAGE_ERROR_OR_ACCESS_VIOLATION { Success = FALSE; } if (ARGUMENT_PRESENT(NumberOfValidPages)) { *NumberOfValidPages = ValidPages; } return Success; } RTL_API LOAD_FILE LoadFile; _Use_decl_annotations_ BOOL LoadFile( PRTL Rtl, LOAD_FILE_FLAGS Flags, PCUNICODE_STRING Path, PHANDLE FileHandlePointer, PHANDLE MappingHandlePointer, PPVOID BaseAddressPointer ) { BOOL Success; ULONG LastError; HANDLE FileHandle = NULL; 
PVOID BaseAddress = NULL; HANDLE MappingHandle = NULL; // // Clear the caller's pointers up-front. // *FileHandlePointer = NULL; *BaseAddressPointer = NULL; *MappingHandlePointer = NULL; FileHandle = CreateFileW(Path->Buffer, GENERIC_READ, FILE_SHARE_READ, NULL, OPEN_EXISTING, FILE_FLAG_OVERLAPPED | FILE_ATTRIBUTE_NORMAL | FILE_FLAG_SEQUENTIAL_SCAN, NULL); if (!FileHandle || FileHandle == INVALID_HANDLE_VALUE) { FileHandle = NULL; LastError = GetLastError(); __debugbreak(); goto Error; } MappingHandle = CreateFileMappingNuma(FileHandle, NULL, PAGE_READONLY, 0, 0, NULL, NUMA_NO_PREFERRED_NODE); if (!MappingHandle || MappingHandle == INVALID_HANDLE_VALUE) { MappingHandle = NULL; LastError = GetLastError(); __debugbreak(); goto Error; } BaseAddress = Rtl->MapViewOfFileExNuma(MappingHandle, FILE_MAP_READ, 0, 0, 0, BaseAddress, NUMA_NO_PREFERRED_NODE); if (!BaseAddress) { LastError = GetLastError(); __debugbreak(); goto Error; } // // We've successfully opened, created a section for, and then subsequently // mapped, the requested PTX file. Update the caller's pointers and return // success. // *FileHandlePointer = FileHandle; *BaseAddressPointer = BaseAddress; *MappingHandlePointer = MappingHandle; Success = TRUE; goto End; Error: if (MappingHandle) { CloseHandle(MappingHandle); MappingHandle = NULL; } if (FileHandle) { CloseHandle(FileHandle); FileHandle = NULL; } Success = FALSE; // // Intentional follow-on to End. 
// End: return Success; } _Use_decl_annotations_ BOOL InitializeRtl( PRTL Rtl, PULONG SizeOfRtl ) { BOOL Success; HANDLE HeapHandle; PRTL_LDR_NOTIFICATION_TABLE Table; if (!Rtl) { if (SizeOfRtl) { *SizeOfRtl = sizeof(*Rtl); } return FALSE; } if (!SizeOfRtl) { return FALSE; } if (*SizeOfRtl < sizeof(*Rtl)) { *SizeOfRtl = sizeof(*Rtl); return FALSE; } else { *SizeOfRtl = sizeof(*Rtl); } HeapHandle = GetProcessHeap(); if (!HeapHandle) { return FALSE; } SecureZeroMemory(Rtl, sizeof(*Rtl)); if (!LoadRtlSymbols(Rtl)) { return FALSE; } Rtl->SizeOfStruct = sizeof(*Rtl); SetCSpecificHandler(Rtl->NtdllModule); Rtl->__C_specific_handler = __C_specific_handler_impl; if (!Rtl->__C_specific_handler) { return FALSE; } Rtl->HeapHandle = HeapHandle; if (!LoadRtlExSymbols(NULL, Rtl)) { return FALSE; } if (!InitializeWindowsDirectories(Rtl)) { return FALSE; } if (!InitializeTsx(Rtl)) { return FALSE; } if (!InitializeLargePages(Rtl)) { return FALSE; } Rtl->atexit = atexit_impl; Rtl->AtExitEx = AtExitExImpl; Rtl->RundownGlobalAtExitFunctions = RundownGlobalAtExitFunctions; Rtl->GetCu = GetCu; // // Windows 8 onward. // Rtl->MapViewOfFileExNuma = (PMAP_VIEW_OF_FILE_EX_NUMA)( GetProcAddress( Rtl->Kernel32Module, "MapViewOfFileExNuma" ) ); // // Windows 10 1703 onward. 
// Rtl->MapViewOfFileNuma2 = (PMAP_VIEW_OF_FILE_NUMA2)( GetProcAddress( Rtl->KernelBaseModule, "MapViewOfFileNuma2" ) ); Rtl->TryMapViewOfFileNuma2 = TryMapViewOfFileNuma2; Rtl->OutputDebugStringA = OutputDebugStringA; Rtl->OutputDebugStringW = OutputDebugStringW; Rtl->MaximumFileSectionSize = Rtl->MmGetMaximumFileSectionSize(); Table = Rtl->LoaderNotificationTable = (PRTL_LDR_NOTIFICATION_TABLE)( HeapAlloc( HeapHandle, HEAP_ZERO_MEMORY, sizeof(*Rtl->LoaderNotificationTable) ) ); if (!Table) { return FALSE; } Success = InitializeRtlLdrNotificationTable(Rtl, Table); if (!Success) { HeapFree(HeapHandle, 0, Table); Rtl->LoaderNotificationTable = NULL; } Rtl->Multiplicand.QuadPart = TIMESTAMP_TO_SECONDS; QueryPerformanceFrequency(&Rtl->Frequency); Success = CryptAcquireContextW(&Rtl->CryptProv, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT); if (!Success) { Rtl->LastError = GetLastError(); return FALSE; } Rtl->CryptGenRandom = RtlCryptGenRandom; Rtl->CreateEventA = CreateEventA; Rtl->CreateEventW = CreateEventW; Rtl->InitializeCom = InitializeCom; Rtl->LoadDbgEng = LoadDbgEng; Rtl->FindAndReplaceByte = RtlFindAndReplaceByte; Rtl->CopyPages = CopyPagesNonTemporalAvx2_v4; Rtl->FillPages = FillPagesNonTemporalAvx2_v1; Rtl->ProbeForRead = ProbeForRead; Rtl->SetDllPath = RtlpSetDllPath; Rtl->CreateNamedEvent = RtlpCreateNamedEvent; Rtl->CreateRandomObjectNames = CreateRandomObjectNames; Rtl->CreateSingleRandomObjectName = CreateSingleRandomObjectName; Rtl->LoadSymbols = LoadSymbols; Rtl->LoadSymbolsFromMultipleModules = LoadSymbolsFromMultipleModules; #ifdef _RTL_TEST Rtl->TestLoadSymbols = TestLoadSymbols; Rtl->TestLoadSymbolsFromMultipleModules = ( TestLoadSymbolsFromMultipleModules ); #endif return Success; } RTL_API BOOL InitializeRtlManually(PRTL Rtl, PULONG SizeOfRtl) { return InitializeRtlManuallyInline(Rtl, SizeOfRtl); } _Use_decl_annotations_ VOID DestroyRtl( PPRTL RtlPointer ) { PRTL Rtl; if (!ARGUMENT_PRESENT(RtlPointer)) { return; } Rtl = *RtlPointer; if 
(!ARGUMENT_PRESENT(Rtl)) { return; } // // Clear the caller's pointer straight away. // *RtlPointer = NULL; if (Rtl->NtdllModule) { FreeLibrary(Rtl->NtdllModule); Rtl->NtdllModule = NULL; } if (Rtl->Kernel32Module) { FreeLibrary(Rtl->Kernel32Module); Rtl->Kernel32Module = NULL; } if (Rtl->KernelBaseModule) { FreeLibrary(Rtl->KernelBaseModule); Rtl->KernelBaseModule = NULL; } if (Rtl->NtosKrnlModule) { FreeLibrary(Rtl->NtosKrnlModule); Rtl->NtosKrnlModule = NULL; } return; } VOID Debugbreak() { __debugbreak(); } _Use_decl_annotations_ PLIST_ENTRY RemoveHeadGuardedListTsx( PGUARDED_LIST GuardedList ) { return RemoveHeadGuardedListTsxInline(GuardedList); } _Use_decl_annotations_ PLIST_ENTRY RemoveTailGuardedListTsx( PGUARDED_LIST GuardedList ) { return RemoveTailGuardedListTsxInline(GuardedList); } _Use_decl_annotations_ VOID InsertTailGuardedListTsx( PGUARDED_LIST GuardedList, PLIST_ENTRY Entry ) { InsertTailGuardedListTsxInline(GuardedList, Entry); } _Use_decl_annotations_ VOID AppendTailGuardedListTsx( PGUARDED_LIST GuardedList, PLIST_ENTRY Entry ) { AppendTailGuardedListTsxInline(GuardedList, Entry); } #ifndef VECTORCALL #define VECTORCALL __vectorcall #endif RTL_API XMMWORD VECTORCALL DummyVectorCall1( _In_ XMMWORD Xmm0, _In_ XMMWORD Xmm1, _In_ XMMWORD Xmm2, _In_ XMMWORD Xmm3 ) { XMMWORD Temp1; XMMWORD Temp2; Temp1 = _mm_xor_si128(Xmm0, Xmm1); Temp2 = _mm_xor_si128(Xmm2, Xmm3); return _mm_xor_si128(Temp1, Temp2); } typedef struct _TEST_HVA3 { XMMWORD X; XMMWORD Y; XMMWORD Z; } TEST_HVA3; RTL_API TEST_HVA3 VECTORCALL DummyHvaCall1( _In_ TEST_HVA3 Hva3 ) { Hva3.X = _mm_xor_si128(Hva3.Y, Hva3.Z); return Hva3; } #if 0 typedef struct _TEST_HFA3 { DOUBLE X; DOUBLE Y; DOUBLE Z; } TEST_HFA3; RTL_API TEST_HFA3 VECTORCALL DummyHfaCall1( _In_ TEST_HFA3 Hfa3 ) { __m128d Double; Double = _mm_setr_pd(Hfa3.Y, Hfa3.Z); Hfa3.X = Double.m128d_f64[0]; return Hfa3; } #endif // vim:set ts=8 sw=4 sts=4 tw=80 expandtab :
24.146894
80
0.599659
[ "object" ]
aeb9c92a9e5e60aa24baaae6f481af167a7b5f37
3,040
h
C
src/cge/cge/udp_server.h
lmhMike/liuguang
1474492ac4de7827cf40c9d7845c99eca0f9727c
[ "Apache-2.0" ]
null
null
null
src/cge/cge/udp_server.h
lmhMike/liuguang
1474492ac4de7827cf40c9d7845c99eca0f9727c
[ "Apache-2.0" ]
null
null
null
src/cge/cge/udp_server.h
lmhMike/liuguang
1474492ac4de7827cf40c9d7845c99eca0f9727c
[ "Apache-2.0" ]
null
null
null
/* * Copyright 2020-present Ksyun * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once #include <Xinput.h> #include <boost/bind/bind.hpp> #include "net.hpp" #include "cgvhid_client.h" #include "engine.h" #include "regame/control.h" class UdpServer : public std::enable_shared_from_this<UdpServer> { public: UdpServer(Engine& engine, udp::endpoint endpoint, std::vector<uint8_t> disable_keys, KeyboardReplay keyboard_replay, GamepadReplay gamepad_replay); ~UdpServer(); void Run() { Read(); } void Stop() noexcept { boost::system::error_code ec; // socket_.cancel(ec); socket_.close(ec); } private: void Read() { socket_.async_receive_from( net::buffer(recv_buffer_), remote_endpoint_, boost::bind(&UdpServer::OnRead, shared_from_this(), net::placeholders::error, net::placeholders::bytes_transferred)); } void OnRead(const boost::system::error_code& ec, std::size_t bytes_transferred); void OnWrite(const boost::system::error_code& ec, std::size_t bytes_transferred); void OnControlEvent(std::size_t bytes_transferred) noexcept; void OnKeyboardEvent(std::size_t bytes_transferred, ControlElement* control_element) noexcept; void OnKeyboardVkEvent(std::size_t bytes_transferred, ControlElement* control_element) noexcept; void OnJoystickAxisEvent(std::size_t bytes_transferred, ControlElement* control_element) noexcept; void OnJoystickButtonEvent(std::size_t bytes_transferred, ControlElement* control_element) noexcept; void OnJoystickHatEvent(std::size_t bytes_transferred, ControlElement* 
control_element) noexcept; void OnGamepadAxisEvent(std::size_t bytes_transferred, ControlElement* control_element) noexcept; void OnGamepadButtonEvent(std::size_t bytes_transferred, ControlElement* control_element) noexcept; private: Engine& engine_; udp::socket socket_; udp::endpoint remote_endpoint_; std::array<char, 65536> recv_buffer_{}; std::array<bool, 256> disable_keys_{}; KeyboardReplay keyboard_replay_; CgvhidClient cgvhid_client_; GamepadReplay gamepad_replay_; std::shared_ptr<class ViGEmClient> vigem_client_; std::shared_ptr<class ViGEmTargetX360> vigem_target_x360_; XINPUT_GAMEPAD gamepad_state_{}; };
32.688172
75
0.691447
[ "vector" ]
aebd3eb8be685bce8d055c727051593b5b0deb53
3,377
h
C
kernel/task.h
KeeProMise/KePOS
0d26a46bfb5ef6514fe59261e4864c935a6fd614
[ "Apache-2.0" ]
72
2021-04-20T11:36:21.000Z
2022-01-26T15:00:29.000Z
kernel/task.h
buger-beep/KePOS
9573946fce57da54f1ee89af1551fba0fb821d31
[ "Apache-2.0" ]
null
null
null
kernel/task.h
buger-beep/KePOS
9573946fce57da54f1ee89af1551fba0fb821d31
[ "Apache-2.0" ]
2
2021-04-21T16:07:41.000Z
2021-07-12T08:35:13.000Z
#ifndef TASK #define TASK #define NEW 0 #define READY 1 #define RUNNING 2 #define WAIT 3 #include "window.h" #include "device.h" #include "global.h" struct Register{ unsigned long r15; unsigned long r14; unsigned long r13; unsigned long r12; unsigned long r11; unsigned long r10; unsigned long r9; unsigned long r8; unsigned long rbx; unsigned long rcx; unsigned long rdx; unsigned long rsi; unsigned long rdi; unsigned long rbp; unsigned long ds; unsigned long es; unsigned long rax; unsigned long rip; unsigned long cs; unsigned long rflagc; unsigned long rsp; unsigned long ss; }; struct PageBuffer{ unsigned long * buf; unsigned long bufferLength; unsigned long head; unsigned long tail; }; struct Task{ unsigned long id; unsigned long cr3; struct PageBuffer pages; unsigned long structPage; //unsigned long kernelRsp; unsigned long state; unsigned long path; struct Window * window; struct Register registers; struct Task * next; struct Task * prev; } NullTaskReady,NullTaskRunning,NullTaskWait,NullReeTrantLock; struct TaskManage{ struct Task * readys; struct Task * running; struct Task * waits; unsigned long count; }; struct ReeTrantLock{ volatile unsigned long count; volatile struct Task * nowTask; volatile struct Task * waitTask; }; //void test(); //lock 相关 void reetrantlock(); void reetrantUnLock(); //Buffer相关 void initPagesBuffer(struct PageBuffer * buffer,unsigned long bufferSize); void backPagesBuffer(struct PageBuffer * buffer); unsigned long isPagesEmpty(struct PageBuffer * buffer){ if(buffer->head == buffer->tail) return True; return False; } unsigned long isPagesFull(struct PageBuffer * buffer){ if(((buffer->tail+1)%buffer->bufferLength) == buffer->head) return True; return False; } void insertPagesBuffer(struct PageBuffer * buffer,unsigned long c){ *(buffer->buf + buffer->tail) = c; buffer->tail = (buffer->tail+1)%buffer->bufferLength; } unsigned long deleteAndreturnPage(struct PageBuffer * buffer){ unsigned long c = *(buffer->buf + buffer->head); buffer->head = 
(buffer->head+1)%buffer->bufferLength; return c; } void initTaskNode(struct Task * task,unsigned long maxUsePages); void insertTaskBefore(struct Task * Object,struct Task * Target){ Target->next = Object; Target->prev = Object->prev; Object->prev = Target; Target->prev->next = Target; } void insertTaskBehand(struct Task * Object,struct Task * Target){ Target->next = Object->next; Target->prev = Object; Object->next = Target; Target->next->prev = Target; } void deleteTargetTask(struct Task * Target){ Target->prev->next = Target->next; Target->next->prev = Target->prev; Target->prev = Target; Target->next = Target; } void closeInterrupt(){ __asm__ __volatile__ ("cli \n\t"); } void openInterrupt(){ __asm__ __volatile__ ("sti \n\t"); } void initAtomUserTask(); void exitNowtask(); void initUserTask(void * userTask); void initKernelTask(void * taskMainFunvirAddress,unsigned long rsp); void changeTask(unsigned long rsp); void backTaskRegisterTogStick(struct Task * task,unsigned long rsp); void saveTaskRegisterTogStick(struct Task * task,unsigned long rsp); void taskMain(); #endif
24.121429
76
0.697661
[ "object" ]
aec36f7ccc0068120aeb64967da73bedffb237cb
12,856
c
C
kernels/linux-2.4.0/arch/parisc/kernel/irq.c
liuhaozzu/linux
bdf9758cd23e34b5f53e8e6339d9b29348615e14
[ "Apache-2.0" ]
null
null
null
kernels/linux-2.4.0/arch/parisc/kernel/irq.c
liuhaozzu/linux
bdf9758cd23e34b5f53e8e6339d9b29348615e14
[ "Apache-2.0" ]
null
null
null
kernels/linux-2.4.0/arch/parisc/kernel/irq.c
liuhaozzu/linux
bdf9758cd23e34b5f53e8e6339d9b29348615e14
[ "Apache-2.0" ]
null
null
null
/* $Id: irq.c,v 1.8 2000/02/08 02:01:17 grundler Exp $ * * Code to handle x86 style IRQs plus some generic interrupt stuff. * * This is not in any way SMP-clean. * * Copyright (C) 1992 Linus Torvalds * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle * Copyright (C) 1999 SuSE GmbH (Author: Philipp Rumpf, prumpf@tux.org) * Copyright (C) 2000 Hewlett Packard Corp (Co-Author: Grant Grundler, grundler@cup.hp.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/config.h> #include <linux/bitops.h> #include <asm/bitops.h> #include <asm/pdc.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel_stat.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/ioport.h> #include <linux/timex.h> #include <linux/malloc.h> #include <linux/random.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/cache.h> #undef DEBUG_IRQ extern void timer_interrupt(int, void *, struct pt_regs *); extern void ipi_interrupt(int, void *, struct pt_regs *); #ifdef DEBUG_IRQ #define DBG_IRQ(x...) printk(x) #else /* DEBUG_IRQ */ #define DBG_IRQ(x...) 
#endif /* DEBUG_IRQ */ #define EIEM_MASK(irq) (1L<<(MAX_CPU_IRQ-IRQ_OFFSET(irq))) #define CLEAR_EIEM_BIT(irq) set_eiem(get_eiem() & ~EIEM_MASK(irq)) #define SET_EIEM_BIT(irq) set_eiem(get_eiem() | EIEM_MASK(irq)) static void disable_cpu_irq(void *unused, int irq) { CLEAR_EIEM_BIT(irq); } static void enable_cpu_irq(void *unused, int irq) { unsigned long mask = EIEM_MASK(irq); mtctl(mask, 23); SET_EIEM_BIT(irq); } static struct irqaction cpu_irq_actions[IRQ_PER_REGION] = { [IRQ_OFFSET(TIMER_IRQ)] { timer_interrupt, 0, 0, "timer", NULL, NULL }, [IRQ_OFFSET(IPI_IRQ)] { ipi_interrupt, 0, 0, "IPI", NULL, NULL }, }; struct irq_region cpu_irq_region = { { disable_cpu_irq, enable_cpu_irq, NULL, NULL }, { &cpu_data[0], "PA-PIC", IRQ_REG_MASK|IRQ_REG_DIS, IRQ_FROM_REGION(CPU_IRQ_REGION)}, cpu_irq_actions }; struct irq_region *irq_region[NR_IRQ_REGS] = { [ 0 ] NULL, /* abuse will data page fault (aka code 15) */ [ CPU_IRQ_REGION ] &cpu_irq_region, }; /* we special-case the real IRQs here, which feels right given the relatively * high cost of indirect calls. If anyone is bored enough to benchmark this * and find out whether I am right, feel free to. 
prumpf */ static inline void mask_irq(int irq) { struct irq_region *region; #ifdef DEBUG_IRQ if (irq != TIMER_IRQ) #endif DBG_IRQ("mask_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq)); if(IRQ_REGION(irq) != CPU_IRQ_REGION) { region = irq_region[IRQ_REGION(irq)]; if(region->data.flags & IRQ_REG_MASK) region->ops.mask_irq(region->data.dev, IRQ_OFFSET(irq)); } else { CLEAR_EIEM_BIT(irq); } } static inline void unmask_irq(int irq) { struct irq_region *region; #ifdef DEBUG_IRQ if (irq != TIMER_IRQ) #endif DBG_IRQ("unmask_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq)); if(IRQ_REGION(irq) != CPU_IRQ_REGION) { region = irq_region[IRQ_REGION(irq)]; if(region->data.flags & IRQ_REG_MASK) region->ops.unmask_irq(region->data.dev, IRQ_OFFSET(irq)); } else { SET_EIEM_BIT(irq); } } void disable_irq(int irq) { struct irq_region *region; #ifdef DEBUG_IRQ if (irq != TIMER_IRQ) #endif DBG_IRQ("disable_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq)); region = irq_region[IRQ_REGION(irq)]; if(region->data.flags & IRQ_REG_DIS) region->ops.disable_irq(region->data.dev, IRQ_OFFSET(irq)); else BUG(); } void enable_irq(int irq) { struct irq_region *region; #ifdef DEBUG_IRQ if (irq != TIMER_IRQ) #endif DBG_IRQ("enable_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq)); region = irq_region[IRQ_REGION(irq)]; if(region->data.flags & IRQ_REG_DIS) region->ops.enable_irq(region->data.dev, IRQ_OFFSET(irq)); else BUG(); } int get_irq_list(char *buf) { #ifdef CONFIG_PROC_FS char *p = buf; int i, j; int regnr, irq_no; struct irq_region *region; struct irqaction *action, *mainaction; p += sprintf(p, " "); for (j=0; j<smp_num_cpus; j++) p += sprintf(p, "CPU%d ",j); *p++ = '\n'; for (regnr = 0; regnr < NR_IRQ_REGS; regnr++) { region = irq_region[regnr]; if (!region || !region->action) continue; mainaction = region->action; for (i = 0; i <= MAX_CPU_IRQ; i++) { action = mainaction++; if (!action || !action->name) continue; irq_no = IRQ_FROM_REGION(regnr) + i; p += sprintf(p, 
"%3d: ", irq_no); #ifndef CONFIG_SMP p += sprintf(p, "%10u ", kstat_irqs(irq_no)); #else for (j = 0; j < smp_num_cpus; j++) p += sprintf(p, "%10u ", kstat.irqs[cpu_logical_map(j)][irq_no]); #endif p += sprintf(p, " %14s", region->data.name ? region->data.name : "N/A"); p += sprintf(p, " %s", action->name); for (action=action->next; action; action = action->next) p += sprintf(p, ", %s", action->name); *p++ = '\n'; } } p += sprintf(p, "\n"); #if CONFIG_SMP p += sprintf(p, "LOC: "); for (j = 0; j < smp_num_cpus; j++) p += sprintf(p, "%10u ", apic_timer_irqs[cpu_logical_map(j)]); p += sprintf(p, "\n"); #endif return p - buf; #else /* CONFIG_PROC_FS */ return 0; #endif /* CONFIG_PROC_FS */ } /* ** The following form a "set": Virtual IRQ, Transaction Address, Trans Data. ** Respectively, these map to IRQ region+EIRR, Processor HPA, EIRR bit. ** ** To use txn_XXX() interfaces, get a Virtual IRQ first. ** Then use that to get the Transaction address and data. */ int txn_alloc_irq(void) { int irq; /* never return irq 0 cause that's the interval timer */ for(irq=1; irq<=MAX_CPU_IRQ; irq++) { if(cpu_irq_region.action[irq].handler == NULL) { return (IRQ_FROM_REGION(CPU_IRQ_REGION) + irq); } } /* unlikely, but be prepared */ return -1; } int txn_claim_irq(int irq) { if (irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)].handler ==NULL) { return irq; } /* unlikely, but be prepared */ return -1; } unsigned long txn_alloc_addr(int virt_irq) { struct cpuinfo_parisc *dev = (struct cpuinfo_parisc *) (irq_region[IRQ_REGION(virt_irq)]->data.dev); if (0==dev) { printk(KERN_ERR "txn_alloc_addr(0x%x): CPU IRQ region? 
dev %p\n", virt_irq,dev); return(0UL); } return (dev->txn_addr); } /* ** The alloc process needs to accept a parameter to accomodate limitations ** of the HW/SW which use these bits: ** Legacy PA I/O (GSC/NIO): 5 bits (architected EIM register) ** V-class (EPIC): 6 bits ** N/L-class/A500: 8 bits (iosapic) ** PCI 2.2 MSI: 16 bits (I think) ** Existing PCI devices: 32-bits (NCR c720/ATM/GigE/HyperFabric) ** ** On the service provider side: ** o PA 1.1 (and PA2.0 narrow mode) 5-bits (width of EIR register) ** o PA 2.0 wide mode 6-bits (per processor) ** o IA64 8-bits (0-256 total) ** ** So a Legacy PA I/O device on a PA 2.0 box can't use all ** the bits supported by the processor...and the N/L-class ** I/O subsystem supports more bits than PA2.0 has. The first ** case is the problem. */ unsigned int txn_alloc_data(int virt_irq, unsigned int bits_wide) { /* XXX FIXME : bits_wide indicates how wide the transaction ** data is allowed to be...we may need a different virt_irq ** if this one won't work. Another reason to index virtual ** irq's into a table which can manage CPU/IRQ bit seperately. 
*/ if (IRQ_OFFSET(virt_irq) > (1 << (bits_wide -1))) { panic("Sorry -- didn't allocate valid IRQ for this device\n"); } return(IRQ_OFFSET(virt_irq)); } /* FIXME: SMP, flags, bottom halves, rest */ void do_irq(struct irqaction *action, int irq, struct pt_regs * regs) { int cpu = smp_processor_id(); irq_enter(cpu, irq); #ifdef DEBUG_IRQ if (irq != TIMER_IRQ) #endif DBG_IRQ("do_irq(%d) %d+%d\n", irq, IRQ_REGION(irq), IRQ_OFFSET(irq)); if (action->handler == NULL) printk(KERN_ERR "No handler for interrupt %d !\n", irq); for(; action && action->handler; action = action->next) { action->handler(irq, action->dev_id, regs); } irq_exit(cpu, irq); /* don't need to care about unmasking and stuff */ do_softirq(); } void do_irq_mask(unsigned long mask, struct irq_region *region, struct pt_regs *regs) { unsigned long bit; int irq; int cpu = smp_processor_id(); #ifdef DEBUG_IRQ if (mask != (1L << MAX_CPU_IRQ)) printk("do_irq_mask %08lx %p %p\n", mask, region, regs); #endif for(bit=(1L<<MAX_CPU_IRQ), irq = 0; mask && bit; bit>>=1, irq++) { int irq_num; if(!(bit&mask)) continue; irq_num = region->data.irqbase + irq; ++kstat.irqs[cpu][IRQ_FROM_REGION(CPU_IRQ_REGION) | irq]; if (IRQ_REGION(irq_num) != CPU_IRQ_REGION) ++kstat.irqs[cpu][irq_num]; mask_irq(irq_num); do_irq(&region->action[irq], irq_num, regs); unmask_irq(irq_num); } } static inline int alloc_irqregion(void) { int irqreg; for(irqreg=1; irqreg<=(NR_IRQ_REGS); irqreg++) { if(irq_region[irqreg] == NULL) return irqreg; } return 0; } struct irq_region *alloc_irq_region( int count, struct irq_region_ops *ops, unsigned long flags, const char *name, void *dev) { struct irq_region *region; int index; index = alloc_irqregion(); if((IRQ_REGION(count-1))) return NULL; if (count < IRQ_PER_REGION) { DBG_IRQ("alloc_irq_region() using minimum of %d irq lines for %s (%d)\n", IRQ_PER_REGION, name, count); count = IRQ_PER_REGION; } if(flags & IRQ_REG_MASK) if(!(ops->mask_irq && ops->unmask_irq)) return NULL; if(flags & IRQ_REG_DIS) 
if(!(ops->disable_irq && ops->enable_irq)) return NULL; if((irq_region[index])) return NULL; region = kmalloc(sizeof *region, GFP_ATOMIC); if(!region) return NULL; region->action = kmalloc(sizeof *region->action * count, GFP_ATOMIC); if(!region->action) { kfree(region); return NULL; } memset(region->action, 0, sizeof *region->action * count); region->ops = *ops; region->data.irqbase = IRQ_FROM_REGION(index); region->data.flags = flags; region->data.name = name; region->data.dev = dev; irq_region[index] = region; return irq_region[index]; } /* FIXME: SMP, flags, bottom halves, rest */ int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *), unsigned long irqflags, const char * devname, void *dev_id) { struct irqaction * action; #if 0 printk(KERN_INFO "request_irq(%d, %p, 0x%lx, %s, %p)\n",irq, handler, irqflags, devname, dev_id); #endif if(!handler) { printk(KERN_ERR "request_irq(%d,...): Augh! No handler for irq!\n", irq); return -EINVAL; } if ((IRQ_REGION(irq) == 0) || irq_region[IRQ_REGION(irq)] == NULL) { /* ** Bug catcher for drivers which use "char" or u8 for ** the IRQ number. They lose the region number which ** is in pcidev->irq (an int). */ printk(KERN_ERR "%p (%s?) called request_irq with an invalid irq %d\n", __builtin_return_address(0), devname, irq); return -EINVAL; } action = &irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)]; if(action->handler) { while(action->next) action = action->next; action->next = kmalloc(sizeof *action, GFP_ATOMIC); action = action->next; } if(!action) { printk(KERN_ERR "request_irq():Augh! 
No action!\n") ; return -ENOMEM; } action->handler = handler; action->flags = irqflags; action->mask = 0; action->name = devname; action->next = NULL; action->dev_id = dev_id; enable_irq(irq); return 0; } void free_irq(unsigned int irq, void *dev_id) { struct irqaction *action, **p; action = &irq_region[IRQ_REGION(irq)]->action[IRQ_OFFSET(irq)]; if(action->dev_id == dev_id) { if(action->next == NULL) action->handler = NULL; else memcpy(action, action->next, sizeof *action); return; } p = &action->next; action = action->next; for (; (action = *p) != NULL; p = &action->next) { if (action->dev_id != dev_id) continue; /* Found it - now free it */ *p = action->next; kfree(action); return; } printk(KERN_ERR "Trying to free free IRQ%d\n",irq); } unsigned long probe_irq_on (void) { return 0; } int probe_irq_off (unsigned long irqs) { return 0; } void __init init_IRQ(void) { } void init_irq_proc(void) { }
23.851577
101
0.666304
[ "3d" ]
aec55b6c16d4c1b17e5f765a852e5a6628f37f25
3,938
h
C
src/update_engine/omaha_hash_calculator.h
remarkableno/update_engine
cdffd37e3f005b7738bf596b9bc8d4155e4c7e15
[ "BSD-3-Clause" ]
12
2019-06-26T18:32:06.000Z
2022-03-01T17:32:36.000Z
src/update_engine/omaha_hash_calculator.h
remarkableno/update_engine
cdffd37e3f005b7738bf596b9bc8d4155e4c7e15
[ "BSD-3-Clause" ]
6
2018-11-22T21:11:34.000Z
2022-02-24T13:36:57.000Z
src/update_engine/omaha_hash_calculator.h
remarkableno/update_engine
cdffd37e3f005b7738bf596b9bc8d4155e4c7e15
[ "BSD-3-Clause" ]
5
2018-06-24T13:19:40.000Z
2020-10-15T17:10:56.000Z
// Copyright (c) 2009 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROMEOS_PLATFORM_UPDATE_ENGINE_OMAHA_HASH_CALCULATOR_H__ #define CHROMEOS_PLATFORM_UPDATE_ENGINE_OMAHA_HASH_CALCULATOR_H__ #include <string> #include <unistd.h> #include <vector> #include <glog/logging.h> #include <openssl/sha.h> #include "macros.h" // Omaha uses base64 encoded SHA-256 as the hash. This class provides a simple // wrapper around OpenSSL providing such a formatted hash of data passed in. // The methods of this class must be called in a very specific order: First the // ctor (of course), then 0 or more calls to Update(), then Finalize(), then 0 // or more calls to hash(). namespace chromeos_update_engine { class OmahaHashCalculator { public: OmahaHashCalculator(); // Update is called with all of the data that should be hashed in order. // Update will read |length| bytes of |data|. // Returns true on success. bool Update(const char *data, size_t length); // Updates the hash with up to |length| bytes of data from |file|. If |length| // is negative, reads in and updates with the whole file. Returns the number // of bytes that the hash was updated with, or -1 on error. off_t UpdateFile(const std::string &name, off_t length); // Call Finalize() when all data has been passed in. This method tells // OpenSSl that no more data will come in and base64 encodes the resulting // hash. // Returns true on success. bool Finalize(); // Gets the hash. Finalize() must have been called. const std::string &hash() const { DCHECK(!hash_.empty()) << "Call Finalize() first"; return hash_; } const std::vector<char> &raw_hash() const { DCHECK(!raw_hash_.empty()) << "Call Finalize() first"; return raw_hash_; } // Gets the current hash context. Note that the string will contain binary // data (including \0 characters). std::string GetContext() const; // Sets the current hash context. 
|context| must the string returned by a // previous OmahaHashCalculator::GetContext method call. Returns true on // success, and false otherwise. bool SetContext(const std::string &context); static bool RawHashOfBytes(const char *data, size_t length, std::vector<char> *out_hash); static bool RawHashOfData(const std::vector<char> &data, std::vector<char> *out_hash); static off_t RawHashOfFile(const std::string &name, off_t length, std::vector<char> *out_hash); // Used by tests static std::string OmahaHashOfBytes(const void *data, size_t length); static std::string OmahaHashOfString(const std::string &str); static std::string OmahaHashOfData(const std::vector<char> &data); // Encodes data of given size as a base64 out string static bool Base64Encode(const void *data, size_t size, std::string *out); // Decodes given base64-encoded in string into the out vector. Since the // output can have null characters, we're returning a byte vector instead of // a string. This method works fine even if |raw_in| has any newlines. // Any existing contents of |out| will be erased. static bool Base64Decode(const std::string &raw_in, std::vector<char> *out); private: // If non-empty, the final base64 encoded hash and the raw hash. Will only be // set to non-empty when Finalize is called. std::string hash_; std::vector<char> raw_hash_; // Init success bool valid_; // The hash state used by OpenSSL SHA256_CTX ctx_; DISALLOW_COPY_AND_ASSIGN(OmahaHashCalculator); }; } // namespace chromeos_update_engine #endif // CHROMEOS_PLATFORM_UPDATE_ENGINE_OMAHA_HASH_CALCULATOR_H__
36.803738
82
0.686643
[ "vector" ]
aec55fee00a8d0302a899aa9b0eb1c30f1b419f0
14,559
h
C
aws-cpp-sdk-transcribe/include/aws/transcribe/model/CreateLanguageModelRequest.h
Nexuscompute/aws-sdk-cpp
e7ef485e46e6962c9e084b8c9b104c1bfcceaf26
[ "Apache-2.0" ]
1
2022-01-05T18:20:03.000Z
2022-01-05T18:20:03.000Z
aws-cpp-sdk-transcribe/include/aws/transcribe/model/CreateLanguageModelRequest.h
Nexuscompute/aws-sdk-cpp
e7ef485e46e6962c9e084b8c9b104c1bfcceaf26
[ "Apache-2.0" ]
null
null
null
aws-cpp-sdk-transcribe/include/aws/transcribe/model/CreateLanguageModelRequest.h
Nexuscompute/aws-sdk-cpp
e7ef485e46e6962c9e084b8c9b104c1bfcceaf26
[ "Apache-2.0" ]
1
2021-11-09T11:58:03.000Z
2021-11-09T11:58:03.000Z
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include <aws/transcribe/TranscribeService_EXPORTS.h> #include <aws/transcribe/TranscribeServiceRequest.h> #include <aws/transcribe/model/CLMLanguageCode.h> #include <aws/transcribe/model/BaseModelName.h> #include <aws/core/utils/memory/stl/AWSString.h> #include <aws/transcribe/model/InputDataConfig.h> #include <aws/core/utils/memory/stl/AWSVector.h> #include <aws/transcribe/model/Tag.h> #include <utility> namespace Aws { namespace TranscribeService { namespace Model { /** */ class AWS_TRANSCRIBESERVICE_API CreateLanguageModelRequest : public TranscribeServiceRequest { public: CreateLanguageModelRequest(); // Service request name is the Operation name which will send this request out, // each operation should has unique request name, so that we can get operation's name from this request. // Note: this is not true for response, multiple operations may have the same response name, // so we can not get operation's name from response. 
inline virtual const char* GetServiceRequestName() const override { return "CreateLanguageModel"; } Aws::String SerializePayload() const override; Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override; /** * <p>The language of your custom language model; note that the language code you * select must match the language of your training and tuning data.</p> */ inline const CLMLanguageCode& GetLanguageCode() const{ return m_languageCode; } /** * <p>The language of your custom language model; note that the language code you * select must match the language of your training and tuning data.</p> */ inline bool LanguageCodeHasBeenSet() const { return m_languageCodeHasBeenSet; } /** * <p>The language of your custom language model; note that the language code you * select must match the language of your training and tuning data.</p> */ inline void SetLanguageCode(const CLMLanguageCode& value) { m_languageCodeHasBeenSet = true; m_languageCode = value; } /** * <p>The language of your custom language model; note that the language code you * select must match the language of your training and tuning data.</p> */ inline void SetLanguageCode(CLMLanguageCode&& value) { m_languageCodeHasBeenSet = true; m_languageCode = std::move(value); } /** * <p>The language of your custom language model; note that the language code you * select must match the language of your training and tuning data.</p> */ inline CreateLanguageModelRequest& WithLanguageCode(const CLMLanguageCode& value) { SetLanguageCode(value); return *this;} /** * <p>The language of your custom language model; note that the language code you * select must match the language of your training and tuning data.</p> */ inline CreateLanguageModelRequest& WithLanguageCode(CLMLanguageCode&& value) { SetLanguageCode(std::move(value)); return *this;} /** * <p>The Amazon Transcribe standard language model, or base model, used to create * your custom language model. 
Amazon Transcribe offers two options for base * models: Wideband and Narrowband.</p> <p>If the audio you want to transcribe has * a sample rate of 16,000 Hz or greater, choose <code>WideBand</code>. To * transcribe audio with a sample rate less than 16,000 Hz, choose * <code>NarrowBand</code>.</p> */ inline const BaseModelName& GetBaseModelName() const{ return m_baseModelName; } /** * <p>The Amazon Transcribe standard language model, or base model, used to create * your custom language model. Amazon Transcribe offers two options for base * models: Wideband and Narrowband.</p> <p>If the audio you want to transcribe has * a sample rate of 16,000 Hz or greater, choose <code>WideBand</code>. To * transcribe audio with a sample rate less than 16,000 Hz, choose * <code>NarrowBand</code>.</p> */ inline bool BaseModelNameHasBeenSet() const { return m_baseModelNameHasBeenSet; } /** * <p>The Amazon Transcribe standard language model, or base model, used to create * your custom language model. Amazon Transcribe offers two options for base * models: Wideband and Narrowband.</p> <p>If the audio you want to transcribe has * a sample rate of 16,000 Hz or greater, choose <code>WideBand</code>. To * transcribe audio with a sample rate less than 16,000 Hz, choose * <code>NarrowBand</code>.</p> */ inline void SetBaseModelName(const BaseModelName& value) { m_baseModelNameHasBeenSet = true; m_baseModelName = value; } /** * <p>The Amazon Transcribe standard language model, or base model, used to create * your custom language model. Amazon Transcribe offers two options for base * models: Wideband and Narrowband.</p> <p>If the audio you want to transcribe has * a sample rate of 16,000 Hz or greater, choose <code>WideBand</code>. 
To * transcribe audio with a sample rate less than 16,000 Hz, choose * <code>NarrowBand</code>.</p> */ inline void SetBaseModelName(BaseModelName&& value) { m_baseModelNameHasBeenSet = true; m_baseModelName = std::move(value); } /** * <p>The Amazon Transcribe standard language model, or base model, used to create * your custom language model. Amazon Transcribe offers two options for base * models: Wideband and Narrowband.</p> <p>If the audio you want to transcribe has * a sample rate of 16,000 Hz or greater, choose <code>WideBand</code>. To * transcribe audio with a sample rate less than 16,000 Hz, choose * <code>NarrowBand</code>.</p> */ inline CreateLanguageModelRequest& WithBaseModelName(const BaseModelName& value) { SetBaseModelName(value); return *this;} /** * <p>The Amazon Transcribe standard language model, or base model, used to create * your custom language model. Amazon Transcribe offers two options for base * models: Wideband and Narrowband.</p> <p>If the audio you want to transcribe has * a sample rate of 16,000 Hz or greater, choose <code>WideBand</code>. To * transcribe audio with a sample rate less than 16,000 Hz, choose * <code>NarrowBand</code>.</p> */ inline CreateLanguageModelRequest& WithBaseModelName(BaseModelName&& value) { SetBaseModelName(std::move(value)); return *this;} /** * <p>The name of your new custom language model.</p> <p>This name is case * sensitive, cannot contain spaces, and must be unique within an Amazon Web * Services account. If you try to create a language model with the same name as a * previous language model, you get a <code>ConflictException</code> error.</p> */ inline const Aws::String& GetModelName() const{ return m_modelName; } /** * <p>The name of your new custom language model.</p> <p>This name is case * sensitive, cannot contain spaces, and must be unique within an Amazon Web * Services account. 
If you try to create a language model with the same name as a * previous language model, you get a <code>ConflictException</code> error.</p> */ inline bool ModelNameHasBeenSet() const { return m_modelNameHasBeenSet; } /** * <p>The name of your new custom language model.</p> <p>This name is case * sensitive, cannot contain spaces, and must be unique within an Amazon Web * Services account. If you try to create a language model with the same name as a * previous language model, you get a <code>ConflictException</code> error.</p> */ inline void SetModelName(const Aws::String& value) { m_modelNameHasBeenSet = true; m_modelName = value; } /** * <p>The name of your new custom language model.</p> <p>This name is case * sensitive, cannot contain spaces, and must be unique within an Amazon Web * Services account. If you try to create a language model with the same name as a * previous language model, you get a <code>ConflictException</code> error.</p> */ inline void SetModelName(Aws::String&& value) { m_modelNameHasBeenSet = true; m_modelName = std::move(value); } /** * <p>The name of your new custom language model.</p> <p>This name is case * sensitive, cannot contain spaces, and must be unique within an Amazon Web * Services account. If you try to create a language model with the same name as a * previous language model, you get a <code>ConflictException</code> error.</p> */ inline void SetModelName(const char* value) { m_modelNameHasBeenSet = true; m_modelName.assign(value); } /** * <p>The name of your new custom language model.</p> <p>This name is case * sensitive, cannot contain spaces, and must be unique within an Amazon Web * Services account. 
If you try to create a language model with the same name as a * previous language model, you get a <code>ConflictException</code> error.</p> */ inline CreateLanguageModelRequest& WithModelName(const Aws::String& value) { SetModelName(value); return *this;} /** * <p>The name of your new custom language model.</p> <p>This name is case * sensitive, cannot contain spaces, and must be unique within an Amazon Web * Services account. If you try to create a language model with the same name as a * previous language model, you get a <code>ConflictException</code> error.</p> */ inline CreateLanguageModelRequest& WithModelName(Aws::String&& value) { SetModelName(std::move(value)); return *this;} /** * <p>The name of your new custom language model.</p> <p>This name is case * sensitive, cannot contain spaces, and must be unique within an Amazon Web * Services account. If you try to create a language model with the same name as a * previous language model, you get a <code>ConflictException</code> error.</p> */ inline CreateLanguageModelRequest& WithModelName(const char* value) { SetModelName(value); return *this;} /** * <p>Contains your data access role ARN (Amazon Resource Name) and the Amazon S3 * locations of your training (<code>S3Uri</code>) and tuning * (<code>TuningDataS3Uri</code>) data.</p> */ inline const InputDataConfig& GetInputDataConfig() const{ return m_inputDataConfig; } /** * <p>Contains your data access role ARN (Amazon Resource Name) and the Amazon S3 * locations of your training (<code>S3Uri</code>) and tuning * (<code>TuningDataS3Uri</code>) data.</p> */ inline bool InputDataConfigHasBeenSet() const { return m_inputDataConfigHasBeenSet; } /** * <p>Contains your data access role ARN (Amazon Resource Name) and the Amazon S3 * locations of your training (<code>S3Uri</code>) and tuning * (<code>TuningDataS3Uri</code>) data.</p> */ inline void SetInputDataConfig(const InputDataConfig& value) { m_inputDataConfigHasBeenSet = true; m_inputDataConfig = value; } /** * 
<p>Contains your data access role ARN (Amazon Resource Name) and the Amazon S3 * locations of your training (<code>S3Uri</code>) and tuning * (<code>TuningDataS3Uri</code>) data.</p> */ inline void SetInputDataConfig(InputDataConfig&& value) { m_inputDataConfigHasBeenSet = true; m_inputDataConfig = std::move(value); } /** * <p>Contains your data access role ARN (Amazon Resource Name) and the Amazon S3 * locations of your training (<code>S3Uri</code>) and tuning * (<code>TuningDataS3Uri</code>) data.</p> */ inline CreateLanguageModelRequest& WithInputDataConfig(const InputDataConfig& value) { SetInputDataConfig(value); return *this;} /** * <p>Contains your data access role ARN (Amazon Resource Name) and the Amazon S3 * locations of your training (<code>S3Uri</code>) and tuning * (<code>TuningDataS3Uri</code>) data.</p> */ inline CreateLanguageModelRequest& WithInputDataConfig(InputDataConfig&& value) { SetInputDataConfig(std::move(value)); return *this;} /** * <p>Optionally add tags, each in the form of a key:value pair, to your new * language model. See also: .</p> */ inline const Aws::Vector<Tag>& GetTags() const{ return m_tags; } /** * <p>Optionally add tags, each in the form of a key:value pair, to your new * language model. See also: .</p> */ inline bool TagsHasBeenSet() const { return m_tagsHasBeenSet; } /** * <p>Optionally add tags, each in the form of a key:value pair, to your new * language model. See also: .</p> */ inline void SetTags(const Aws::Vector<Tag>& value) { m_tagsHasBeenSet = true; m_tags = value; } /** * <p>Optionally add tags, each in the form of a key:value pair, to your new * language model. See also: .</p> */ inline void SetTags(Aws::Vector<Tag>&& value) { m_tagsHasBeenSet = true; m_tags = std::move(value); } /** * <p>Optionally add tags, each in the form of a key:value pair, to your new * language model. 
See also: .</p> */ inline CreateLanguageModelRequest& WithTags(const Aws::Vector<Tag>& value) { SetTags(value); return *this;} /** * <p>Optionally add tags, each in the form of a key:value pair, to your new * language model. See also: .</p> */ inline CreateLanguageModelRequest& WithTags(Aws::Vector<Tag>&& value) { SetTags(std::move(value)); return *this;} /** * <p>Optionally add tags, each in the form of a key:value pair, to your new * language model. See also: .</p> */ inline CreateLanguageModelRequest& AddTags(const Tag& value) { m_tagsHasBeenSet = true; m_tags.push_back(value); return *this; } /** * <p>Optionally add tags, each in the form of a key:value pair, to your new * language model. See also: .</p> */ inline CreateLanguageModelRequest& AddTags(Tag&& value) { m_tagsHasBeenSet = true; m_tags.push_back(std::move(value)); return *this; } private: CLMLanguageCode m_languageCode; bool m_languageCodeHasBeenSet; BaseModelName m_baseModelName; bool m_baseModelNameHasBeenSet; Aws::String m_modelName; bool m_modelNameHasBeenSet; InputDataConfig m_inputDataConfig; bool m_inputDataConfigHasBeenSet; Aws::Vector<Tag> m_tags; bool m_tagsHasBeenSet; }; } // namespace Model } // namespace TranscribeService } // namespace Aws
45.927445
138
0.694553
[ "vector", "model" ]
aec7cba63bb137da06b8df8076b39b0b92eee93f
120,129
c
C
m3-sys/m3cc/gcc-4.6/gcc/config/cris/cris.c
RodneyBates/modula-3
9d4ab288617cb92ada991ba78ec2035d4a80e81a
[ "BSD-4-Clause-UC", "BSD-4-Clause" ]
2
2015-03-02T17:01:32.000Z
2021-12-29T14:34:46.000Z
m3-sys/m3cc/gcc-4.6/gcc/config/cris/cris.c
RodneyBates/modula-3
9d4ab288617cb92ada991ba78ec2035d4a80e81a
[ "BSD-4-Clause-UC", "BSD-4-Clause" ]
null
null
null
m3-sys/m3cc/gcc-4.6/gcc/config/cris/cris.c
RodneyBates/modula-3
9d4ab288617cb92ada991ba78ec2035d4a80e81a
[ "BSD-4-Clause-UC", "BSD-4-Clause" ]
1
2021-12-29T14:35:47.000Z
2021-12-29T14:35:47.000Z
/* Definitions for GCC. Part of the machine description for CRIS. Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc. Contributed by Axis Communications. Written by Hans-Peter Nilsson. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "rtl.h" #include "regs.h" #include "hard-reg-set.h" #include "insn-config.h" #include "conditions.h" #include "insn-attr.h" #include "flags.h" #include "tree.h" #include "expr.h" #include "except.h" #include "function.h" #include "diagnostic-core.h" #include "recog.h" #include "reload.h" #include "tm_p.h" #include "debug.h" #include "output.h" #include "target.h" #include "target-def.h" #include "ggc.h" #include "optabs.h" #include "df.h" /* Usable when we have an amount to add or subtract, and want the optimal size of the insn. */ #define ADDITIVE_SIZE_MODIFIER(size) \ ((size) <= 63 ? "q" : (size) <= 255 ? "u.b" : (size) <= 65535 ? "u.w" : ".d") #define LOSE_AND_RETURN(msgid, x) \ do \ { \ cris_operand_lossage (msgid, x); \ return; \ } while (0) enum cris_retinsn_type { CRIS_RETINSN_UNKNOWN = 0, CRIS_RETINSN_RET, CRIS_RETINSN_JUMP }; /* Per-function machine data. */ struct GTY(()) machine_function { int needs_return_address_on_stack; /* This is the number of registers we save in the prologue due to stdarg. 
*/ int stdarg_regs; enum cris_retinsn_type return_type; }; /* This little fix suppresses the 'u' or 's' when '%e' in assembly pattern. */ static char cris_output_insn_is_bound = 0; /* In code for output macros, this is how we know whether e.g. constant goes in code or in a static initializer. */ static int in_code = 0; /* Fix for reg_overlap_mentioned_p. */ static int cris_reg_overlap_mentioned_p (rtx, rtx); static enum machine_mode cris_promote_function_mode (const_tree, enum machine_mode, int *, const_tree, int); static void cris_print_base (rtx, FILE *); static void cris_print_index (rtx, FILE *); static void cris_output_addr_const (FILE *, rtx); static struct machine_function * cris_init_machine_status (void); static rtx cris_struct_value_rtx (tree, int); static void cris_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree type, int *, int); static int cris_initial_frame_pointer_offset (void); static void cris_operand_lossage (const char *, rtx); static int cris_reg_saved_in_regsave_area (unsigned int, bool); static void cris_print_operand (FILE *, rtx, int); static void cris_print_operand_address (FILE *, rtx); static bool cris_print_operand_punct_valid_p (unsigned char code); static void cris_conditional_register_usage (void); static void cris_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT, tree); static void cris_file_start (void); static void cris_init_libfuncs (void); static int cris_register_move_cost (enum machine_mode, reg_class_t, reg_class_t); static int cris_memory_move_cost (enum machine_mode, reg_class_t, bool); static bool cris_rtx_costs (rtx, int, int, int *, bool); static int cris_address_cost (rtx, bool); static bool cris_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode, const_tree, bool); static int cris_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode, tree, bool); static rtx cris_function_arg (CUMULATIVE_ARGS *, enum machine_mode, const_tree, bool); static rtx cris_function_incoming_arg 
(CUMULATIVE_ARGS *, enum machine_mode, const_tree, bool); static void cris_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode, const_tree, bool); static tree cris_md_asm_clobbers (tree, tree, tree); static bool cris_handle_option (size_t, const char *, int); static void cris_option_override (void); static bool cris_frame_pointer_required (void); static void cris_asm_trampoline_template (FILE *); static void cris_trampoline_init (rtx, tree, rtx); static rtx cris_function_value(const_tree, const_tree, bool); static rtx cris_libcall_value (enum machine_mode, const_rtx); /* This is the parsed result of the "-max-stack-stackframe=" option. If it (still) is zero, then there was no such option given. */ int cris_max_stackframe = 0; /* This is the parsed result of the "-march=" option, if given. */ int cris_cpu_version = CRIS_DEFAULT_CPU_VERSION; /* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */ static const struct default_options cris_option_optimization_table[] = { { OPT_LEVELS_2_PLUS, OPT_fomit_frame_pointer, NULL, 1 }, { OPT_LEVELS_NONE, 0, NULL, 0 } }; #undef TARGET_ASM_ALIGNED_HI_OP #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t" #undef TARGET_ASM_ALIGNED_SI_OP #define TARGET_ASM_ALIGNED_SI_OP "\t.dword\t" #undef TARGET_ASM_ALIGNED_DI_OP #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t" /* We need to define these, since the 2byte, 4byte, 8byte op:s are only available in ELF. These "normal" pseudos do not have any alignment constraints or side-effects. 
*/ #undef TARGET_ASM_UNALIGNED_HI_OP #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP #undef TARGET_ASM_UNALIGNED_SI_OP #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP #undef TARGET_ASM_UNALIGNED_DI_OP #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP #undef TARGET_PRINT_OPERAND #define TARGET_PRINT_OPERAND cris_print_operand #undef TARGET_PRINT_OPERAND_ADDRESS #define TARGET_PRINT_OPERAND_ADDRESS cris_print_operand_address #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P #define TARGET_PRINT_OPERAND_PUNCT_VALID_P cris_print_operand_punct_valid_p #undef TARGET_CONDITIONAL_REGISTER_USAGE #define TARGET_CONDITIONAL_REGISTER_USAGE cris_conditional_register_usage #undef TARGET_ASM_OUTPUT_MI_THUNK #define TARGET_ASM_OUTPUT_MI_THUNK cris_asm_output_mi_thunk #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall #undef TARGET_ASM_FILE_START #define TARGET_ASM_FILE_START cris_file_start #undef TARGET_INIT_LIBFUNCS #define TARGET_INIT_LIBFUNCS cris_init_libfuncs #undef TARGET_REGISTER_MOVE_COST #define TARGET_REGISTER_MOVE_COST cris_register_move_cost #undef TARGET_MEMORY_MOVE_COST #define TARGET_MEMORY_MOVE_COST cris_memory_move_cost #undef TARGET_RTX_COSTS #define TARGET_RTX_COSTS cris_rtx_costs #undef TARGET_ADDRESS_COST #define TARGET_ADDRESS_COST cris_address_cost #undef TARGET_PROMOTE_FUNCTION_MODE #define TARGET_PROMOTE_FUNCTION_MODE cris_promote_function_mode #undef TARGET_STRUCT_VALUE_RTX #define TARGET_STRUCT_VALUE_RTX cris_struct_value_rtx #undef TARGET_SETUP_INCOMING_VARARGS #define TARGET_SETUP_INCOMING_VARARGS cris_setup_incoming_varargs #undef TARGET_PASS_BY_REFERENCE #define TARGET_PASS_BY_REFERENCE cris_pass_by_reference #undef TARGET_ARG_PARTIAL_BYTES #define TARGET_ARG_PARTIAL_BYTES cris_arg_partial_bytes #undef TARGET_FUNCTION_ARG #define TARGET_FUNCTION_ARG cris_function_arg #undef TARGET_FUNCTION_INCOMING_ARG #define TARGET_FUNCTION_INCOMING_ARG 
cris_function_incoming_arg #undef TARGET_FUNCTION_ARG_ADVANCE #define TARGET_FUNCTION_ARG_ADVANCE cris_function_arg_advance #undef TARGET_MD_ASM_CLOBBERS #define TARGET_MD_ASM_CLOBBERS cris_md_asm_clobbers #undef TARGET_DEFAULT_TARGET_FLAGS #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | CRIS_SUBTARGET_DEFAULT) #undef TARGET_HANDLE_OPTION #define TARGET_HANDLE_OPTION cris_handle_option #undef TARGET_FRAME_POINTER_REQUIRED #define TARGET_FRAME_POINTER_REQUIRED cris_frame_pointer_required #undef TARGET_OPTION_OVERRIDE #define TARGET_OPTION_OVERRIDE cris_option_override #undef TARGET_OPTION_OPTIMIZATION_TABLE #define TARGET_OPTION_OPTIMIZATION_TABLE cris_option_optimization_table #undef TARGET_ASM_TRAMPOLINE_TEMPLATE #define TARGET_ASM_TRAMPOLINE_TEMPLATE cris_asm_trampoline_template #undef TARGET_TRAMPOLINE_INIT #define TARGET_TRAMPOLINE_INIT cris_trampoline_init #undef TARGET_FUNCTION_VALUE #define TARGET_FUNCTION_VALUE cris_function_value #undef TARGET_LIBCALL_VALUE #define TARGET_LIBCALL_VALUE cris_libcall_value struct gcc_target targetm = TARGET_INITIALIZER; /* Helper for cris_load_multiple_op and cris_ret_movem_op. */ bool cris_movem_load_rest_p (rtx op, int offs) { unsigned int reg_count = XVECLEN (op, 0) - offs; rtx src_addr; int i; rtx elt; int setno; int regno_dir = 1; unsigned int regno = 0; /* Perform a quick check so we don't blow up below. FIXME: Adjust for other than (MEM reg). */ if (reg_count <= 1 || GET_CODE (XVECEXP (op, 0, offs)) != SET || !REG_P (SET_DEST (XVECEXP (op, 0, offs))) || !MEM_P (SET_SRC (XVECEXP (op, 0, offs)))) return false; /* Check a possible post-inc indicator. 
*/ if (GET_CODE (SET_SRC (XVECEXP (op, 0, offs + 1))) == PLUS) { rtx reg = XEXP (SET_SRC (XVECEXP (op, 0, offs + 1)), 0); rtx inc = XEXP (SET_SRC (XVECEXP (op, 0, offs + 1)), 1); reg_count--; if (reg_count == 1 || !REG_P (reg) || !REG_P (SET_DEST (XVECEXP (op, 0, offs + 1))) || REGNO (reg) != REGNO (SET_DEST (XVECEXP (op, 0, offs + 1))) || !CONST_INT_P (inc) || INTVAL (inc) != (HOST_WIDE_INT) reg_count * 4) return false; i = offs + 2; } else i = offs + 1; if (!TARGET_V32) { regno_dir = -1; regno = reg_count - 1; } elt = XVECEXP (op, 0, offs); src_addr = XEXP (SET_SRC (elt), 0); if (GET_CODE (elt) != SET || !REG_P (SET_DEST (elt)) || GET_MODE (SET_DEST (elt)) != SImode || REGNO (SET_DEST (elt)) != regno || !MEM_P (SET_SRC (elt)) || GET_MODE (SET_SRC (elt)) != SImode || !memory_address_p (SImode, src_addr)) return false; for (setno = 1; i < XVECLEN (op, 0); setno++, i++) { rtx elt = XVECEXP (op, 0, i); regno += regno_dir; if (GET_CODE (elt) != SET || !REG_P (SET_DEST (elt)) || GET_MODE (SET_DEST (elt)) != SImode || REGNO (SET_DEST (elt)) != regno || !MEM_P (SET_SRC (elt)) || GET_MODE (SET_SRC (elt)) != SImode || GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr) || !CONST_INT_P (XEXP (XEXP (SET_SRC (elt), 0), 1)) || INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != setno * 4) return false; } return true; } /* Worker function for predicate for the parallel contents in a movem to-memory. */ bool cris_store_multiple_op_p (rtx op) { int reg_count = XVECLEN (op, 0); rtx dest; rtx dest_addr; rtx dest_base; int i; rtx elt; int setno; int regno_dir = 1; int regno = 0; int offset = 0; /* Perform a quick check so we don't blow up below. FIXME: Adjust for other than (MEM reg) and (MEM (PLUS reg const)). 
*/ if (reg_count <= 1) return false; elt = XVECEXP (op, 0, 0); if (GET_CODE (elt) != SET) return false; dest = SET_DEST (elt); if (!REG_P (SET_SRC (elt)) || !MEM_P (dest)) return false; dest_addr = XEXP (dest, 0); /* Check a possible post-inc indicator. */ if (GET_CODE (SET_SRC (XVECEXP (op, 0, 1))) == PLUS) { rtx reg = XEXP (SET_SRC (XVECEXP (op, 0, 1)), 0); rtx inc = XEXP (SET_SRC (XVECEXP (op, 0, 1)), 1); reg_count--; if (reg_count == 1 || !REG_P (reg) || !REG_P (SET_DEST (XVECEXP (op, 0, 1))) || REGNO (reg) != REGNO (SET_DEST (XVECEXP (op, 0, 1))) || !CONST_INT_P (inc) /* Support increment by number of registers, and by the offset of the destination, if it has the form (MEM (PLUS reg offset)). */ || !((REG_P (dest_addr) && REGNO (dest_addr) == REGNO (reg) && INTVAL (inc) == (HOST_WIDE_INT) reg_count * 4) || (GET_CODE (dest_addr) == PLUS && REG_P (XEXP (dest_addr, 0)) && REGNO (XEXP (dest_addr, 0)) == REGNO (reg) && CONST_INT_P (XEXP (dest_addr, 1)) && INTVAL (XEXP (dest_addr, 1)) == INTVAL (inc)))) return false; i = 2; } else i = 1; if (!TARGET_V32) { regno_dir = -1; regno = reg_count - 1; } if (GET_CODE (elt) != SET || !REG_P (SET_SRC (elt)) || GET_MODE (SET_SRC (elt)) != SImode || REGNO (SET_SRC (elt)) != (unsigned int) regno || !MEM_P (SET_DEST (elt)) || GET_MODE (SET_DEST (elt)) != SImode) return false; if (REG_P (dest_addr)) { dest_base = dest_addr; offset = 0; } else if (GET_CODE (dest_addr) == PLUS && REG_P (XEXP (dest_addr, 0)) && CONST_INT_P (XEXP (dest_addr, 1))) { dest_base = XEXP (dest_addr, 0); offset = INTVAL (XEXP (dest_addr, 1)); } else return false; for (setno = 1; i < XVECLEN (op, 0); setno++, i++) { rtx elt = XVECEXP (op, 0, i); regno += regno_dir; if (GET_CODE (elt) != SET || !REG_P (SET_SRC (elt)) || GET_MODE (SET_SRC (elt)) != SImode || REGNO (SET_SRC (elt)) != (unsigned int) regno || !MEM_P (SET_DEST (elt)) || GET_MODE (SET_DEST (elt)) != SImode || GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS || ! 
rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_base) || !CONST_INT_P (XEXP (XEXP (SET_DEST (elt), 0), 1)) || INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != setno * 4 + offset) return false; } return true; } /* The TARGET_CONDITIONAL_REGISTER_USAGE worker. */ static void cris_conditional_register_usage (void) { /* FIXME: This isn't nice. We should be able to use that register for something else if the PIC table isn't needed. */ if (flag_pic) fixed_regs[PIC_OFFSET_TABLE_REGNUM] = call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1; /* Allow use of ACR (PC in pre-V32) and tweak order. */ if (TARGET_V32) { static const int reg_alloc_order_v32[] = REG_ALLOC_ORDER_V32; unsigned int i; fixed_regs[CRIS_ACR_REGNUM] = 0; for (i = 0; i < sizeof (reg_alloc_order_v32)/sizeof (reg_alloc_order_v32[0]); i++) reg_alloc_order[i] = reg_alloc_order_v32[i]; } if (TARGET_HAS_MUL_INSNS) fixed_regs[CRIS_MOF_REGNUM] = 0; /* On early versions, we must use the 16-bit condition-code register, which has another name. */ if (cris_cpu_version < 8) reg_names[CRIS_CC0_REGNUM] = "ccr"; } /* Return crtl->uses_pic_offset_table. For use in cris.md, since some generated files do not include function.h. */ int cris_cfun_uses_pic_table (void) { return crtl->uses_pic_offset_table; } /* Given an rtx, return the text string corresponding to the CODE of X. Intended for use in the assembly language output section of a define_insn. */ const char * cris_op_str (rtx x) { cris_output_insn_is_bound = 0; switch (GET_CODE (x)) { case PLUS: return "add"; break; case MINUS: return "sub"; break; case MULT: /* This function is for retrieving a part of an instruction name for an operator, for immediate output. If that ever happens for MULT, we need to apply TARGET_MUL_BUG in the caller. Make sure we notice. 
*/ internal_error ("MULT case in cris_op_str"); break; case DIV: return "div"; break; case AND: return "and"; break; case IOR: return "or"; break; case XOR: return "xor"; break; case NOT: return "not"; break; case ASHIFT: return "lsl"; break; case LSHIFTRT: return "lsr"; break; case ASHIFTRT: return "asr"; break; case UMIN: /* Used to control the sign/zero-extend character for the 'E' modifier. BOUND has none. */ cris_output_insn_is_bound = 1; return "bound"; break; default: return "Unknown operator"; break; } } /* Emit an error message when we're in an asm, and a fatal error for "normal" insns. Formatted output isn't easily implemented, since we use output_operand_lossage to output the actual message and handle the categorization of the error. */ static void cris_operand_lossage (const char *msgid, rtx op) { debug_rtx (op); output_operand_lossage ("%s", msgid); } /* Print an index part of an address to file. */ static void cris_print_index (rtx index, FILE *file) { /* Make the index "additive" unless we'll output a negative number, in which case the sign character is free (as in free beer). */ if (!CONST_INT_P (index) || INTVAL (index) >= 0) putc ('+', file); if (REG_P (index)) fprintf (file, "$%s.b", reg_names[REGNO (index)]); else if (CONSTANT_P (index)) cris_output_addr_const (file, index); else if (GET_CODE (index) == MULT) { fprintf (file, "$%s.", reg_names[REGNO (XEXP (index, 0))]); putc (INTVAL (XEXP (index, 1)) == 2 ? 'w' : 'd', file); } else if (GET_CODE (index) == SIGN_EXTEND && MEM_P (XEXP (index, 0))) { rtx inner = XEXP (index, 0); rtx inner_inner = XEXP (inner, 0); if (GET_CODE (inner_inner) == POST_INC) { fprintf (file, "[$%s+].", reg_names[REGNO (XEXP (inner_inner, 0))]); putc (GET_MODE (inner) == HImode ? 'w' : 'b', file); } else { fprintf (file, "[$%s].", reg_names[REGNO (inner_inner)]); putc (GET_MODE (inner) == HImode ? 
'w' : 'b', file);
	}
    }
  else if (MEM_P (index))
    {
      /* A memory index: plain [$rN].d or post-incremented [$rN+].d.  */
      rtx inner = XEXP (index, 0);

      if (GET_CODE (inner) == POST_INC)
	fprintf (file, "[$%s+].d",
		 reg_names[REGNO (XEXP (inner, 0))]);
      else
	fprintf (file, "[$%s].d", reg_names[REGNO (inner)]);
    }
  else
    cris_operand_lossage ("unexpected index-type in cris_print_index",
			  index);
}

/* Print a base rtx of an address to file.
   Accepts a plain register ($rN) or a post-increment ($rN+); anything
   else is reported through cris_operand_lossage.  */

static void
cris_print_base (rtx base, FILE *file)
{
  if (REG_P (base))
    fprintf (file, "$%s", reg_names[REGNO (base)]);
  else if (GET_CODE (base) == POST_INC)
    {
      /* ACR must not appear with post-increment; guard against it.  */
      gcc_assert (REGNO (XEXP (base, 0)) != CRIS_ACR_REGNUM);
      fprintf (file, "$%s+", reg_names[REGNO (XEXP (base, 0))]);
    }
  else
    cris_operand_lossage ("unexpected base-type in cris_print_base",
			  base);
}

/* Usable as a guard in expressions.
   Never returns; calls internal_error with ARG.  */

int
cris_fatal (char *arg)
{
  /* NOTE(review): ARG is forwarded to internal_error as the format
     string; callers must pass only trusted literal strings, or this is
     a format-string hazard.  internal_error ("%s", arg) would be the
     defensive spelling -- confirm against callers before changing.  */
  internal_error (arg);

  /* We'll never get here; this is just to appease compilers.  */
  return 0;
}

/* Return nonzero if REGNO is an ordinary register that *needs* to be
   saved together with other registers, possibly by a MOVEM instruction,
   or is saved for target-independent reasons.  There may be
   target-dependent reasons to save the register anyway; this is just a
   wrapper for a complicated conditional.

   GOT_REALLY_USED says whether the PIC offset-table register is
   actually referenced by the emitted insns (computed by the caller);
   it only matters when REGNO is PIC_OFFSET_TABLE_REGNUM.  */

static int
cris_reg_saved_in_regsave_area (unsigned int regno, bool got_really_used)
{
  return
    (((df_regs_ever_live_p (regno)
       && !call_used_regs[regno])
      || (regno == PIC_OFFSET_TABLE_REGNUM
	  && (got_really_used
	      /* It is saved anyway, if there would be a gap.  */
	      || (flag_pic
		  && df_regs_ever_live_p (regno + 1)
		  && !call_used_regs[regno + 1]))))
     /* A frame pointer in use and the return address register are
	saved separately, not in the general regsave area.  */
     && (regno != FRAME_POINTER_REGNUM || !frame_pointer_needed)
     && regno != CRIS_SRP_REGNUM)
    || (crtl->calls_eh_return
	&& (regno == EH_RETURN_DATA_REGNO (0)
	    || regno == EH_RETURN_DATA_REGNO (1)
	    || regno == EH_RETURN_DATA_REGNO (2)
	    || regno == EH_RETURN_DATA_REGNO (3)));
}

/* The PRINT_OPERAND worker.  */

static void
cris_print_operand (FILE *file, rtx x, int code)
{
  rtx operand = x;

  /* Size-strings corresponding to MULT expressions.
*/ static const char *const mults[] = { "BAD:0", ".b", ".w", "BAD:3", ".d" }; /* New code entries should just be added to the switch below. If handling is finished, just return. If handling was just a modification of the operand, the modified operand should be put in "operand", and then do a break to let default handling (zero-modifier) output the operand. */ switch (code) { case 'b': /* Print the unsigned supplied integer as if it were signed and < 0, i.e print 255 or 65535 as -1, 254, 65534 as -2, etc. */ if (!CONST_INT_P (x) || !CRIS_CONST_OK_FOR_LETTER_P (INTVAL (x), 'O')) LOSE_AND_RETURN ("invalid operand for 'b' modifier", x); fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x)| (INTVAL (x) <= 255 ? ~255 : ~65535)); return; case 'x': /* Print assembler code for operator. */ fprintf (file, "%s", cris_op_str (operand)); return; case 'o': { /* A movem modifier working on a parallel; output the register name. */ int regno; if (GET_CODE (x) != PARALLEL) LOSE_AND_RETURN ("invalid operand for 'o' modifier", x); /* The second item can be (set reg (plus reg const)) to denote a postincrement. */ regno = (GET_CODE (SET_SRC (XVECEXP (x, 0, 1))) == PLUS ? XVECLEN (x, 0) - 2 : XVECLEN (x, 0) - 1); fprintf (file, "$%s", reg_names [regno]); } return; case 'O': { /* A similar movem modifier; output the memory operand. */ rtx addr; if (GET_CODE (x) != PARALLEL) LOSE_AND_RETURN ("invalid operand for 'O' modifier", x); /* The lowest mem operand is in the first item, but perhaps it needs to be output as postincremented. */ addr = MEM_P (SET_SRC (XVECEXP (x, 0, 0))) ? XEXP (SET_SRC (XVECEXP (x, 0, 0)), 0) : XEXP (SET_DEST (XVECEXP (x, 0, 0)), 0); /* The second item can be a (set reg (plus reg const)) to denote a modification. */ if (GET_CODE (SET_SRC (XVECEXP (x, 0, 1))) == PLUS) { /* It's a post-increment, if the address is a naked (reg). */ if (REG_P (addr)) addr = gen_rtx_POST_INC (SImode, addr); else { /* Otherwise, it's a side-effect; RN=RN+M. 
*/ fprintf (file, "[$%s=$%s%s%d]", reg_names [REGNO (SET_DEST (XVECEXP (x, 0, 1)))], reg_names [REGNO (XEXP (addr, 0))], INTVAL (XEXP (addr, 1)) < 0 ? "" : "+", (int) INTVAL (XEXP (addr, 1))); return; } } output_address (addr); } return; case 'p': /* Adjust a power of two to its log2. */ if (!CONST_INT_P (x) || exact_log2 (INTVAL (x)) < 0 ) LOSE_AND_RETURN ("invalid operand for 'p' modifier", x); fprintf (file, "%d", exact_log2 (INTVAL (x))); return; case 's': /* For an integer, print 'b' or 'w' if <= 255 or <= 65535 respectively. This modifier also terminates the inhibiting effects of the 'x' modifier. */ cris_output_insn_is_bound = 0; if (GET_MODE (x) == VOIDmode && CONST_INT_P (x)) { if (INTVAL (x) >= 0) { if (INTVAL (x) <= 255) putc ('b', file); else if (INTVAL (x) <= 65535) putc ('w', file); else putc ('d', file); } else putc ('d', file); return; } /* For a non-integer, print the size of the operand. */ putc ((GET_MODE (x) == SImode || GET_MODE (x) == SFmode) ? 'd' : GET_MODE (x) == HImode ? 'w' : GET_MODE (x) == QImode ? 'b' /* If none of the above, emit an erroneous size letter. */ : 'X', file); return; case 'z': /* Const_int: print b for -127 <= x <= 255, w for -32768 <= x <= 65535, else die. */ if (!CONST_INT_P (x) || INTVAL (x) < -32768 || INTVAL (x) > 65535) LOSE_AND_RETURN ("invalid operand for 'z' modifier", x); putc (INTVAL (x) >= -128 && INTVAL (x) <= 255 ? 'b' : 'w', file); return; case 'Z': /* If this is a GOT-symbol, print the size-letter corresponding to -fpic/-fPIC. For everything else, print "d". */ putc ((flag_pic == 1 && GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC && XINT (XEXP (x, 0), 1) == CRIS_UNSPEC_GOTREAD) ? 'w' : 'd', file); return; case '#': /* Output a 'nop' if there's nothing for the delay slot. This method stolen from the sparc files. */ if (dbr_sequence_length () == 0) fputs ("\n\tnop", file); return; case '!': /* Output directive for alignment padded with "nop" insns. 
Optimizing for size, it's plain 4-byte alignment, otherwise we align the section to a cache-line (32 bytes) and skip at max 2 bytes, i.e. we skip if it's the last insn on a cache-line. The latter is faster by a small amount (for two test-programs 99.6% and 99.9%) and larger by a small amount (ditto 100.1% and 100.2%). This is supposed to be the simplest yet performance- wise least intrusive way to make sure the immediately following (supposed) muls/mulu insn isn't located at the end of a cache-line. */ if (TARGET_MUL_BUG) fputs (optimize_size ? ".p2alignw 2,0x050f\n\t" : ".p2alignw 5,0x050f,2\n\t", file); return; case ':': /* The PIC register. */ if (! flag_pic) internal_error ("invalid use of ':' modifier"); fprintf (file, "$%s", reg_names [PIC_OFFSET_TABLE_REGNUM]); return; case 'H': /* Print high (most significant) part of something. */ switch (GET_CODE (operand)) { case CONST_INT: /* If we're having 64-bit HOST_WIDE_INTs, the whole (DImode) value is kept here, and so may be other than 0 or -1. */ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (operand_subword (operand, 1, 0, DImode))); return; case CONST_DOUBLE: /* High part of a long long constant. */ if (GET_MODE (operand) == VOIDmode) { fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_HIGH (x)); return; } else LOSE_AND_RETURN ("invalid operand for 'H' modifier", x); case REG: /* Print reg + 1. Check that there's not an attempt to print high-parts of registers like stack-pointer or higher, except for SRP (where the "high part" is MOF). */ if (REGNO (operand) > STACK_POINTER_REGNUM - 2 && (REGNO (operand) != CRIS_SRP_REGNUM || CRIS_SRP_REGNUM + 1 != CRIS_MOF_REGNUM || fixed_regs[CRIS_MOF_REGNUM] != 0)) LOSE_AND_RETURN ("bad register", operand); fprintf (file, "$%s", reg_names[REGNO (operand) + 1]); return; case MEM: /* Adjust memory address to high part. */ { rtx adj_mem = operand; int size = GET_MODE_BITSIZE (GET_MODE (operand)) / BITS_PER_UNIT; /* Adjust so we can use two SImode in DImode. 
Calling adj_offsettable_operand will make sure it is an offsettable address. Don't do this for a postincrement though; it should remain as it was. */ if (GET_CODE (XEXP (adj_mem, 0)) != POST_INC) adj_mem = adjust_address (adj_mem, GET_MODE (adj_mem), size / 2); output_address (XEXP (adj_mem, 0)); return; } default: LOSE_AND_RETURN ("invalid operand for 'H' modifier", x); } case 'L': /* Strip the MEM expression. */ operand = XEXP (operand, 0); break; case 'e': /* Like 'E', but ignore state set by 'x'. FIXME: Use code iterators and attributes in cris.md to avoid the need for %x and %E (and %e) and state passed between those modifiers. */ cris_output_insn_is_bound = 0; /* FALL THROUGH. */ case 'E': /* Print 's' if operand is SIGN_EXTEND or 'u' if ZERO_EXTEND unless cris_output_insn_is_bound is nonzero. */ if (GET_CODE (operand) != SIGN_EXTEND && GET_CODE (operand) != ZERO_EXTEND && !CONST_INT_P (operand)) LOSE_AND_RETURN ("invalid operand for 'e' modifier", x); if (cris_output_insn_is_bound) { cris_output_insn_is_bound = 0; return; } putc (GET_CODE (operand) == SIGN_EXTEND || (CONST_INT_P (operand) && INTVAL (operand) < 0) ? 's' : 'u', file); return; case 'm': /* Print the size letter of the inner element. We can do it by calling ourselves with the 's' modifier. */ if (GET_CODE (operand) != SIGN_EXTEND && GET_CODE (operand) != ZERO_EXTEND) LOSE_AND_RETURN ("invalid operand for 'm' modifier", x); cris_print_operand (file, XEXP (operand, 0), 's'); return; case 'M': /* Print the least significant part of operand. */ if (GET_CODE (operand) == CONST_DOUBLE) { fprintf (file, HOST_WIDE_INT_PRINT_HEX, CONST_DOUBLE_LOW (x)); return; } else if (HOST_BITS_PER_WIDE_INT > 32 && CONST_INT_P (operand)) { fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x) & ((unsigned int) 0x7fffffff * 2 + 1)); return; } /* Otherwise the least significant part equals the normal part, so handle it normally. 
*/ break; case 'A': /* When emitting an add for the high part of a DImode constant, we want to use addq for 0 and adds.w for -1. */ if (!CONST_INT_P (operand)) LOSE_AND_RETURN ("invalid operand for 'A' modifier", x); fprintf (file, INTVAL (operand) < 0 ? "adds.w" : "addq"); return; case 'd': /* If this is a GOT symbol, force it to be emitted as :GOT and :GOTPLT regardless of -fpic (i.e. not as :GOT16, :GOTPLT16). Avoid making this too much of a special case. */ if (flag_pic == 1 && CONSTANT_P (operand)) { int flag_pic_save = flag_pic; flag_pic = 2; cris_output_addr_const (file, operand); flag_pic = flag_pic_save; return; } break; case 'D': /* When emitting an sub for the high part of a DImode constant, we want to use subq for 0 and subs.w for -1. */ if (!CONST_INT_P (operand)) LOSE_AND_RETURN ("invalid operand for 'D' modifier", x); fprintf (file, INTVAL (operand) < 0 ? "subs.w" : "subq"); return; case 'S': /* Print the operand as the index-part of an address. Easiest way out is to use cris_print_index. */ cris_print_index (operand, file); return; case 'T': /* Print the size letter for an operand to a MULT, which must be a const_int with a suitable value. */ if (!CONST_INT_P (operand) || INTVAL (operand) > 4) LOSE_AND_RETURN ("invalid operand for 'T' modifier", x); fprintf (file, "%s", mults[INTVAL (operand)]); return; case 'u': /* Print "u.w" if a GOT symbol and flag_pic == 1, else ".d". */ if (flag_pic == 1 && GET_CODE (operand) == CONST && GET_CODE (XEXP (operand, 0)) == UNSPEC && XINT (XEXP (operand, 0), 1) == CRIS_UNSPEC_GOTREAD) fprintf (file, "u.w"); else fprintf (file, ".d"); return; case 0: /* No code, print as usual. */ break; default: LOSE_AND_RETURN ("invalid operand modifier letter", x); } /* Print an operand as without a modifier letter. 
*/ switch (GET_CODE (operand)) { case REG: if (REGNO (operand) > 15 && REGNO (operand) != CRIS_MOF_REGNUM && REGNO (operand) != CRIS_SRP_REGNUM && REGNO (operand) != CRIS_CC0_REGNUM) internal_error ("internal error: bad register: %d", REGNO (operand)); fprintf (file, "$%s", reg_names[REGNO (operand)]); return; case MEM: output_address (XEXP (operand, 0)); return; case CONST_DOUBLE: if (GET_MODE (operand) == VOIDmode) /* A long long constant. */ output_addr_const (file, operand); else { /* Only single precision is allowed as plain operands the moment. FIXME: REAL_VALUE_FROM_CONST_DOUBLE isn't documented. */ REAL_VALUE_TYPE r; long l; /* FIXME: Perhaps check overflow of the "single". */ REAL_VALUE_FROM_CONST_DOUBLE (r, operand); REAL_VALUE_TO_TARGET_SINGLE (r, l); fprintf (file, "0x%lx", l); } return; case UNSPEC: /* Fall through. */ case CONST: cris_output_addr_const (file, operand); return; case MULT: case ASHIFT: { /* For a (MULT (reg X) const_int) we output "rX.S". */ int i = CONST_INT_P (XEXP (operand, 1)) ? INTVAL (XEXP (operand, 1)) : INTVAL (XEXP (operand, 0)); rtx reg = CONST_INT_P (XEXP (operand, 1)) ? XEXP (operand, 0) : XEXP (operand, 1); if (!REG_P (reg) || (!CONST_INT_P (XEXP (operand, 0)) && !CONST_INT_P (XEXP (operand, 1)))) LOSE_AND_RETURN ("unexpected multiplicative operand", x); cris_print_base (reg, file); fprintf (file, ".%c", i == 0 || (i == 1 && GET_CODE (operand) == MULT) ? 'b' : i == 4 ? 'd' : (i == 2 && GET_CODE (operand) == MULT) || i == 1 ? 'w' : 'd'); return; } default: /* No need to handle all strange variants, let output_addr_const do it for us. */ if (CONSTANT_P (operand)) { cris_output_addr_const (file, operand); return; } LOSE_AND_RETURN ("unexpected operand", x); } } static bool cris_print_operand_punct_valid_p (unsigned char code) { return (code == '#' || code == '!' || code == ':'); } /* The PRINT_OPERAND_ADDRESS worker. 
*/

/* The PRINT_OPERAND_ADDRESS worker: emit assembly syntax for address X
   (the inside of a MEM) to FILE.  CRIS memory operands are bracketed;
   a double-indirect ("DIP") operand gets a second bracket level.  */

static void
cris_print_operand_address (FILE *file, rtx x)
{
  /* All these were inside MEM:s so output indirection characters.  */
  putc ('[', file);

  if (CONSTANT_ADDRESS_P (x))
    cris_output_addr_const (file, x);
  else if (BASE_OR_AUTOINCR_P (x))
    cris_print_base (x, file);
  else if (GET_CODE (x) == PLUS)
    {
      /* Base + index; either operand of the PLUS may be the base,
	 so try both orders.  */
      rtx x1, x2;

      x1 = XEXP (x, 0);
      x2 = XEXP (x, 1);
      if (BASE_P (x1))
	{
	  cris_print_base (x1, file);
	  cris_print_index (x2, file);
	}
      else if (BASE_P (x2))
	{
	  cris_print_base (x2, file);
	  cris_print_index (x1, file);
	}
      else
	LOSE_AND_RETURN ("unrecognized address", x);
    }
  else if (MEM_P (x))
    {
      /* A DIP.  Output more indirection characters.  */
      putc ('[', file);
      cris_print_base (XEXP (x, 0), file);
      putc (']', file);
    }
  else
    LOSE_AND_RETURN ("unrecognized address", x);

  putc (']', file);
}

/* The RETURN_ADDR_RTX worker.
   We mark that the return address is used, either by EH or
   __builtin_return_address, for use by the function prologue and
   epilogue.  FIXME: This isn't optimal; we just use the mark in the
   prologue and epilogue to say that the return address is to be stored
   in the stack frame.  We could return SRP for leaf-functions and use the
   initial-value machinery.  */

rtx
cris_return_addr_rtx (int count, rtx frameaddr ATTRIBUTE_UNUSED)
{
  /* Side effect: force the prologue/epilogue to store the return
     address on the stack for this function.  */
  cfun->machine->needs_return_address_on_stack = 1;

  /* The return-address is stored just above the saved frame-pointer (if
     present).  Apparently we can't eliminate from the frame-pointer in
     that direction, so use the incoming args (maybe pretended) pointer.
     Only the current frame (COUNT == 0) is supported.  */
  return count == 0
    ? gen_rtx_MEM (Pmode, plus_constant (virtual_incoming_args_rtx, -4))
    : NULL_RTX;
}

/* Accessor used in cris.md:return because cfun->machine isn't
   available there.  */

bool
cris_return_address_on_stack (void)
{
  return df_regs_ever_live_p (CRIS_SRP_REGNUM)
    || cfun->machine->needs_return_address_on_stack;
}

/* Accessor used in cris.md:return because cfun->machine isn't
   available there.
*/

bool
cris_return_address_on_stack_for_return (void)
{
  /* A plain "ret"-type return does not need the stacked return address;
     any other return type defers to the general check.  */
  return cfun->machine->return_type == CRIS_RETINSN_RET
    ? false : cris_return_address_on_stack ();
}

/* This used to be the INITIAL_FRAME_POINTER_OFFSET worker; now only
   handles FP -> SP elimination offset.  Returns the byte distance,
   rounded for stack alignment when that is in effect.  */

static int
cris_initial_frame_pointer_offset (void)
{
  int regno;

  /* Initial offset is 0 if we don't have a frame pointer.  */
  int offs = 0;
  bool got_really_used = false;

  if (crtl->uses_pic_offset_table)
    {
      /* Check whether the PIC register is actually referenced in the
	 emitted insn stream; only then does it occupy a save slot.  */
      push_topmost_sequence ();
      got_really_used
	= reg_used_between_p (pic_offset_table_rtx, get_insns (),
			      NULL_RTX);
      pop_topmost_sequence ();
    }

  /* And 4 for each register pushed.  */
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (cris_reg_saved_in_regsave_area (regno, got_really_used))
      offs += 4;

  /* And then, last, we add the locals allocated.  */
  offs += get_frame_size ();

  /* And more; the accumulated args size.  */
  offs += crtl->outgoing_args_size;

  /* Then round it off, in case we use aligned stack.  */
  if (TARGET_STACK_ALIGN)
    offs = TARGET_ALIGN_BY_32 ? (offs + 3) & ~3 : (offs + 1) & ~1;

  return offs;
}

/* The INITIAL_ELIMINATION_OFFSET worker.
   Calculate the difference between imaginary registers such as frame
   pointer and the stack pointer.  Used to eliminate the frame pointer
   and imaginary arg pointer.  FROMREG/TOREG name the register pair
   being eliminated.  */

int
cris_initial_elimination_offset (int fromreg, int toreg)
{
  int fp_sp_offset
    = cris_initial_frame_pointer_offset ();

  /* We should be able to use regs_ever_live and related prologue
     information here, or alpha should not as well.  */
  bool return_address_on_stack = cris_return_address_on_stack ();

  /* Here we act as if the frame-pointer were needed.  4 bytes for the
     saved FP itself, plus 4 for the return address when stacked.  */
  int ap_fp_offset = 4 + (return_address_on_stack ? 4 : 0);

  if (fromreg == ARG_POINTER_REGNUM
      && toreg == FRAME_POINTER_REGNUM)
    return ap_fp_offset;

  /* Between the frame pointer and the stack are only "normal" stack
     variables and saved registers.
*/
  if (fromreg == FRAME_POINTER_REGNUM
      && toreg == STACK_POINTER_REGNUM)
    return fp_sp_offset;

  /* We need to balance out the frame pointer here.  */
  if (fromreg == ARG_POINTER_REGNUM
      && toreg == STACK_POINTER_REGNUM)
    return ap_fp_offset + fp_sp_offset - 4;

  /* No other eliminations are valid on CRIS.  */
  gcc_unreachable ();
}

/* Worker function for LEGITIMIZE_RELOAD_ADDRESS.
   Recognize the pre-V32 "[rx=ry+[rz].S]"-style address X and, when its
   registers are pseudos or non-general hard registers, push the needed
   reloads itself.  Returns true if the address was handled here (and
   reloads pushed), false to let the generic reload code take over.
   ITYPE is the reload_type passed as an int.  */

bool
cris_reload_address_legitimized (rtx x,
				 enum machine_mode mode ATTRIBUTE_UNUSED,
				 int opnum ATTRIBUTE_UNUSED,
				 int itype,
				 int ind_levels ATTRIBUTE_UNUSED)
{
  enum reload_type type = (enum reload_type) itype;
  rtx op0, op1;
  rtx *op1p;

  if (GET_CODE (x) != PLUS)
    return false;

  if (TARGET_V32)
    return false;

  op0 = XEXP (x, 0);
  op1 = XEXP (x, 1);
  op1p = &XEXP (x, 1);

  if (!REG_P (op1))
    return false;

  if (GET_CODE (op0) == SIGN_EXTEND && MEM_P (XEXP (op0, 0)))
    {
      rtx op00 = XEXP (op0, 0);
      rtx op000 = XEXP (op00, 0);
      rtx *op000p = &XEXP (op00, 0);

      /* Only a sign-extended HImode/QImode memory index qualifies,
	 with a plain or post-incremented register address inside.  */
      if ((GET_MODE (op00) == HImode || GET_MODE (op00) == QImode)
	  && (REG_P (op000)
	      || (GET_CODE (op000) == POST_INC
		  && REG_P (XEXP (op000, 0)))))
	{
	  bool something_reloaded = false;

	  if (GET_CODE (op000) == POST_INC
	      && REG_P (XEXP (op000, 0))
	      && REGNO (XEXP (op000, 0)) > CRIS_LAST_GENERAL_REGISTER)
	    /* No, this gets too complicated and is too rare to care
	       about trying to improve on the general code Here.
	       As the return-value is an all-or-nothing indicator, we
	       punt on the other register too.  */
	    return false;

	  if ((REG_P (op000)
	       && REGNO (op000) > CRIS_LAST_GENERAL_REGISTER))
	    {
	      /* The address of the inner mem is a pseudo or wrong
		 reg: reload that.  */
	      push_reload (op000, NULL_RTX, op000p, NULL, GENERAL_REGS,
			   GET_MODE (x), VOIDmode, 0, 0,
			   opnum, type);
	      something_reloaded = true;
	    }

	  if (REGNO (op1) > CRIS_LAST_GENERAL_REGISTER)
	    {
	      /* Base register is a pseudo or wrong reg: reload it.  */
	      push_reload (op1, NULL_RTX, op1p, NULL, GENERAL_REGS,
			   GET_MODE (x), VOIDmode, 0, 0,
			   opnum, type);
	      something_reloaded = true;
	    }

	  /* We would not get here unless at least one register needed
	     reloading; check that invariant.  */
	  gcc_assert (something_reloaded);

	  return true;
	}
    }

  return false;
}

/* Worker function for TARGET_REGISTER_MOVE_COST.
*/

static int
cris_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			 reg_class_t from, reg_class_t to)
{
  if (!TARGET_V32)
    {
      /* Pretend that classes that we don't support are ALL_REGS, so
	 we give them the highest cost.  */
      if (from != SPECIAL_REGS && from != MOF_REGS
	  && from != GENERAL_REGS && from != GENNONACR_REGS)
	from = ALL_REGS;

      if (to != SPECIAL_REGS && to != MOF_REGS
	  && to != GENERAL_REGS && to != GENNONACR_REGS)
	to = ALL_REGS;
    }

  /* Can't move to and from a SPECIAL_REGS register, so we have to
     say their move cost within that class is higher.  How about 7?
     That's 3 for a move to a GENERAL_REGS register, 3 for the move
     from the GENERAL_REGS register, and 1 for the increased register
     pressure.  Also, it's higher than the memory move cost, which is
     in order.  We also do this for ALL_REGS, since we don't want that
     class to be preferred (even to memory) at all where GENERAL_REGS
     doesn't fit.  Whenever it's about to be used, it's for SPECIAL_REGS.
     If we don't present a higher cost for ALL_REGS than memory, a
     SPECIAL_REGS may be used when a GENERAL_REGS should be used, even
     if there are call-saved GENERAL_REGS left to allocate.  This is
     because the fall-back when the most preferred register class isn't
     available, isn't the next (or next good) wider register class, but
     the *most widest* register class.  */

  if ((reg_classes_intersect_p (from, SPECIAL_REGS)
       && reg_classes_intersect_p (to, SPECIAL_REGS))
      || from == ALL_REGS || to == ALL_REGS)
    return 7;

  if (reg_classes_intersect_p (from, SPECIAL_REGS)
      || reg_classes_intersect_p (to, SPECIAL_REGS))
    return 3;

  return 2;
}

/* Worker function for TARGET_MEMORY_MOVE_COST.

   This isn't strictly correct for v0..3 in buswidth-8bit mode, but
   should suffice.  Byte/word accesses are cheaper (4) than dword or
   larger (6).  */

static int
cris_memory_move_cost (enum machine_mode mode,
		       reg_class_t rclass ATTRIBUTE_UNUSED,
		       bool in ATTRIBUTE_UNUSED)
{
  if (mode == QImode
      || mode == HImode)
    return 4;
  else
    return 6;
}

/* Worker for cris_notice_update_cc; handles the "normal" cases.
   FIXME: this code is historical; its functionality should be
   refactored to look at insn attributes and moved to
   cris_notice_update_cc.  Except, we better lose cc0 entirely.  */

static void
cris_normal_notice_update_cc (rtx exp, rtx insn)
{
  /* "Normal" means, for:
     (set (cc0) (...)): CC is (...).
     (set (reg) (...)): CC is (reg) and (...) - unless (...) is 0 or
     reg is a special register or (v32 and (...) is -32..-1), then CC
     does not change.  CC_NO_OVERFLOW unless (...) is reg or mem.
     (set (mem) (...)): CC does not change.
     (set (pc) (...)): CC does not change.

     (parallel
      (set (reg1) (mem (bdap/biap)))
      (set (reg2) (bdap/biap))):
     CC is (reg1) and (mem (reg2))

     (parallel
      (set (mem (bdap/biap)) (reg1)) [or 0]
      (set (reg2) (bdap/biap))):
     CC does not change.

     (where reg and mem includes strict_low_parts variants thereof)

     For all others, assume CC is clobbered.
     Note that we do not have to care about setting CC_NO_OVERFLOW,
     since the overflow flag is set to 0 (i.e. right) for
     instructions where it does not have any sane sense, but where
     other flags have meanings.  (This includes shifts; the carry is
     not set by them).

     Note that there are other parallel constructs we could match,
     but we don't do that yet.  */

  if (GET_CODE (exp) == SET)
    {
      /* FIXME: Check when this happens.  It looks like we should
	 actually do a CC_STATUS_INIT here to be safe.  */
      if (SET_DEST (exp) == pc_rtx)
	return;

      /* Record CC0 changes, so we do not have to output multiple
	 test insns.  */
      if (SET_DEST (exp) == cc0_rtx)
	{
	  CC_STATUS_INIT;

	  /* A compare against zero degenerates to a test of the
	     first operand itself.  */
	  if (GET_CODE (SET_SRC (exp)) == COMPARE
	      && XEXP (SET_SRC (exp), 1) == const0_rtx)
	    cc_status.value1 = XEXP (SET_SRC (exp), 0);
	  else
	    cc_status.value1 = SET_SRC (exp);

	  /* Handle flags for the special btstq on one bit.  */
	  if (GET_CODE (cc_status.value1) == ZERO_EXTRACT
	      && XEXP (cc_status.value1, 1) == const1_rtx)
	    {
	      if (CONST_INT_P (XEXP (cc_status.value1, 0)))
		/* Using cmpq.  */
		cc_status.flags = CC_INVERTED;
	      else
		/* A one-bit btstq.  */
		cc_status.flags = CC_Z_IN_NOT_N;
	    }
	  else if (GET_CODE (SET_SRC (exp)) == COMPARE)
	    {
	      if (!REG_P (XEXP (SET_SRC (exp), 0))
		  && XEXP (SET_SRC (exp), 1) != const0_rtx)
		/* For some reason gcc will not canonicalize compare
		   operations, reversing the sign by itself if
		   operands are in wrong order.  */
		/* (But NOT inverted; eq is still eq.)  */
		cc_status.flags = CC_REVERSED;

	      /* This seems to be overlooked by gcc.  FIXME: Check again.
		 FIXME:  Is it really safe?  */
	      cc_status.value2
		= gen_rtx_MINUS (GET_MODE (SET_SRC (exp)),
				 XEXP (SET_SRC (exp), 0),
				 XEXP (SET_SRC (exp), 1));
	    }
	  return;
	}
      else if (REG_P (SET_DEST (exp))
	       || (GET_CODE (SET_DEST (exp)) == STRICT_LOW_PART
		   && REG_P (XEXP (SET_DEST (exp), 0))))
	{
	  /* A register is set; normally CC is set to show that no
	     test insn is needed.  Catch the exceptions.  */

	  /* If not to cc0, then no "set"s in non-natural mode give
	     ok cc0...  */
	  if (GET_MODE_SIZE (GET_MODE (SET_DEST (exp))) > UNITS_PER_WORD
	      || GET_MODE_CLASS (GET_MODE (SET_DEST (exp))) == MODE_FLOAT)
	    {
	      /* ... except add:s and sub:s in DImode.  */
	      if (GET_MODE (SET_DEST (exp)) == DImode
		  && (GET_CODE (SET_SRC (exp)) == PLUS
		      || GET_CODE (SET_SRC (exp)) == MINUS))
		{
		  CC_STATUS_INIT;
		  cc_status.value1 = SET_DEST (exp);
		  cc_status.value2 = SET_SRC (exp);

		  /* If the source mentions the destination, value2 is
		     stale after the insn; forget it.  */
		  if (cris_reg_overlap_mentioned_p (cc_status.value1,
						    cc_status.value2))
		    cc_status.value2 = 0;

		  /* Add and sub may set V, which gets us
		     unoptimizable results in "gt" and "le" condition
		     codes.  */
		  cc_status.flags |= CC_NO_OVERFLOW;

		  return;
		}
	    }
	  else if (SET_SRC (exp) == const0_rtx
		   || (REG_P (SET_SRC (exp))
		       && (REGNO (SET_SRC (exp))
			   > CRIS_LAST_GENERAL_REGISTER))
		   || (TARGET_V32
		       && GET_CODE (SET_SRC (exp)) == CONST_INT
		       && CRIS_CONST_OK_FOR_LETTER_P (INTVAL (SET_SRC
							      (exp)), 'I')))
	    {
	      /* There's no CC0 change for this case.  Just check
		 for overlap.  */
	      if (cc_status.value1
		  && modified_in_p (cc_status.value1, insn))
		cc_status.value1 = 0;

	      if (cc_status.value2
		  && modified_in_p (cc_status.value2, insn))
		cc_status.value2 = 0;

	      return;
	    }
	  else
	    {
	      CC_STATUS_INIT;
	      cc_status.value1 = SET_DEST (exp);
	      cc_status.value2 = SET_SRC (exp);

	      if (cris_reg_overlap_mentioned_p (cc_status.value1,
						cc_status.value2))
		cc_status.value2 = 0;

	      /* Some operations may set V, which gets us
		 unoptimizable results in "gt" and "le" condition
		 codes.  */
	      if (GET_CODE (SET_SRC (exp)) == PLUS
		  || GET_CODE (SET_SRC (exp)) == MINUS
		  || GET_CODE (SET_SRC (exp)) == NEG)
		cc_status.flags |= CC_NO_OVERFLOW;

	      /* For V32, nothing with a register destination sets
		 C and V usefully.  */
	      if (TARGET_V32)
		cc_status.flags |= CC_NO_OVERFLOW;

	      return;
	    }
	}
      else if (MEM_P (SET_DEST (exp))
	       || (GET_CODE (SET_DEST (exp)) == STRICT_LOW_PART
		   && MEM_P (XEXP (SET_DEST (exp), 0))))
	{
	  /* When SET to MEM, then CC is not changed (except for
	     overlap).  */
	  if (cc_status.value1
	      && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;

	  if (cc_status.value2
	      && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;

	  return;
	}
    }
  else if (GET_CODE (exp) == PARALLEL)
    {
      /* The two recognized parallels are the side-effect addressing
	 forms documented in the header comment above.  */
      if (GET_CODE (XVECEXP (exp, 0, 0)) == SET
	  && GET_CODE (XVECEXP (exp, 0, 1)) == SET
	  && REG_P (XEXP (XVECEXP (exp, 0, 1), 0)))
	{
	  if (REG_P (XEXP (XVECEXP (exp, 0, 0), 0))
	      && MEM_P (XEXP (XVECEXP (exp, 0, 0), 1)))
	    {
	      CC_STATUS_INIT;

	      /* For "move.S [rx=ry+o],rz", say CC reflects
		 value1=rz and value2=[rx] */
	      cc_status.value1 = XEXP (XVECEXP (exp, 0, 0), 0);
	      cc_status.value2
		= replace_equiv_address (XEXP (XVECEXP (exp, 0, 0), 1),
					 XEXP (XVECEXP (exp, 0, 1), 0));

	      /* Huh?  A side-effect cannot change the destination
		 register.  */
	      if (cris_reg_overlap_mentioned_p (cc_status.value1,
						cc_status.value2))
		internal_error
		  ("internal error: sideeffect-insn affecting main effect");

	      /* For V32, moves to registers don't set C and V.  */
	      if (TARGET_V32)
		cc_status.flags |= CC_NO_OVERFLOW;
	      return;
	    }
	  else if ((REG_P (XEXP (XVECEXP (exp, 0, 0), 1))
		    || XEXP (XVECEXP (exp, 0, 0), 1) == const0_rtx)
		   && MEM_P (XEXP (XVECEXP (exp, 0, 0), 0)))
	    {
	      /* For "move.S rz,[rx=ry+o]" and "clear.S [rx=ry+o]",
		 say flags are not changed, except for overlap.  */
	      if (cc_status.value1
		  && modified_in_p (cc_status.value1, insn))
		cc_status.value1 = 0;

	      if (cc_status.value2
		  && modified_in_p (cc_status.value2, insn))
		cc_status.value2 = 0;

	      return;
	    }
	}
    }

  /* If we got here, the case wasn't covered by the code above.  */
  CC_STATUS_INIT;
}

/* This function looks into the pattern to see how this insn affects
   condition codes.

   Used when to eliminate test insns before a condition-code user,
   such as a "scc" insn or a conditional branch.  This includes
   checking if the entities that cc was updated by, are changed by the
   operation.

   Currently a jumble of the old peek-inside-the-insn and the newer
   check-cc-attribute methods.  */

void
cris_notice_update_cc (rtx exp, rtx insn)
{
  enum attr_cc attrval = get_attr_cc (insn);

  /* Check if user specified "-mcc-init" as a bug-workaround.  Remember
     to still set CC_REVERSED as below, since that's required by some
     compare insn alternatives.  (FIXME: GCC should do this virtual
     operand swap by itself.)  A test-case that may otherwise fail is
     gcc.c-torture/execute/20000217-1.c -O0 and -O1.  */
  if (TARGET_CCINIT)
    {
      CC_STATUS_INIT;

      if (attrval == CC_REV)
	cc_status.flags = CC_REVERSED;
      return;
    }

  /* Slowly, we're converting to using attributes to control the setting
     of condition-code status.  */
  switch (attrval)
    {
    case CC_NONE:
      /* Even if it is "none", a setting may clobber a previous
	 cc-value, so check.  */
      if (GET_CODE (exp) == SET)
	{
	  if (cc_status.value1
	      && modified_in_p (cc_status.value1, insn))
	    cc_status.value1 = 0;

	  if (cc_status.value2
	      && modified_in_p (cc_status.value2, insn))
	    cc_status.value2 = 0;
	}
      return;

    case CC_CLOBBER:
      CC_STATUS_INIT;
      return;

    case CC_REV:
    case CC_NOOV32:
    case CC_NORMAL:
      cris_normal_notice_update_cc (exp, insn);

      /* The "test" insn doesn't clear (carry and) overflow on V32.  We
	 can change bge => bpl and blt => bmi by passing on to the cc0
	 user that V should not be considered; bgt and ble are taken
	 care of by other methods (see {tst,cmp}{si,hi,qi}).  */
      if (attrval == CC_NOOV32 && TARGET_V32)
	cc_status.flags |= CC_NO_OVERFLOW;
      return;

    default:
      internal_error ("unknown cc_attr value");
    }

  /* Not reached: every switch case above returns or aborts.  */
  CC_STATUS_INIT;
}

/* Return != 0 if the return sequence for the current function is short,
   like "ret" or "jump [sp+]".  Prior to reloading, we can't tell if
   registers must be saved, so return 0 then.  */

bool
cris_simple_epilogue (void)
{
  unsigned int regno;
  unsigned int reglimit = STACK_POINTER_REGNUM;
  bool got_really_used = false;

  if (! reload_completed
      || frame_pointer_needed
      || get_frame_size () != 0
      || crtl->args.pretend_args_size
      || crtl->args.size
      || crtl->outgoing_args_size
      || crtl->calls_eh_return

      /* If we're not supposed to emit prologue and epilogue, we must
	 not emit return-type instructions.  */
      || !TARGET_PROLOGUE_EPILOGUE)
    return false;

  /* Can't return from stacked return address with v32.  */
  if (TARGET_V32 && cris_return_address_on_stack ())
    return false;

  if (crtl->uses_pic_offset_table)
    {
      /* Check whether the PIC register is actually referenced in the
	 function body; an unused table reference does not force a
	 register save.  */
      push_topmost_sequence ();
      got_really_used
	= reg_used_between_p (pic_offset_table_rtx, get_insns (),
			      NULL_RTX);
      pop_topmost_sequence ();
    }

  /* No simple epilogue if there are saved registers.  */
  for (regno = 0; regno < reglimit; regno++)
    if (cris_reg_saved_in_regsave_area (regno, got_really_used))
      return false;

  return true;
}

/* Expand a return insn (just one insn) marked as using SRP or stack
   slot depending on parameter ON_STACK.
*/ void cris_expand_return (bool on_stack) { /* FIXME: emit a parallel with a USE for SRP or the stack-slot, to tell "ret" from "jump [sp+]". Some, but not all, other parts of GCC expect just (return) to do the right thing when optimizing, so we do that until they're fixed. Currently, all return insns in a function must be the same (not really a limiting factor) so we need to check that it doesn't change half-way through. */ emit_jump_insn (gen_rtx_RETURN (VOIDmode)); CRIS_ASSERT (cfun->machine->return_type != CRIS_RETINSN_RET || !on_stack); CRIS_ASSERT (cfun->machine->return_type != CRIS_RETINSN_JUMP || on_stack); cfun->machine->return_type = on_stack ? CRIS_RETINSN_JUMP : CRIS_RETINSN_RET; } /* Compute a (partial) cost for rtx X. Return true if the complete cost has been computed, and false if subexpressions should be scanned. In either case, *TOTAL contains the cost result. */ static bool cris_rtx_costs (rtx x, int code, int outer_code, int *total, bool speed) { switch (code) { case CONST_INT: { HOST_WIDE_INT val = INTVAL (x); if (val == 0) *total = 0; else if (val < 32 && val >= -32) *total = 1; /* Eight or 16 bits are a word and cycle more expensive. */ else if (val <= 32767 && val >= -32768) *total = 2; /* A 32-bit constant (or very seldom, unsigned 16 bits) costs another word. FIXME: This isn't linear to 16 bits. */ else *total = 4; return true; } case LABEL_REF: *total = 6; return true; case CONST: case SYMBOL_REF: *total = 6; return true; case CONST_DOUBLE: if (x != CONST0_RTX (GET_MODE (x) == VOIDmode ? DImode : GET_MODE (x))) *total = 12; else /* Make 0.0 cheap, else test-insns will not be used. */ *total = 0; return true; case MULT: /* If we have one arm of an ADDI, make sure it gets the cost of one insn, i.e. zero cost for this operand, and just the cost of the PLUS, as the insn is created by combine from a PLUS and an ASHIFT, and the MULT cost below would make the combined value be larger than the separate insns. 
The insn validity is checked elsewhere by combine. FIXME: this case is a stop-gap for 4.3 and 4.4, this whole function should be rewritten. */ if (outer_code == PLUS && BIAP_INDEX_P (x)) { *total = 0; return true; } /* Identify values that are no powers of two. Powers of 2 are taken care of already and those values should not be changed. */ if (!CONST_INT_P (XEXP (x, 1)) || exact_log2 (INTVAL (XEXP (x, 1)) < 0)) { /* If we have a multiply insn, then the cost is between 1 and 2 "fast" instructions. */ if (TARGET_HAS_MUL_INSNS) { *total = COSTS_N_INSNS (1) + COSTS_N_INSNS (1) / 2; return true; } /* Estimate as 4 + 4 * #ofbits. */ *total = COSTS_N_INSNS (132); return true; } return false; case UDIV: case MOD: case UMOD: case DIV: if (!CONST_INT_P (XEXP (x, 1)) || exact_log2 (INTVAL (XEXP (x, 1)) < 0)) { /* Estimate this as 4 + 8 * #of bits. */ *total = COSTS_N_INSNS (260); return true; } return false; case AND: if (CONST_INT_P (XEXP (x, 1)) /* Two constants may actually happen before optimization. */ && !CONST_INT_P (XEXP (x, 0)) && !CRIS_CONST_OK_FOR_LETTER_P (INTVAL (XEXP (x, 1)), 'I')) { *total = (rtx_cost (XEXP (x, 0), (enum rtx_code) outer_code, speed) + 2 + 2 * GET_MODE_NUNITS (GET_MODE (XEXP (x, 0)))); return true; } return false; case ZERO_EXTRACT: if (outer_code != COMPARE) return false; /* fall through */ case ZERO_EXTEND: case SIGN_EXTEND: *total = rtx_cost (XEXP (x, 0), (enum rtx_code) outer_code, speed); return true; default: return false; } } /* The ADDRESS_COST worker. */ static int cris_address_cost (rtx x, bool speed ATTRIBUTE_UNUSED) { /* The metric to use for the cost-macros is unclear. The metric used here is (the number of cycles needed) / 2, where we consider equal a cycle for a word of code and a cycle to read memory. FIXME: Adding "+ 1" to all values would avoid returning 0, as tree-ssa-loop-ivopts.c as of r128272 "normalizes" 0 to 1, thereby giving equal costs to [rN + rM] and [rN]. Unfortunately(?) 
such a hack would expose other pessimizations, at least with g++.dg/tree-ssa/ivopts-1.C, adding insns to the loop there, without apparent reason. */ /* The cheapest addressing modes get 0, since nothing extra is needed. */ if (BASE_OR_AUTOINCR_P (x)) return 0; /* An indirect mem must be a DIP. This means two bytes extra for code, and 4 bytes extra for memory read, i.e. (2 + 4) / 2. */ if (MEM_P (x)) return (2 + 4) / 2; /* Assume (2 + 4) / 2 for a single constant; a dword, since it needs an extra DIP prefix and 4 bytes of constant in most cases. */ if (CONSTANT_P (x)) return (2 + 4) / 2; /* Handle BIAP and BDAP prefixes. */ if (GET_CODE (x) == PLUS) { rtx tem1 = XEXP (x, 0); rtx tem2 = XEXP (x, 1); /* Local extended canonicalization rule: the first operand must be REG, unless it's an operation (MULT). */ if (!REG_P (tem1) && GET_CODE (tem1) != MULT) tem1 = tem2, tem2 = XEXP (x, 0); /* We'll "assume" we have canonical RTX now. */ gcc_assert (REG_P (tem1) || GET_CODE (tem1) == MULT); /* A BIAP is 2 extra bytes for the prefix insn, nothing more. We recognize the typical MULT which is always in tem1 because of insn canonicalization. */ if ((GET_CODE (tem1) == MULT && BIAP_INDEX_P (tem1)) || REG_P (tem2)) return 2 / 2; /* A BDAP (quick) is 2 extra bytes. Any constant operand to the PLUS is always found in tem2. */ if (CONST_INT_P (tem2) && INTVAL (tem2) < 128 && INTVAL (tem2) >= -128) return 2 / 2; /* A BDAP -32768 .. 32767 is like BDAP quick, but with 2 extra bytes. */ if (CONST_INT_P (tem2) && CRIS_CONST_OK_FOR_LETTER_P (INTVAL (tem2), 'L')) return (2 + 2) / 2; /* A BDAP with some other constant is 2 bytes extra. */ if (CONSTANT_P (tem2)) return (2 + 2 + 2) / 2; /* BDAP with something indirect should have a higher cost than BIAP with register. FIXME: Should it cost like a MEM or more? */ return (2 + 2 + 2) / 2; } /* What else? Return a high cost. It matters only for valid addressing modes. */ return 10; } /* Check various objections to the side-effect. 
   Used in the test-part of an anonymous insn describing an insn with a
   possible side-effect.  Returns nonzero if the implied side-effect is
   ok.

   code     : PLUS or MULT
   ops	    : An array of rtx:es.  lreg, rreg, rval,
	      The variables multop and other_op are indexes into this,
	      or -1 if they are not applicable.
   lreg     : The register that gets assigned in the side-effect.
   rreg     : One register in the side-effect expression
   rval     : The other register, or an int.
   multop   : An integer to multiply rval with.
   other_op : One of the entities of the main effect,
	      whose mode we must consider.  */

int
cris_side_effect_mode_ok (enum rtx_code code, rtx *ops,
			  int lreg, int rreg, int rval,
			  int multop, int other_op)
{
  /* Find what value to multiply with, for rx =ry + rz * n.  */
  int mult = multop < 0 ? 1 : INTVAL (ops[multop]);
  rtx reg_rtx = ops[rreg];
  rtx val_rtx = ops[rval];

  /* The operands may be swapped.  Canonicalize them in reg_rtx and
     val_rtx, where reg_rtx always is a reg (for this constraint to
     match).  */
  if (! BASE_P (reg_rtx))
    reg_rtx = val_rtx, val_rtx = ops[rreg];

  /* Don't forget to check that reg_rtx really is a reg.  If it isn't,
     we have no business.  */
  if (! BASE_P (reg_rtx))
    return 0;

  /* Don't do this when -mno-split.  */
  if (!TARGET_SIDE_EFFECT_PREFIXES)
    return 0;

  /* The mult expression may be hidden in lreg.  FIXME:  Add more
     commentary about that.  */
  if (GET_CODE (val_rtx) == MULT)
    {
      mult = INTVAL (XEXP (val_rtx, 1));
      val_rtx = XEXP (val_rtx, 0);
      code = MULT;
    }

  /* First check the "other operand".  */
  if (other_op >= 0)
    {
      /* The main-effect operand must fit in a word.  */
      if (GET_MODE_SIZE (GET_MODE (ops[other_op])) > UNITS_PER_WORD)
	return 0;

      /* Check if the lvalue register is the same as the "other
	 operand".  If so, the result is undefined and we shouldn't do
	 this.  FIXME:  Check again.  */
      if ((BASE_P (ops[lreg])
	   && BASE_P (ops[other_op])
	   && REGNO (ops[lreg]) == REGNO (ops[other_op]))
	  || rtx_equal_p (ops[other_op], ops[lreg]))
	return 0;
    }

  /* Do not accept frame_pointer_rtx as any operand.  */
  if (ops[lreg] == frame_pointer_rtx || ops[rreg] == frame_pointer_rtx
      || ops[rval] == frame_pointer_rtx
      || (other_op >= 0 && ops[other_op] == frame_pointer_rtx))
    return 0;

  if (code == PLUS && ! BASE_P (val_rtx))
    {
      /* Do not allow rx = rx + n if a normal add or sub with same size
	 would do.  */
      if (rtx_equal_p (ops[lreg], reg_rtx)
	  && CONST_INT_P (val_rtx)
	  && (INTVAL (val_rtx) <= 63 && INTVAL (val_rtx) >= -63))
	return 0;

      /* Check allowed cases, like [r(+)?].[bwd] and const.  */
      if (CONSTANT_P (val_rtx))
	return 1;

      if (MEM_P (val_rtx) && BASE_OR_AUTOINCR_P (XEXP (val_rtx, 0)))
	return 1;

      if (GET_CODE (val_rtx) == SIGN_EXTEND
	  && MEM_P (XEXP (val_rtx, 0))
	  && BASE_OR_AUTOINCR_P (XEXP (XEXP (val_rtx, 0), 0)))
	return 1;

      /* If we got here, it's not a valid addressing mode.  */
      return 0;
    }
  else if (code == MULT || (code == PLUS && BASE_P (val_rtx)))
    {
      /* Do not allow rx = rx + ry.S, since it doesn't give better
	 code.  */
      if (rtx_equal_p (ops[lreg], reg_rtx)
	  || (mult == 1 && rtx_equal_p (ops[lreg], val_rtx)))
	return 0;

      /* Do not allow bad multiply-values.  */
      if (mult != 1 && mult != 2 && mult != 4)
	return 0;

      /* Only allow  r + ...  */
      if (! BASE_P (reg_rtx))
	return 0;

      /* If we got here, all seems ok.
	 (All checks need to be done above).  */
      return 1;
    }

  /* If we get here, the caller got its initial tests wrong.  */
  internal_error ("internal error: cris_side_effect_mode_ok with bad operands");
}

/* Whether next_cc0_user of insn is LE or GT or requires a real
   compare insn for other reasons.  */

bool
cris_cc0_user_requires_cmp (rtx insn)
{
  rtx cc0_user = NULL;
  rtx body;
  rtx set;

  gcc_assert (insn != NULL);

  /* Only relevant on V32, where "test" does not clear C and V.  */
  if (!TARGET_V32)
    return false;

  cc0_user = next_cc0_user (insn);
  if (cc0_user == NULL)
    return false;

  body = PATTERN (cc0_user);
  set = single_set (cc0_user);

  /* Users can be sCC and bCC.  */
  if (JUMP_P (cc0_user)
      && GET_CODE (body) == SET
      && SET_DEST (body) == pc_rtx
      && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE
      && XEXP (XEXP (SET_SRC (body), 0), 0) == cc0_rtx)
    {
      return GET_CODE (XEXP (SET_SRC (body), 0)) == GT
	|| GET_CODE (XEXP (SET_SRC (body), 0)) == LE;
    }
  else if (set)
    {
      /* NOTE(review): this reads SET_SRC (body), not SET_SRC (set);
	 that is only equivalent when the pattern is a bare SET (no
	 surrounding PARALLEL with clobbers) -- confirm all sCC
	 patterns here have that shape.  */
      return GET_CODE (SET_SRC (body)) == GT
	|| GET_CODE (SET_SRC (body)) == LE;
    }

  gcc_unreachable ();
}

/* The function reg_overlap_mentioned_p in CVS (still as of 2001-05-16)
   does not handle the case where the IN operand is strict_low_part; it
   does handle it for X.  Test-case in Axis-20010516.  This function
   takes care of that for THIS port.  FIXME: strict_low_part is going
   away anyway.  */

static int
cris_reg_overlap_mentioned_p (rtx x, rtx in)
{
  /* The function reg_overlap_mentioned now handles when X is
     strict_low_part, but not when IN is a STRICT_LOW_PART.  */
  if (GET_CODE (in) == STRICT_LOW_PART)
    in = XEXP (in, 0);

  return reg_overlap_mentioned_p (x, in);
}

/* The TARGET_ASM_NAMED_SECTION worker.
   We just dispatch to the functions for ELF and a.out.  */

void
cris_target_asm_named_section (const char *name, unsigned int flags,
			       tree decl)
{
  if (! TARGET_ELF)
    default_no_named_section (name, flags, decl);
  else
    default_elf_asm_named_section (name, flags, decl);
}

/* Return TRUE iff X is a CONST valid for e.g. indexing.
   ANY_OPERAND is 0 if X is in a CALL_P insn or movsi, 1
   elsewhere.  */

bool
cris_valid_pic_const (rtx x, bool any_operand)
{
  gcc_assert (flag_pic);

  /* Plain numeric constants are always fine.  */
  switch (GET_CODE (x))
    {
    case CONST_INT:
    case CONST_DOUBLE:
      return true;
    default:
      ;
    }

  if (GET_CODE (x) != CONST)
    return false;

  x = XEXP (x, 0);

  /* Handle (const (plus (unspec .. UNSPEC_GOTREL) (const_int ...))).  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == UNSPEC
      && (XINT (XEXP (x, 0), 1) == CRIS_UNSPEC_GOTREL
	  || XINT (XEXP (x, 0), 1) == CRIS_UNSPEC_PCREL)
      && CONST_INT_P (XEXP (x, 1)))
    x = XEXP (x, 0);

  if (GET_CODE (x) == UNSPEC)
    switch (XINT (x, 1))
      {
	/* A PCREL operand is only valid for call and movsi.
*/ case CRIS_UNSPEC_PLT_PCREL: case CRIS_UNSPEC_PCREL: return !any_operand; case CRIS_UNSPEC_PLT_GOTREL: case CRIS_UNSPEC_PLTGOTREAD: case CRIS_UNSPEC_GOTREAD: case CRIS_UNSPEC_GOTREL: return true; default: gcc_unreachable (); } return cris_pic_symbol_type_of (x) == cris_no_symbol; } /* Helper function to find the right PIC-type symbol to generate, given the original (non-PIC) representation. */ enum cris_pic_symbol_type cris_pic_symbol_type_of (rtx x) { switch (GET_CODE (x)) { case SYMBOL_REF: return SYMBOL_REF_LOCAL_P (x) ? cris_rel_symbol : cris_got_symbol; case LABEL_REF: return cris_rel_symbol; case CONST: return cris_pic_symbol_type_of (XEXP (x, 0)); case PLUS: case MINUS: { enum cris_pic_symbol_type t1 = cris_pic_symbol_type_of (XEXP (x, 0)); enum cris_pic_symbol_type t2 = cris_pic_symbol_type_of (XEXP (x, 1)); gcc_assert (t1 == cris_no_symbol || t2 == cris_no_symbol); if (t1 == cris_got_symbol || t1 == cris_got_symbol) return cris_got_symbol_needing_fixup; return t1 != cris_no_symbol ? t1 : t2; } case CONST_INT: case CONST_DOUBLE: return cris_no_symbol; case UNSPEC: /* Likely an offsettability-test attempting to add a constant to a GOTREAD symbol, which can't be handled. */ return cris_invalid_pic_symbol; default: fatal_insn ("unrecognized supposed constant", x); } gcc_unreachable (); } /* The LEGITIMATE_PIC_OPERAND_P worker. */ int cris_legitimate_pic_operand (rtx x) { /* Symbols are not valid PIC operands as-is; just constants. */ return cris_valid_pic_const (x, true); } /* The ASM_OUTPUT_CASE_END worker. */ void cris_asm_output_case_end (FILE *stream, int num, rtx table) { if (TARGET_V32) { rtx whole_jump_insn = PATTERN (PREV_INSN (PREV_INSN (table))); /* This can be a SEQUENCE, meaning the delay-slot of the jump is filled. */ rtx parallel_jump = (GET_CODE (whole_jump_insn) == SEQUENCE ? 
	   PATTERN (XVECEXP (whole_jump_insn, 0, 0)) : whole_jump_insn);

      /* On V32 the table entries are relative to the table itself.  */
      asm_fprintf (stream,
		   "\t.word %LL%d-.%s\n",
		   CODE_LABEL_NUMBER (XEXP
				      (XEXP
				       (XEXP (XVECEXP (parallel_jump, 0, 0),
					      1),
				       2), 0)),
		   (TARGET_PDEBUG ? "; default" : ""));
      return;
    }

  /* Pre-v32: entries are relative to the default label.  */
  asm_fprintf (stream,
	       "\t.word %LL%d-%LL%d%s\n",
	       CODE_LABEL_NUMBER (XEXP
				  (XEXP
				   (XEXP
				    (XVECEXP
				     (PATTERN
				      (PREV_INSN
				       (PREV_INSN (table))), 0, 0), 1),
				   2), 0)),
	       num,
	       (TARGET_PDEBUG ? "; default" : ""));
}

/* TARGET_HANDLE_OPTION worker.  We just store the values into local
   variables here.  Checks for correct semantics are in
   cris_option_override.  */

static bool
cris_handle_option (size_t code, const char *arg ATTRIBUTE_UNUSED,
		    int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_metrax100:
      target_flags
	|= (MASK_SVINTO
	    + MASK_ETRAX4_ADD
	    + MASK_ALIGN_BY_32);
      break;

    case OPT_mno_etrax100:
      target_flags
	&= ~(MASK_SVINTO
	     + MASK_ETRAX4_ADD
	     + MASK_ALIGN_BY_32);
      break;

    case OPT_m32_bit:
    case OPT_m32bit:
      target_flags
	|= (MASK_STACK_ALIGN
	    + MASK_CONST_ALIGN
	    + MASK_DATA_ALIGN
	    + MASK_ALIGN_BY_32);
      break;

    case OPT_m16_bit:
    case OPT_m16bit:
      target_flags
	|= (MASK_STACK_ALIGN
	    + MASK_CONST_ALIGN
	    + MASK_DATA_ALIGN);
      break;

    case OPT_m8_bit:
    case OPT_m8bit:
      target_flags
	&= ~(MASK_STACK_ALIGN
	     + MASK_CONST_ALIGN
	     + MASK_DATA_ALIGN);
      break;

    default:
      break;
    }

  CRIS_SUBTARGET_HANDLE_OPTION(code, arg, value);

  return true;
}

/* The TARGET_OPTION_OVERRIDE worker.
   As is the norm, this also parses -mfoo=bar type parameters.  */

static void
cris_option_override (void)
{
  if (cris_max_stackframe_str)
    {
      cris_max_stackframe = atoi (cris_max_stackframe_str);

      /* Do some sanity checking.  */
      /* NOTE(review): the message says "-max-stackframe" while the
	 option is presumably spelled "-mmax-stackframe" -- confirm
	 against the .opt file before changing the string.  */
      if (cris_max_stackframe < 0 || cris_max_stackframe > 0x20000000)
	internal_error ("-max-stackframe=%d is not usable, not between 0 and %d",
			cris_max_stackframe, 0x20000000);
    }

  /* Let "-metrax4" and "-metrax100" change the cpu version.  */
  if (TARGET_SVINTO && cris_cpu_version < CRIS_CPU_SVINTO)
    cris_cpu_version = CRIS_CPU_SVINTO;
  else if (TARGET_ETRAX4_ADD && cris_cpu_version < CRIS_CPU_ETRAX4)
    cris_cpu_version = CRIS_CPU_ETRAX4;

  /* Parse -march=... and its synonym, the deprecated -mcpu=...  */
  if (cris_cpu_str)
    {
      /* "vN" parses as version N; named variants are checked below,
	 anything else yields -1 and an error.  */
      cris_cpu_version
	= (*cris_cpu_str == 'v' ? atoi (cris_cpu_str + 1) : -1);

      if (strcmp ("etrax4", cris_cpu_str) == 0)
	cris_cpu_version = 3;

      if (strcmp ("svinto", cris_cpu_str) == 0
	  || strcmp ("etrax100", cris_cpu_str) == 0)
	cris_cpu_version = 8;

      if (strcmp ("ng", cris_cpu_str) == 0
	  || strcmp ("etrax100lx", cris_cpu_str) == 0)
	cris_cpu_version = 10;

      if (cris_cpu_version < 0 || cris_cpu_version > 32)
	error ("unknown CRIS version specification in -march= or -mcpu= : %s",
	       cris_cpu_str);

      /* Set the target flags.  */
      if (cris_cpu_version >= CRIS_CPU_ETRAX4)
	target_flags |= MASK_ETRAX4_ADD;

      /* If this is Svinto or higher, align for 32 bit accesses.  */
      if (cris_cpu_version >= CRIS_CPU_SVINTO)
	target_flags
	  |= (MASK_SVINTO | MASK_ALIGN_BY_32
	      | MASK_STACK_ALIGN | MASK_CONST_ALIGN
	      | MASK_DATA_ALIGN);

      /* Note that we do not add new flags when it can be completely
	 described with a macro that uses -mcpu=X.  So
	 TARGET_HAS_MUL_INSNS is (cris_cpu_version >= CRIS_CPU_NG).  */
    }

  if (cris_tune_str)
    {
      int cris_tune
	= (*cris_tune_str == 'v' ? atoi (cris_tune_str + 1) : -1);

      if (strcmp ("etrax4", cris_tune_str) == 0)
	cris_tune = 3;

      if (strcmp ("svinto", cris_tune_str) == 0
	  || strcmp ("etrax100", cris_tune_str) == 0)
	cris_tune = 8;

      if (strcmp ("ng", cris_tune_str) == 0
	  || strcmp ("etrax100lx", cris_tune_str) == 0)
	cris_tune = 10;

      if (cris_tune < 0 || cris_tune > 32)
	error ("unknown CRIS cpu version specification in -mtune= : %s",
	       cris_tune_str);

      if (cris_tune >= CRIS_CPU_SVINTO)
	/* We have currently nothing more to tune than alignment for
	   memory accesses.  */
	target_flags
	  |= (MASK_STACK_ALIGN | MASK_CONST_ALIGN
	      | MASK_DATA_ALIGN | MASK_ALIGN_BY_32);
    }

  /* V32 has no side-effect prefixes and no multiply bug.  */
  if (cris_cpu_version >= CRIS_CPU_V32)
    target_flags &= ~(MASK_SIDE_EFFECT_PREFIXES|MASK_MUL_BUG);

  if (flag_pic)
    {
      /* Use error rather than warning, so invalid use is easily
	 detectable.  Still change to the values we expect, to avoid
	 further errors.  */
      if (! TARGET_LINUX)
	{
	  error ("-fPIC and -fpic are not supported in this configuration");
	  flag_pic = 0;
	}

      /* Turn off function CSE.  We need to have the addresses reach
	 the call expanders to get PLT-marked, as they could otherwise
	 be compared against zero directly or indirectly.  After
	 visiting the call expanders they will then be cse:ed, as the
	 call expanders force_reg the addresses, effectively forcing
	 flag_no_function_cse to 0.  */
      flag_no_function_cse = 1;
    }

  if (write_symbols == DWARF2_DEBUG && ! TARGET_ELF)
    {
      warning (0, "that particular -g option is invalid with -maout and -melinux");
      write_symbols = DBX_DEBUG;
    }

  /* Set the per-function-data initializer.  */
  init_machine_status = cris_init_machine_status;
}

/* The TARGET_ASM_OUTPUT_MI_THUNK worker.
*/

static void
cris_asm_output_mi_thunk (FILE *stream,
			  tree thunkdecl ATTRIBUTE_UNUSED,
			  HOST_WIDE_INT delta,
			  HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			  tree funcdecl)
{
  /* Adjust the "this" pointer (first argument register) by DELTA,
     picking add or sub so the printed constant is non-negative.  */
  if (delta > 0)
    fprintf (stream, "\tadd%s " HOST_WIDE_INT_PRINT_DEC ",$%s\n",
	     ADDITIVE_SIZE_MODIFIER (delta), delta,
	     reg_names[CRIS_FIRST_ARG_REG]);
  else if (delta < 0)
    fprintf (stream, "\tsub%s " HOST_WIDE_INT_PRINT_DEC ",$%s\n",
	     ADDITIVE_SIZE_MODIFIER (-delta), -delta,
	     reg_names[CRIS_FIRST_ARG_REG]);

  if (flag_pic)
    {
      /* PIC: tail-jump through a PC-relative PLT reference.  */
      const char *name = XSTR (XEXP (DECL_RTL (funcdecl), 0), 0);

      name = (* targetm.strip_name_encoding) (name);

      if (TARGET_V32)
	{
	  fprintf (stream, "\tba ");
	  assemble_name (stream, name);
	  fprintf (stream, "%s\n", CRIS_PLT_PCOFFSET_SUFFIX);
	}
      else
	{
	  fprintf (stream, "add.d ");
	  assemble_name (stream, name);
	  fprintf (stream, "%s,$pc\n", CRIS_PLT_PCOFFSET_SUFFIX);
	}
    }
  else
    {
      /* Non-PIC: a plain jump; V32 needs the delay slot filled.  */
      fprintf (stream, "jump ");
      assemble_name (stream, XSTR (XEXP (DECL_RTL (funcdecl), 0), 0));
      fprintf (stream, "\n");

      if (TARGET_V32)
	fprintf (stream, "\tnop\n");
    }
}

/* Boilerplate emitted at start of file.

   NO_APP *only at file start* means faster assembly.  It also means
   comments are not allowed.  In some cases comments will be output for
   debugging purposes.  Make sure they are allowed then.

   We want a .file directive only if TARGET_ELF.  */
static void
cris_file_start (void)
{
  /* These expressions can vary at run time, so we cannot put
     them into TARGET_INITIALIZER.  */
  targetm.asm_file_start_app_off = !(TARGET_PDEBUG || flag_print_asm_name);
  targetm.asm_file_start_file_directive = TARGET_ELF;

  default_file_start ();
}

/* Rename the function calls for integer multiply and divide.
*/
static void
cris_init_libfuncs (void)
{
  set_optab_libfunc (smul_optab, SImode, "__Mul");
  set_optab_libfunc (sdiv_optab, SImode, "__Div");
  set_optab_libfunc (udiv_optab, SImode, "__Udiv");
  set_optab_libfunc (smod_optab, SImode, "__Mod");
  set_optab_libfunc (umod_optab, SImode, "__Umod");
}

/* The INIT_EXPANDERS worker sets the per-function-data initializer and
   mark functions.  */

void
cris_init_expanders (void)
{
  /* Nothing here at the moment.  */
}

/* Zero initialization is OK for all current fields.  */

static struct machine_function *
cris_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}

/* Split a 2 word move (DI or presumably DF) into component parts.
   Originally a copy of gen_split_move_double in m32r.c.
   Returns the emitted insn sequence; caller emits it.  */

rtx
cris_split_movdx (rtx *operands)
{
  enum machine_mode mode = GET_MODE (operands[0]);
  rtx dest = operands[0];
  rtx src = operands[1];
  rtx val;

  /* We used to have to handle (SUBREG (MEM)) here, but that should no
     longer happen; after reload there are no SUBREGs any more, and
     we're only called after reload.  */
  CRIS_ASSERT (GET_CODE (dest) != SUBREG && GET_CODE (src) != SUBREG);

  start_sequence ();
  if (REG_P (dest))
    {
      int dregno = REGNO (dest);

      /* Reg-to-reg copy.  */
      if (REG_P (src))
	{
	  int sregno = REGNO (src);

	  int reverse = (dregno == sregno + 1);

	  /* We normally copy the low-numbered register first.  However,
	     if the first register operand 0 is the same as the second
	     register of operand 1, we must copy in the opposite order.  */
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, reverse, TRUE, mode),
				  operand_subword (src, reverse, TRUE, mode)));

	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, !reverse, TRUE, mode),
				  operand_subword (src, !reverse, TRUE, mode)));
	}
      /* Constant-to-reg copy.  */
      else if (CONST_INT_P (src) || GET_CODE (src) == CONST_DOUBLE)
	{
	  /* Split the constant into the two word-sized halves and move
	     each separately.  */
	  rtx words[2];
	  split_double (src, &words[0], &words[1]);
	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, 0, TRUE, mode),
				  words[0]));

	  emit_insn (gen_rtx_SET (VOIDmode,
				  operand_subword (dest, 1, TRUE, mode),
				  words[1]));
	}
      /* Mem-to-reg copy.  */
      else if (MEM_P (src))
	{
	  /* If the high-address word is used in the address, we must load
	     it last.  Otherwise, load it first.  */
	  rtx addr = XEXP (src, 0);
	  int reverse
	    = (refers_to_regno_p (dregno, dregno + 1, addr, NULL) != 0);

	  /* The original code implies that we can't do
	     move.x [rN+],rM  move.x [rN],rM+1
	     when rN is dead, because of REG_NOTES damage.  That is
	     consistent with what I've seen, so don't try it.

	     We have two different cases here; if the addr is POST_INC,
	     just pass it through, otherwise add constants.  */

	  if (GET_CODE (addr) == POST_INC)
	    {
	      rtx mem;
	      rtx insn;

	      /* Whenever we emit insns with post-incremented addresses
		 ourselves, we must add a post-inc note manually.  */
	      mem = change_address (src, SImode, addr);
	      insn
		= gen_rtx_SET (VOIDmode,
			       operand_subword (dest, 0, TRUE, mode), mem);
	      insn = emit_insn (insn);
	      if (GET_CODE (XEXP (mem, 0)) == POST_INC)
		REG_NOTES (insn)
		  = alloc_EXPR_LIST (REG_INC, XEXP (XEXP (mem, 0), 0),
				     REG_NOTES (insn));

	      mem = copy_rtx (mem);
	      insn
		= gen_rtx_SET (VOIDmode,
			       operand_subword (dest, 1, TRUE, mode), mem);
	      insn = emit_insn (insn);
	      if (GET_CODE (XEXP (mem, 0)) == POST_INC)
		REG_NOTES (insn)
		  = alloc_EXPR_LIST (REG_INC, XEXP (XEXP (mem, 0), 0),
				     REG_NOTES (insn));
	    }
	  else
	    {
	      /* Make sure we don't get any other addresses with
		 embedded postincrements.  They should be stopped in
		 GO_IF_LEGITIMATE_ADDRESS, but we're here for your
		 safety.  */
	      if (side_effects_p (addr))
		fatal_insn ("unexpected side-effects in address", addr);

	      emit_insn (gen_rtx_SET
			 (VOIDmode,
			  operand_subword (dest, reverse, TRUE, mode),
			  change_address
			  (src, SImode,
			   plus_constant (addr,
					  reverse * UNITS_PER_WORD))));
	      emit_insn (gen_rtx_SET
			 (VOIDmode,
			  operand_subword (dest, ! reverse, TRUE, mode),
			  change_address
			  (src, SImode,
			   plus_constant (addr,
					  (! reverse)
					  * UNITS_PER_WORD))));
	    }
	}
      else
	internal_error ("unknown src");
    }
  /* Reg-to-mem copy or clear mem.  */
  else if (MEM_P (dest)
	   && (REG_P (src)
	       || src == const0_rtx
	       || src == CONST0_RTX (DFmode)))
    {
      rtx addr = XEXP (dest, 0);

      if (GET_CODE (addr) == POST_INC)
	{
	  rtx mem;
	  rtx insn;

	  /* Whenever we emit insns with post-incremented addresses
	     ourselves, we must add a post-inc note manually.  */
	  mem = change_address (dest, SImode, addr);
	  insn
	    = gen_rtx_SET (VOIDmode,
			   mem, operand_subword (src, 0, TRUE, mode));
	  insn = emit_insn (insn);
	  if (GET_CODE (XEXP (mem, 0)) == POST_INC)
	    REG_NOTES (insn)
	      = alloc_EXPR_LIST (REG_INC, XEXP (XEXP (mem, 0), 0),
				 REG_NOTES (insn));

	  mem = copy_rtx (mem);
	  insn
	    = gen_rtx_SET (VOIDmode,
			   mem,
			   operand_subword (src, 1, TRUE, mode));
	  insn = emit_insn (insn);
	  if (GET_CODE (XEXP (mem, 0)) == POST_INC)
	    REG_NOTES (insn)
	      = alloc_EXPR_LIST (REG_INC, XEXP (XEXP (mem, 0), 0),
				 REG_NOTES (insn));
	}
      else
	{
	  /* Make sure we don't get any other addresses with embedded
	     postincrements.  They should be stopped in
	     GO_IF_LEGITIMATE_ADDRESS, but we're here for your safety.  */
	  if (side_effects_p (addr))
	    fatal_insn ("unexpected side-effects in address", addr);

	  emit_insn (gen_rtx_SET
		     (VOIDmode,
		      change_address (dest, SImode, addr),
		      operand_subword (src, 0, TRUE, mode)));

	  emit_insn (gen_rtx_SET
		     (VOIDmode,
		      change_address (dest, SImode,
				      plus_constant (addr,
						     UNITS_PER_WORD)),
		      operand_subword (src, 1, TRUE, mode)));
	}
    }

  else
    internal_error ("unknown dest");

  val = get_insns ();
  end_sequence ();
  return val;
}

/* The expander for the prologue pattern name.  */

void
cris_expand_prologue (void)
{
  int regno;
  int size = get_frame_size ();
  /* Shorten the used name for readability.
*/
  int cfoa_size = crtl->outgoing_args_size;
  int last_movem_reg = -1;
  /* FRAMESIZE accumulates the total frame for the
     cris_max_stackframe warning at the end.  */
  int framesize = 0;
  rtx mem, insn;
  int return_address_on_stack = cris_return_address_on_stack ();
  int got_really_used = false;
  int n_movem_regs = 0;
  int pretend = crtl->args.pretend_args_size;

  /* Don't do anything if no prologues or epilogues are wanted. */
  if (!TARGET_PROLOGUE_EPILOGUE)
    return;

  CRIS_ASSERT (size >= 0);

  if (crtl->uses_pic_offset_table)
    {
      /* A reference may have been optimized out (like the abort () in
	 fde_split in unwind-dw2-fde.c, at least 3.2.1) so check that
	 it's still used. */
      push_topmost_sequence ();
      got_really_used
	= reg_used_between_p (pic_offset_table_rtx, get_insns (),
			      NULL_RTX);
      pop_topmost_sequence ();
    }

  /* Align the size to what's best for the CPU model. */
  if (TARGET_STACK_ALIGN)
    size = TARGET_ALIGN_BY_32 ? (size + 3) & ~3 : (size + 1) & ~1;

  if (pretend)
    {
      /* See also cris_setup_incoming_varargs where
	 cfun->machine->stdarg_regs is set.  There are other setters of
	 crtl->args.pretend_args_size than stdarg handling, like for an
	 argument passed with parts in R13 and stack.  We must not store
	 R13 into the pretend-area for that case, as GCC does that
	 itself.  "Our" store would be marked as redundant and GCC will
	 attempt to remove it, which will then be flagged as an internal
	 error; trying to remove a frame-related insn. */
      int stdarg_regs = cfun->machine->stdarg_regs;

      framesize += pretend;

      /* Push each unnamed-argument register onto the stack, highest
	 argument register first.  */
      for (regno = CRIS_FIRST_ARG_REG + CRIS_MAX_ARGS_IN_REGS - 1;
	   stdarg_regs > 0;
	   regno--, pretend -= 4, stdarg_regs--)
	{
	  insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
					 plus_constant (stack_pointer_rtx,
							-4)));
	  /* FIXME: When dwarf2 frame output and unless asynchronous
	     exceptions, make dwarf2 bundle together all stack
	     adjustments like it does for registers between stack
	     adjustments. */
	  RTX_FRAME_RELATED_P (insn) = 1;

	  mem = gen_rtx_MEM (SImode, stack_pointer_rtx);
	  set_mem_alias_set (mem, get_varargs_alias_set ());
	  insn = emit_move_insn (mem, gen_rtx_raw_REG (SImode, regno));

	  /* Note the absence of RTX_FRAME_RELATED_P on the above insn:
	     the value isn't restored, so we don't want to tell dwarf2
	     that it's been stored to stack, else EH handling info would
	     get confused. */
	}

      /* For other setters of crtl->args.pretend_args_size, we just
	 adjust the stack by leaving the remaining size in "pretend",
	 handled below. */
    }

  /* Save SRP if not a leaf function. */
  if (return_address_on_stack)
    {
      insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				     plus_constant (stack_pointer_rtx,
						    -4 - pretend)));
      pretend = 0;
      RTX_FRAME_RELATED_P (insn) = 1;

      mem = gen_rtx_MEM (SImode, stack_pointer_rtx);
      set_mem_alias_set (mem, get_frame_alias_set ());
      insn = emit_move_insn (mem,
			     gen_rtx_raw_REG (SImode, CRIS_SRP_REGNUM));
      RTX_FRAME_RELATED_P (insn) = 1;
      framesize += 4;
    }

  /* Set up the frame pointer, if needed. */
  if (frame_pointer_needed)
    {
      insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				     plus_constant (stack_pointer_rtx,
						    -4 - pretend)));
      pretend = 0;
      RTX_FRAME_RELATED_P (insn) = 1;

      mem = gen_rtx_MEM (SImode, stack_pointer_rtx);
      set_mem_alias_set (mem, get_frame_alias_set ());
      insn = emit_move_insn (mem, frame_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;

      insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;

      framesize += 4;
    }

  /* Between frame-pointer and saved registers lie the area for local
     variables.  If we get here with "pretended" size remaining, count
     it into the general stack size. */
  size += pretend;

  /* Get a contiguous sequence of registers, starting with R0, that need
     to be saved. */
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    {
      if (cris_reg_saved_in_regsave_area (regno, got_really_used))
	{
	  n_movem_regs++;

	  /* Check if movem may be used for registers so far.
 */
	  if (regno == last_movem_reg + 1)
	    /* Yes, update next expected register. */
	    last_movem_reg = regno;
	  else
	    {
	      /* We cannot use movem for all registers.  We have to
		 flush any movem:ed registers we got so far. */
	      if (last_movem_reg != -1)
		{
		  int n_saved
		    = (n_movem_regs == 1) ? 1 : last_movem_reg + 1;

		  /* It is a win to use a side-effect assignment for
		     64 <= size <= 128.  But side-effect on movem was
		     not usable for CRIS v0..3.  Also only do it if
		     side-effects insns are allowed. */
		  if ((last_movem_reg + 1) * 4 + size >= 64
		      && (last_movem_reg + 1) * 4 + size <= 128
		      && (cris_cpu_version >= CRIS_CPU_SVINTO
			  || n_saved == 1)
		      && TARGET_SIDE_EFFECT_PREFIXES)
		    {
		      mem
			= gen_rtx_MEM (SImode,
				       plus_constant (stack_pointer_rtx,
						      -(n_saved * 4
							+ size)));
		      set_mem_alias_set (mem, get_frame_alias_set ());
		      insn
			= cris_emit_movem_store (mem, GEN_INT (n_saved),
						 -(n_saved * 4 + size),
						 true);
		    }
		  else
		    {
		      /* Adjust the stack first, then store with a
			 plain (non-side-effect) movem.  */
		      insn
			= gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				       plus_constant (stack_pointer_rtx,
						      -(n_saved * 4
							+ size)));
		      insn = emit_insn (insn);
		      RTX_FRAME_RELATED_P (insn) = 1;

		      mem = gen_rtx_MEM (SImode, stack_pointer_rtx);
		      set_mem_alias_set (mem, get_frame_alias_set ());
		      insn
			= cris_emit_movem_store (mem, GEN_INT (n_saved),
						 0, true);
		    }

		  framesize += n_saved * 4 + size;
		  last_movem_reg = -1;
		  size = 0;
		}

	      /* Save this register individually; it is outside the
		 contiguous movem run.  */
	      insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
					     plus_constant
					     (stack_pointer_rtx,
					      -4 - size)));
	      RTX_FRAME_RELATED_P (insn) = 1;

	      mem = gen_rtx_MEM (SImode, stack_pointer_rtx);
	      set_mem_alias_set (mem, get_frame_alias_set ());
	      insn = emit_move_insn (mem,
				     gen_rtx_raw_REG (SImode, regno));
	      RTX_FRAME_RELATED_P (insn) = 1;

	      framesize += 4 + size;
	      size = 0;
	    }
	}
    }

  /* Check after, if we could movem all registers.  This is the normal
     case. */
  if (last_movem_reg != -1)
    {
      int n_saved
	= (n_movem_regs == 1) ? 1 : last_movem_reg + 1;

      /* Side-effect on movem was not usable for CRIS v0..3.  Also only
	 do it if side-effects insns are allowed.
 */
      if ((last_movem_reg + 1) * 4 + size >= 64
	  && (last_movem_reg + 1) * 4 + size <= 128
	  && (cris_cpu_version >= CRIS_CPU_SVINTO || n_saved == 1)
	  && TARGET_SIDE_EFFECT_PREFIXES)
	{
	  mem
	    = gen_rtx_MEM (SImode,
			   plus_constant (stack_pointer_rtx,
					  -(n_saved * 4 + size)));
	  set_mem_alias_set (mem, get_frame_alias_set ());
	  insn = cris_emit_movem_store (mem, GEN_INT (n_saved),
					-(n_saved * 4 + size), true);
	}
      else
	{
	  insn
	    = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
			   plus_constant (stack_pointer_rtx,
					  -(n_saved * 4 + size)));
	  insn = emit_insn (insn);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  mem = gen_rtx_MEM (SImode, stack_pointer_rtx);
	  set_mem_alias_set (mem, get_frame_alias_set ());
	  insn = cris_emit_movem_store (mem, GEN_INT (n_saved), 0, true);
	}

      framesize += n_saved * 4 + size;

      /* We have to put outgoing argument space after regs. */
      if (cfoa_size)
	{
	  insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
					 plus_constant
					 (stack_pointer_rtx,
					  -cfoa_size)));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  framesize += cfoa_size;
	}
    }
  else if ((size + cfoa_size) > 0)
    {
      /* No movem run at all: allocate locals and outgoing-args space
	 in one adjustment.  */
      insn = emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				     plus_constant
				     (stack_pointer_rtx,
				      -(cfoa_size + size))));
      RTX_FRAME_RELATED_P (insn) = 1;
      framesize += size + cfoa_size;
    }

  /* Set up the PIC register, if it is used. */
  if (got_really_used)
    {
      rtx got
	= gen_rtx_UNSPEC (SImode, gen_rtvec (1, const0_rtx),
			  CRIS_UNSPEC_GOT);
      emit_move_insn (pic_offset_table_rtx, got);

      /* FIXME: This is a cover-up for flow2 messing up; it doesn't
	 follow exceptional paths and tries to delete the GOT load as
	 unused, if it isn't used on the non-exceptional paths.  Other
	 ports have similar or other cover-ups, or plain bugs marking
	 the GOT register load as maybe-dead.  To see this, remove the
	 line below and try libsupc++/vec.cc or a trivial
	 "static void y (); void x () {try {y ();} catch (...) {}}".
*/
      emit_use (pic_offset_table_rtx);
    }

  /* Diagnose frames that exceed the user-requested limit.  */
  if (cris_max_stackframe && framesize > cris_max_stackframe)
    warning (0, "stackframe too big: %d bytes", framesize);
}

/* The expander for the epilogue pattern.  Undoes cris_expand_prologue:
   restores saved registers, deallocates the frame and emits the
   return. */

void
cris_expand_epilogue (void)
{
  int regno;
  int size = get_frame_size ();
  int last_movem_reg = -1;
  int argspace_offset = crtl->outgoing_args_size;
  int pretend = crtl->args.pretend_args_size;
  rtx mem;
  bool return_address_on_stack = cris_return_address_on_stack ();
  /* A reference may have been optimized out
     (like the abort () in fde_split in unwind-dw2-fde.c, at least
     3.2.1) so check that it's still used. */
  int got_really_used = false;
  int n_movem_regs = 0;

  if (!TARGET_PROLOGUE_EPILOGUE)
    return;

  if (crtl->uses_pic_offset_table)
    {
      /* A reference may have been optimized out (like the abort () in
	 fde_split in unwind-dw2-fde.c, at least 3.2.1) so check that
	 it's still used. */
      push_topmost_sequence ();
      got_really_used
	= reg_used_between_p (pic_offset_table_rtx, get_insns (),
			      NULL_RTX);
      pop_topmost_sequence ();
    }

  /* Align byte count of stack frame. */
  if (TARGET_STACK_ALIGN)
    size = TARGET_ALIGN_BY_32 ? (size + 3) & ~3 : (size + 1) & ~1;

  /* Check how many saved regs we can movem.  They start at r0 and must
     be contiguous. */
  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (cris_reg_saved_in_regsave_area (regno, got_really_used))
      {
	n_movem_regs++;

	if (regno == last_movem_reg + 1)
	  last_movem_reg = regno;
	else
	  break;
      }

  /* If there was only one register that really needed to be saved
     through movem, don't use movem. */
  if (n_movem_regs == 1)
    last_movem_reg = -1;

  /* Now emit "normal" move insns for all regs higher than the movem
     regs. */
  for (regno = FIRST_PSEUDO_REGISTER - 1;
       regno > last_movem_reg;
       regno--)
    if (cris_reg_saved_in_regsave_area (regno, got_really_used))
      {
	rtx insn;

	if (argspace_offset)
	  {
	    /* There is an area for outgoing parameters located before
	       the saved registers.  We have to adjust for that.
 */
	    emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				    plus_constant (stack_pointer_rtx,
						   argspace_offset)));
	    /* Make sure we only do this once. */
	    argspace_offset = 0;
	  }

	/* Pop the register with a post-increment load.  */
	mem = gen_rtx_MEM (SImode,
			   gen_rtx_POST_INC (SImode, stack_pointer_rtx));
	set_mem_alias_set (mem, get_frame_alias_set ());
	insn = emit_move_insn (gen_rtx_raw_REG (SImode, regno), mem);

	/* Whenever we emit insns with post-incremented addresses
	   ourselves, we must add a post-inc note manually. */
	REG_NOTES (insn)
	  = alloc_EXPR_LIST (REG_INC, stack_pointer_rtx,
			     REG_NOTES (insn));
      }

  /* If we have any movem-restore, do it now. */
  if (last_movem_reg != -1)
    {
      rtx insn;

      if (argspace_offset)
	{
	  emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				  plus_constant (stack_pointer_rtx,
						 argspace_offset)));
	  argspace_offset = 0;
	}

      mem = gen_rtx_MEM (SImode,
			 gen_rtx_POST_INC (SImode, stack_pointer_rtx));
      set_mem_alias_set (mem, get_frame_alias_set ());
      insn
	= emit_insn (cris_gen_movem_load (mem,
					  GEN_INT (last_movem_reg + 1),
					  0));
      /* Whenever we emit insns with post-incremented addresses
	 ourselves, we must add a post-inc note manually. */
      if (side_effects_p (PATTERN (insn)))
	REG_NOTES (insn)
	  = alloc_EXPR_LIST (REG_INC, stack_pointer_rtx,
			     REG_NOTES (insn));
    }

  /* If we don't clobber all of the allocated stack area (we've already
     deallocated saved registers), GCC might want to schedule loads from
     the stack to *after* the stack-pointer restore, which introduces an
     interrupt race condition.  This happened for the initial-value
     SRP-restore for g++.dg/eh/registers1.C (noticed by inspection of
     other failure for that test).  It also happened for the stack slot
     for the return value in (one version of)
     linux/fs/dcache.c:__d_lookup, at least with "-O2
     -fno-omit-frame-pointer". */

  /* Restore frame pointer if necessary.
 */
  if (frame_pointer_needed)
    {
      rtx insn;

      /* Barrier against moving stack accesses past the deallocation;
	 see the race-condition comment above.  */
      emit_insn (gen_cris_frame_deallocated_barrier ());

      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      mem = gen_rtx_MEM (SImode,
			 gen_rtx_POST_INC (SImode, stack_pointer_rtx));
      set_mem_alias_set (mem, get_frame_alias_set ());
      insn = emit_move_insn (frame_pointer_rtx, mem);

      /* Whenever we emit insns with post-incremented addresses
	 ourselves, we must add a post-inc note manually. */
      REG_NOTES (insn)
	= alloc_EXPR_LIST (REG_INC, stack_pointer_rtx,
			   REG_NOTES (insn));
    }
  else if ((size + argspace_offset) != 0)
    {
      emit_insn (gen_cris_frame_deallocated_barrier ());

      /* If there was no frame-pointer to restore sp from, we must
	 explicitly deallocate local variables. */

      /* Handle space for outgoing parameters that hasn't been handled
	 yet. */
      size += argspace_offset;

      emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
			      plus_constant (stack_pointer_rtx, size)));
    }

  /* If this function has no pushed register parameters
     (stdargs/varargs), and if it is not a leaf function, then we have
     the return address on the stack. */
  if (return_address_on_stack && pretend == 0)
    {
      if (TARGET_V32 || crtl->calls_eh_return)
	{
	  rtx mem;
	  rtx insn;
	  rtx srpreg = gen_rtx_raw_REG (SImode, CRIS_SRP_REGNUM);
	  mem = gen_rtx_MEM (SImode,
			     gen_rtx_POST_INC (SImode,
					       stack_pointer_rtx));
	  set_mem_alias_set (mem, get_frame_alias_set ());
	  insn = emit_move_insn (srpreg, mem);

	  /* Whenever we emit insns with post-incremented addresses
	     ourselves, we must add a post-inc note manually. */
	  REG_NOTES (insn)
	    = alloc_EXPR_LIST (REG_INC, stack_pointer_rtx,
			       REG_NOTES (insn));

	  if (crtl->calls_eh_return)
	    emit_insn (gen_addsi3 (stack_pointer_rtx,
				   stack_pointer_rtx,
				   gen_rtx_raw_REG (SImode,
						    CRIS_STACKADJ_REG)));
	  cris_expand_return (false);
	}
      else
	cris_expand_return (true);

      return;
    }

  /* If we pushed some register parameters, then adjust the stack for
     them. */
  if (pretend != 0)
    {
      /* If SRP is stored on the way, we need to restore it first.
*/
      if (return_address_on_stack)
	{
	  rtx mem;
	  rtx srpreg = gen_rtx_raw_REG (SImode, CRIS_SRP_REGNUM);
	  rtx insn;

	  mem = gen_rtx_MEM (SImode,
			     gen_rtx_POST_INC (SImode,
					       stack_pointer_rtx));
	  set_mem_alias_set (mem, get_frame_alias_set ());
	  insn = emit_move_insn (srpreg, mem);

	  /* Whenever we emit insns with post-incremented addresses
	     ourselves, we must add a post-inc note manually. */
	  REG_NOTES (insn)
	    = alloc_EXPR_LIST (REG_INC, stack_pointer_rtx,
			       REG_NOTES (insn));
	}

      /* Deallocate the pretend-args area.  */
      emit_insn (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
			      plus_constant (stack_pointer_rtx,
					     pretend)));
    }

  /* Perform the "physical" unwinding that the EH machinery
     calculated. */
  if (crtl->calls_eh_return)
    emit_insn (gen_addsi3 (stack_pointer_rtx,
			   stack_pointer_rtx,
			   gen_rtx_raw_REG (SImode,
					    CRIS_STACKADJ_REG)));
  cris_expand_return (false);
}

/* Worker function for generating movem from mem for load_multiple.
   Builds a PARALLEL of SETs: one per loaded register, plus (for a
   post-incremented source) a SET updating the address register.  */

rtx
cris_gen_movem_load (rtx src, rtx nregs_rtx, int nprefix)
{
  int nregs = INTVAL (nregs_rtx);
  rtvec vec;
  int eltno = 1;
  int i;
  rtx srcreg = XEXP (src, 0);
  /* On pre-v32, movem numbers registers downwards from NREGS-1;
     on v32 it goes upwards from 0.  */
  unsigned int regno = nregs - 1;
  int regno_inc = -1;

  if (TARGET_V32)
    {
      regno = 0;
      regno_inc = 1;
    }

  if (GET_CODE (srcreg) == POST_INC)
    srcreg = XEXP (srcreg, 0);

  CRIS_ASSERT (REG_P (srcreg));

  /* Don't use movem for just one insn.  The insns are equivalent
     except for the pipeline hazard (on v32); movem does not forward
     the loaded registers so there's a three cycles penalty for their
     use.
 */
  if (nregs == 1)
    return gen_movsi (gen_rtx_REG (SImode, 0), src);

  vec = rtvec_alloc (nprefix + nregs
		     + (GET_CODE (XEXP (src, 0)) == POST_INC));

  if (GET_CODE (XEXP (src, 0)) == POST_INC)
    {
      RTVEC_ELT (vec, nprefix + 1)
	= gen_rtx_SET (VOIDmode, srcreg,
		       plus_constant (srcreg, nregs * 4));
      eltno++;
    }

  src = replace_equiv_address (src, srcreg);
  /* Element NPREFIX is the first register load; the remaining loads
     follow after the optional address update.  */
  RTVEC_ELT (vec, nprefix)
    = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, regno), src);
  regno += regno_inc;

  for (i = 1; i < nregs; i++, eltno++)
    {
      RTVEC_ELT (vec, nprefix + eltno)
	= gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, regno),
		       adjust_address_nv (src, SImode, i * 4));
      regno += regno_inc;
    }

  return gen_rtx_PARALLEL (VOIDmode, vec);
}

/* Worker function for generating movem to mem.  If FRAME_RELATED,
   notes are added that the dwarf2 machinery understands. */

rtx
cris_emit_movem_store (rtx dest, rtx nregs_rtx, int increment,
		       bool frame_related)
{
  int nregs = INTVAL (nregs_rtx);
  rtvec vec;
  int eltno = 1;
  int i;
  rtx insn;
  rtx destreg = XEXP (dest, 0);
  unsigned int regno = nregs - 1;
  int regno_inc = -1;

  if (TARGET_V32)
    {
      regno = 0;
      regno_inc = 1;
    }

  if (GET_CODE (destreg) == POST_INC)
    increment += nregs * 4;

  if (GET_CODE (destreg) == POST_INC || GET_CODE (destreg) == PLUS)
    destreg = XEXP (destreg, 0);

  CRIS_ASSERT (REG_P (destreg));

  /* Don't use movem for just one insn.  The insns are equivalent
     except for the pipeline hazard (on v32); movem does not forward
     the loaded registers so there's a three cycles penalty for use. */
  if (nregs == 1)
    {
      rtx mov = gen_rtx_SET (VOIDmode, dest, gen_rtx_REG (SImode, 0));

      if (increment == 0)
	{
	  insn = emit_insn (mov);
	  if (frame_related)
	    RTX_FRAME_RELATED_P (insn) = 1;
	  return insn;
	}

      /* If there was a request for a side-effect, create the ordinary
	 parallel.
 */
      vec = rtvec_alloc (2);

      RTVEC_ELT (vec, 0) = mov;
      RTVEC_ELT (vec, 1)
	= gen_rtx_SET (VOIDmode, destreg,
		       plus_constant (destreg, increment));
      if (frame_related)
	{
	  RTX_FRAME_RELATED_P (mov) = 1;
	  RTX_FRAME_RELATED_P (RTVEC_ELT (vec, 1)) = 1;
	}
    }
  else
    {
      vec = rtvec_alloc (nregs + (increment != 0 ? 1 : 0));
      /* Element 0 is the first register store; element 1 (when
	 INCREMENT is nonzero) is the address-register update.  */
      RTVEC_ELT (vec, 0)
	= gen_rtx_SET (VOIDmode,
		       replace_equiv_address (dest,
					      plus_constant (destreg,
							     increment)),
		       gen_rtx_REG (SImode, regno));
      regno += regno_inc;

      /* The dwarf2 info wants this mark on each component in a
	 parallel that's part of the prologue (though it's optional on
	 the first component). */
      if (frame_related)
	RTX_FRAME_RELATED_P (RTVEC_ELT (vec, 0)) = 1;

      if (increment != 0)
	{
	  RTVEC_ELT (vec, 1)
	    = gen_rtx_SET (VOIDmode, destreg,
			   plus_constant (destreg,
					  increment != 0
					  ? increment : nregs * 4));
	  eltno++;

	  if (frame_related)
	    RTX_FRAME_RELATED_P (RTVEC_ELT (vec, 1)) = 1;

	  /* Don't call adjust_address_nv on a post-incremented address
	     if we can help it. */
	  if (GET_CODE (XEXP (dest, 0)) == POST_INC)
	    dest = replace_equiv_address (dest, destreg);
	}

      for (i = 1; i < nregs; i++, eltno++)
	{
	  RTVEC_ELT (vec, eltno)
	    = gen_rtx_SET (VOIDmode,
			   adjust_address_nv (dest, SImode, i * 4),
			   gen_rtx_REG (SImode, regno));
	  if (frame_related)
	    RTX_FRAME_RELATED_P (RTVEC_ELT (vec, eltno)) = 1;
	  regno += regno_inc;
	}
    }

  insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, vec));

  /* Because dwarf2out.c handles the insns in a parallel as a sequence,
     we need to keep the stack adjustment separate, after the
     MEM-setters.  Else the stack-adjustment in the second component of
     the parallel would be mishandled; the offsets for the SETs that
     follow it would be wrong.  We prepare for this by adding a
     REG_FRAME_RELATED_EXPR with the MEM-setting parts in a SEQUENCE
     followed by the increment.  Note that we have FRAME_RELATED_P on
     all the SETs, including the original stack adjustment SET in the
     parallel.
*/
  if (frame_related)
    {
      if (increment != 0)
	{
	  /* Reorder the parallel's components for dwarf2: the stores
	     first (elements 0 and 2..NREGS of the pattern), then the
	     stack adjustment (element 1) last.  */
	  rtx seq = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (nregs + 1));
	  XVECEXP (seq, 0, 0) = copy_rtx (XVECEXP (PATTERN (insn), 0, 0));
	  for (i = 1; i < nregs; i++)
	    XVECEXP (seq, 0, i)
	      = copy_rtx (XVECEXP (PATTERN (insn), 0, i + 1));
	  XVECEXP (seq, 0, nregs)
	    = copy_rtx (XVECEXP (PATTERN (insn), 0, 1));
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, seq);
	}

      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return insn;
}

/* Worker function for expanding the address for PIC function calls.
   OPP points at the MEM call operand; it is rewritten in place.  */

void
cris_expand_pic_call_address (rtx *opp)
{
  rtx op = *opp;

  gcc_assert (MEM_P (op));
  op = XEXP (op, 0);

  /* It might be that code can be generated that jumps to 0 (or to a
     specific address).  Don't die on that.  (There is a testcase.) */
  if (CONSTANT_ADDRESS_P (op) && !CONST_INT_P (op))
    {
      enum cris_pic_symbol_type t = cris_pic_symbol_type_of (op);

      CRIS_ASSERT (can_create_pseudo_p ());

      /* For local symbols (non-PLT), just get the plain symbol
	 reference into a register.  For symbols that can be PLT, make
	 them PLT. */
      if (t == cris_rel_symbol)
	{
	  /* For v32, we're fine as-is; just PICify the symbol.
	     Forcing into a register caused performance regression for
	     3.2.1, observable in __floatdidf and elsewhere in
	     libgcc. */
	  if (TARGET_V32)
	    {
	      rtx sym
		= GET_CODE (op) != CONST ? op : get_related_value (op);
	      HOST_WIDE_INT offs = get_integer_term (op);

	      /* We can't get calls to sym+N, N integer, can we? */
	      gcc_assert (offs == 0);

	      op = gen_rtx_CONST (Pmode,
				  gen_rtx_UNSPEC (Pmode,
						  gen_rtvec (1, sym),
						  CRIS_UNSPEC_PCREL));
	    }
	  else
	    op = force_reg (Pmode, op);
	}
      else if (t == cris_got_symbol)
	{
	  if (TARGET_AVOID_GOTPLT)
	    {
	      /* Change a "jsr sym" into (allocate register rM, rO)
		 "move.d (const (unspec [sym rPIC] CRIS_UNSPEC_PLT_GOTREL)),rM"
		 "add.d rPIC,rM,rO", "jsr rO" for pre-v32 and
		 "jsr (const (unspec [sym rPIC] CRIS_UNSPEC_PLT_PCREL))"
		 for v32.
 */
	      rtx tem, rm, ro;
	      gcc_assert (can_create_pseudo_p ());
	      crtl->uses_pic_offset_table = 1;
	      tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op),
				    TARGET_V32
				    ? CRIS_UNSPEC_PLT_PCREL
				    : CRIS_UNSPEC_PLT_GOTREL);
	      tem = gen_rtx_CONST (Pmode, tem);
	      if (TARGET_V32)
		op = tem;
	      else
		{
		  rm = gen_reg_rtx (Pmode);
		  emit_move_insn (rm, tem);
		  ro = gen_reg_rtx (Pmode);
		  if (expand_binop (Pmode, add_optab, rm,
				    pic_offset_table_rtx,
				    ro, 0, OPTAB_LIB_WIDEN) != ro)
		    internal_error ("expand_binop failed in movsi got");
		  op = ro;
		}
	    }
	  else
	    {
	      /* Change a "jsr sym" into (allocate register rM, rO)
		 "move.d (const (unspec [sym] CRIS_UNSPEC_PLTGOTREAD)),rM"
		 "add.d rPIC,rM,rO"
		 "jsr [rO]"
		 with the memory access marked as not trapping and not
		 aliasing.  No "move.d [rO],rP" as that would invite to
		 re-use of a value that should not be reused.  FIXME:
		 Need a peephole2 for cases when this is cse:d from the
		 call, to change back to just get the PLT entry
		 address, so we don't resolve the same symbol over and
		 over (the memory access of the PLTGOT isn't
		 constant). */
	      rtx tem, mem, rm, ro;

	      gcc_assert (can_create_pseudo_p ());
	      crtl->uses_pic_offset_table = 1;
	      tem = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, op),
				    CRIS_UNSPEC_PLTGOTREAD);
	      rm = gen_reg_rtx (Pmode);
	      emit_move_insn (rm, gen_rtx_CONST (Pmode, tem));
	      ro = gen_reg_rtx (Pmode);
	      if (expand_binop (Pmode, add_optab, rm,
				pic_offset_table_rtx,
				ro, 0, OPTAB_LIB_WIDEN) != ro)
		internal_error ("expand_binop failed in movsi got");
	      mem = gen_rtx_MEM (Pmode, ro);

	      /* This MEM doesn't alias anything.  Whether it aliases
		 other same symbols is unimportant. */
	      set_mem_alias_set (mem, new_alias_set ());
	      MEM_NOTRAP_P (mem) = 1;
	      op = mem;
	    }
	}
      else
	/* Can't possibly get a GOT-needing-fixup for a function-call,
	   right? */
	fatal_insn ("unidentifiable call op", op);

      *opp = replace_equiv_address (*opp, op);
    }
}

/* Make sure operands are in the right order for an addsi3 insn as
   generated by a define_split.  Nothing but REG_P as the first operand
   is recognized by addsi3 after reload.
   OPERANDS contains the operands, with the first at OPERANDS[N] and
   the second at OPERANDS[N+1]. */

void
cris_order_for_addsi3 (rtx *operands, int n)
{
  /* Swap the two operands when the first isn't a register.  */
  if (!REG_P (operands[n]))
    {
      rtx tem = operands[n];
      operands[n] = operands[n + 1];
      operands[n + 1] = tem;
    }
}

/* Use from within code, from e.g. PRINT_OPERAND and
   PRINT_OPERAND_ADDRESS.  Macros used in output_addr_const need to
   emit different things depending on whether code operand or constant
   is emitted.  The in_code counter flags "inside a code operand" for
   the workers below. */

static void
cris_output_addr_const (FILE *file, rtx x)
{
  in_code++;
  output_addr_const (file, x);
  in_code--;
}

/* Worker function for ASM_OUTPUT_SYMBOL_REF. */

void
cris_asm_output_symbol_ref (FILE *file, rtx x)
{
  gcc_assert (GET_CODE (x) == SYMBOL_REF);

  if (flag_pic && in_code > 0)
    {
      const char *origstr = XSTR (x, 0);
      const char *str;
      str = (* targetm.strip_name_encoding) (origstr);
      assemble_name (file, str);

      /* Sanity check. */
      if (!TARGET_V32 && !crtl->uses_pic_offset_table)
	output_operand_lossage ("PIC register isn't set up");
    }
  else
    assemble_name (file, XSTR (x, 0));
}

/* Worker function for ASM_OUTPUT_LABEL_REF. */

void
cris_asm_output_label_ref (FILE *file, char *buf)
{
  if (flag_pic && in_code > 0)
    {
      assemble_name (file, buf);

      /* Sanity check. */
      if (!TARGET_V32 && !crtl->uses_pic_offset_table)
	internal_error ("emitting PIC operand, but PIC register "
			"isn%'t set up");
    }
  else
    assemble_name (file, buf);
}

/* Worker function for OUTPUT_ADDR_CONST_EXTRA.  Emits the assembler
   relocation suffix for CRIS PIC unspecs; returns false for anything
   it doesn't handle. */

bool
cris_output_addr_const_extra (FILE *file, rtx xconst)
{
  switch (GET_CODE (xconst))
    {
      rtx x;

    case UNSPEC:
      x = XVECEXP (xconst, 0, 0);
      CRIS_ASSERT (GET_CODE (x) == SYMBOL_REF
		   || GET_CODE (x) == LABEL_REF
		   || GET_CODE (x) == CONST);
      output_addr_const (file, x);
      switch (XINT (xconst, 1))
	{
	case CRIS_UNSPEC_PCREL:
	  /* We only get this with -fpic/PIC to tell it apart from an
	     invalid symbol.  We can't tell here, but it should only be
	     the operand of a call or movsi.
 */
	  gcc_assert (TARGET_V32 && flag_pic);
	  break;

	case CRIS_UNSPEC_PLT_PCREL:
	  gcc_assert (TARGET_V32);
	  fprintf (file, ":PLT");
	  break;

	case CRIS_UNSPEC_PLT_GOTREL:
	  gcc_assert (!TARGET_V32);
	  fprintf (file, ":PLTG");
	  break;

	case CRIS_UNSPEC_GOTREL:
	  gcc_assert (!TARGET_V32);
	  fprintf (file, ":GOTOFF");
	  break;

	case CRIS_UNSPEC_GOTREAD:
	  if (flag_pic == 1)
	    fprintf (file, ":GOT16");
	  else
	    fprintf (file, ":GOT");
	  break;

	case CRIS_UNSPEC_PLTGOTREAD:
	  if (flag_pic == 1)
	    fprintf (file, CRIS_GOTPLT_SUFFIX "16");
	  else
	    fprintf (file, CRIS_GOTPLT_SUFFIX);
	  break;

	default:
	  gcc_unreachable ();
	}
      return true;

    default:
      return false;
    }
}

/* Worker function for TARGET_STRUCT_VALUE_RTX. */

static rtx
cris_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		       int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, CRIS_STRUCT_VALUE_REGNUM);
}

/* Worker function for TARGET_SETUP_INCOMING_VARARGS.  Records how many
   argument registers are unnamed so the prologue can push them.  */

static void
cris_setup_incoming_varargs (CUMULATIVE_ARGS *ca,
			     enum machine_mode mode ATTRIBUTE_UNUSED,
			     tree type ATTRIBUTE_UNUSED,
			     int *pretend_arg_size,
			     int second_time)
{
  if (ca->regs < CRIS_MAX_ARGS_IN_REGS)
    {
      int stdarg_regs = CRIS_MAX_ARGS_IN_REGS - ca->regs;
      cfun->machine->stdarg_regs = stdarg_regs;
      *pretend_arg_size = stdarg_regs * 4;
    }

  if (TARGET_PDEBUG)
    fprintf (asm_out_file,
	     "\n; VA:: ANSI: %d args before, anon @ #%d, %dtime\n",
	     ca->regs, *pretend_arg_size, second_time);
}

/* Return true if TYPE must be passed by invisible reference.
   For cris, we pass <= 8 bytes by value, others by reference. */

static bool
cris_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
			enum machine_mode mode, const_tree type,
			bool named ATTRIBUTE_UNUSED)
{
  return (targetm.calls.must_pass_in_stack (mode, type)
	  || CRIS_FUNCTION_ARG_SIZE (mode, type) > 8);
}

/* A combination of defining TARGET_PROMOTE_FUNCTION_MODE, promoting
   arguments and *not* defining TARGET_PROMOTE_PROTOTYPES or
   PROMOTE_MODE gives the best code size and speed for gcc, ipps and
   products in gcc-2.7.2.
*/

enum machine_mode
cris_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			    enum machine_mode mode,
			    int *punsignedp ATTRIBUTE_UNUSED,
			    const_tree fntype ATTRIBUTE_UNUSED,
			    int for_return)
{
  /* Defining PROMOTE_FUNCTION_RETURN in gcc-2.7.2 uncovered bug 981110
     (even when modifying TARGET_FUNCTION_VALUE to return the promoted
     mode).  Maybe pointless as of now, but let's keep the old
     behavior. */
  if (for_return == 1)
    return mode;
  return CRIS_PROMOTED_MODE (mode, *punsignedp, type);
}

/* Let's assume all functions return in r[CRIS_FIRST_ARG_REG] for the
   time being. */

static rtx
cris_function_value (const_tree type,
		     const_tree func ATTRIBUTE_UNUSED,
		     bool outgoing ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (TYPE_MODE (type), CRIS_FIRST_ARG_REG);
}

/* Let's assume all functions return in r[CRIS_FIRST_ARG_REG] for the
   time being. */

static rtx
cris_libcall_value (enum machine_mode mode,
		    const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, CRIS_FIRST_ARG_REG);
}

/* Let's assume all functions return in r[CRIS_FIRST_ARG_REG] for the
   time being. */

bool
cris_function_value_regno_p (const unsigned int regno)
{
  return (regno == CRIS_FIRST_ARG_REG);
}

/* Worker for TARGET_ARG_PARTIAL_BYTES: an argument of 5..8 bytes
   arriving at the last argument register is split, with one word in
   the register and the rest on the stack.  */

static int
cris_arg_partial_bytes (CUMULATIVE_ARGS *ca, enum machine_mode mode,
			tree type, bool named ATTRIBUTE_UNUSED)
{
  if (ca->regs == CRIS_MAX_ARGS_IN_REGS - 1
      && !targetm.calls.must_pass_in_stack (mode, type)
      && CRIS_FUNCTION_ARG_SIZE (mode, type) > 4
      && CRIS_FUNCTION_ARG_SIZE (mode, type) <= 8)
    return UNITS_PER_WORD;
  else
    return 0;
}

/* Common worker for the two function-arg hooks below; INCOMING selects
   the incoming-argument rule (only named args get registers).  */

static rtx
cris_function_arg_1 (const CUMULATIVE_ARGS *ca,
		     enum machine_mode mode ATTRIBUTE_UNUSED,
		     const_tree type ATTRIBUTE_UNUSED,
		     bool named, bool incoming)
{
  if ((!incoming || named) && ca->regs < CRIS_MAX_ARGS_IN_REGS)
    return gen_rtx_REG (mode, CRIS_FIRST_ARG_REG + ca->regs);
  else
    return NULL_RTX;
}

/* Worker function for TARGET_FUNCTION_ARG.
   The void_type_node is sent as a "closing" call.
 */

static rtx
cris_function_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
		   const_tree type, bool named)
{
  return cris_function_arg_1 (ca, mode, type, named, false);
}

/* Worker function for TARGET_FUNCTION_INCOMING_ARG.

   The differences between this and the previous, is that this one
   checks that an argument is named, since incoming stdarg/varargs
   arguments are pushed onto the stack, and we don't have to check
   against the "closing" void_type_node TYPE parameter. */

static rtx
cris_function_incoming_arg (CUMULATIVE_ARGS *ca, enum machine_mode mode,
			    const_tree type, bool named)
{
  return cris_function_arg_1 (ca, mode, type, named, true);
}

/* Worker function for TARGET_FUNCTION_ARG_ADVANCE. */

static void
cris_function_arg_advance (CUMULATIVE_ARGS *ca, enum machine_mode mode,
			   const_tree type, bool named ATTRIBUTE_UNUSED)
{
  /* Each argument consumes one register slot per word, rounded up.  */
  ca->regs += (3 + CRIS_FUNCTION_ARG_SIZE (mode, type)) / 4;
}

/* Worker function for TARGET_MD_ASM_CLOBBERS. */

static tree
cris_md_asm_clobbers (tree outputs, tree inputs, tree in_clobbers)
{
  HARD_REG_SET mof_set;
  tree clobbers;
  tree t;

  CLEAR_HARD_REG_SET (mof_set);
  SET_HARD_REG_BIT (mof_set, CRIS_MOF_REGNUM);

  /* For the time being, all asms clobber condition codes.  Revisit
     when there's a reasonable use for inputs/outputs that mention
     condition codes. */
  clobbers
    = tree_cons (NULL_TREE,
		 build_string (strlen (reg_names[CRIS_CC0_REGNUM]),
			       reg_names[CRIS_CC0_REGNUM]),
		 in_clobbers);

  for (t = outputs; t != NULL; t = TREE_CHAIN (t))
    {
      tree val = TREE_VALUE (t);

      /* The constraint letter for the singleton register class of MOF
	 is 'h'.  If it's mentioned in the constraints, the asm is
	 MOF-aware and adding it to the clobbers would cause it to have
	 impossible constraints.
*/
      if (strchr (TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))),
		  'h') != NULL
	  || tree_overlaps_hard_reg_set (val, &mof_set) != NULL_TREE)
	return clobbers;
    }

  for (t = inputs; t != NULL; t = TREE_CHAIN (t))
    {
      tree val = TREE_VALUE (t);

      if (strchr (TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))),
		  'h') != NULL
	  || tree_overlaps_hard_reg_set (val, &mof_set) != NULL_TREE)
	return clobbers;
    }

  /* No operand mentions MOF, so add it to the clobber list.  */
  return tree_cons (NULL_TREE,
		    build_string (strlen (reg_names[CRIS_MOF_REGNUM]),
				  reg_names[CRIS_MOF_REGNUM]),
		    clobbers);
}

/* Implement TARGET_FRAME_POINTER_REQUIRED.

   Really only needed if the stack frame has variable length (alloca
   or variable sized local arguments (GNU C extension).  See PR39499
   and PR38609 for the reason this isn't just 0. */

bool
cris_frame_pointer_required (void)
{
  return !current_function_sp_is_unchanging;
}

/* Implement TARGET_ASM_TRAMPOLINE_TEMPLATE.

   This looks too complicated, and it is.  I assigned r7 to be the
   static chain register, but it is call-saved, so we have to save it,
   and come back to restore it after the call, so we have to save
   srp...  Anyway, trampolines are rare enough that we can cope with
   this somewhat lack of elegance.
    (Do not be tempted to "straighten up" whitespace in the asms; the
   assembler #NO_APP state mandates strict spacing). */
/* ??? See the i386 regparm=3 implementation that pushes the static
   chain value to the stack in the trampoline, and uses a call-saved
   register when called directly. */

static void
cris_asm_trampoline_template (FILE *f)
{
  if (TARGET_V32)
    {
      /* This normally-unused nop insn acts as an instruction to the
	 simulator to flush its instruction cache.  None of the other
	 instructions in the trampoline template suits as a trigger
	 for V32.  The pc-relative addressing mode works nicely as a
	 trigger for V10.

	 FIXME: Have specific V32 template (possibly avoiding the use
	 of a special instruction).
 */
      fprintf (f, "\tclearf x\n");
      /* We have to use a register as an intermediate, choosing
	 semi-randomly R1 (which has to not be the
	 STATIC_CHAIN_REGNUM), so we can use it for address indirection
	 and jsr target. */
      fprintf (f, "\tmove $r1,$mof\n");
      /* +4 */
      fprintf (f, "\tmove.d 0,$r1\n");
      fprintf (f, "\tmove.d $%s,[$r1]\n",
	       reg_names[STATIC_CHAIN_REGNUM]);
      fprintf (f, "\taddq 6,$r1\n");
      fprintf (f, "\tmove $mof,[$r1]\n");
      fprintf (f, "\taddq 6,$r1\n");
      fprintf (f, "\tmove $srp,[$r1]\n");
      /* +20 */
      fprintf (f, "\tmove.d 0,$%s\n", reg_names[STATIC_CHAIN_REGNUM]);
      /* +26 */
      fprintf (f, "\tmove.d 0,$r1\n");
      fprintf (f, "\tjsr $r1\n");
      fprintf (f, "\tsetf\n");
      /* +36 */
      fprintf (f, "\tmove.d 0,$%s\n", reg_names[STATIC_CHAIN_REGNUM]);
      /* +42 */
      fprintf (f, "\tmove.d 0,$r1\n");
      /* +48 */
      fprintf (f, "\tmove.d 0,$r9\n");
      fprintf (f, "\tjump $r9\n");
      fprintf (f, "\tsetf\n");
    }
  else
    {
      fprintf (f, "\tmove.d $%s,[$pc+20]\n",
	       reg_names[STATIC_CHAIN_REGNUM]);
      fprintf (f, "\tmove $srp,[$pc+22]\n");
      fprintf (f, "\tmove.d 0,$%s\n", reg_names[STATIC_CHAIN_REGNUM]);
      fprintf (f, "\tjsr 0\n");
      fprintf (f, "\tmove.d 0,$%s\n", reg_names[STATIC_CHAIN_REGNUM]);
      fprintf (f, "\tjump 0\n");
    }
}

/* Implement TARGET_TRAMPOLINE_INIT.  Copies the template above into
   M_TRAMP and patches in the static-chain value and target-function
   address at the offsets the template left as zeros.
 */

static void
cris_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx tramp = XEXP (m_tramp, 0);
  rtx mem;

  emit_block_move (m_tramp, assemble_trampoline_template (),
		   GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);

  if (TARGET_V32)
    {
      mem = adjust_address (m_tramp, SImode, 6);
      emit_move_insn (mem, plus_constant (tramp, 38));
      mem = adjust_address (m_tramp, SImode, 22);
      emit_move_insn (mem, chain_value);
      mem = adjust_address (m_tramp, SImode, 28);
      emit_move_insn (mem, fnaddr);
    }
  else
    {
      mem = adjust_address (m_tramp, SImode, 10);
      emit_move_insn (mem, chain_value);
      mem = adjust_address (m_tramp, SImode, 16);
      emit_move_insn (mem, fnaddr);
    }

  /* Note that there is no need to do anything with the cache for
     sake of a trampoline. */
}

#if 0
/* Various small functions to replace macros.  Only called from a
   debugger.  They might collide with gcc functions or system
   functions, so only emit them when '#if 1' above. */

enum rtx_code Get_code (rtx);

enum rtx_code
Get_code (rtx x)
{
  return GET_CODE (x);
}

const char *Get_mode (rtx);

const char *
Get_mode (rtx x)
{
  return GET_MODE_NAME (GET_MODE (x));
}

rtx Xexp (rtx, int);

rtx
Xexp (rtx x, int n)
{
  return XEXP (x, n);
}

rtx Xvecexp (rtx, int, int);

rtx
Xvecexp (rtx x, int n, int m)
{
  return XVECEXP (x, n, m);
}

int Get_rtx_len (rtx);

int
Get_rtx_len (rtx x)
{
  return GET_RTX_LENGTH (GET_CODE (x));
}

/* Use upper-case to distinguish from local variables that are
   sometimes called next_insn and prev_insn. */

rtx Next_insn (rtx);

rtx
Next_insn (rtx insn)
{
  return NEXT_INSN (insn);
}

rtx Prev_insn (rtx);

rtx
Prev_insn (rtx insn)
{
  return PREV_INSN (insn);
}
#endif

#include "gt-cris.h"

/*
 * Local variables:
 * eval: (c-set-style "gnu")
 * indent-tabs-mode: t
 * End:
 */
29.065812
83
0.647188
[ "model" ]
aed05d57e6a41bf54ee0ff2120dc7814ff4a8104
443
h
C
src/image.h
danpla/dpfontbaker
08d3b9ba1f25d112f68f6eeab71e9127ba530543
[ "Zlib" ]
3
2019-12-13T02:46:01.000Z
2021-02-03T00:50:29.000Z
src/image.h
danpla/dpfontbaker
08d3b9ba1f25d112f68f6eeab71e9127ba530543
[ "Zlib" ]
1
2020-06-19T11:52:43.000Z
2020-06-20T06:02:58.000Z
src/image.h
danpla/dpfontbaker
08d3b9ba1f25d112f68f6eeab71e9127ba530543
[ "Zlib" ]
1
2020-05-04T10:31:24.000Z
2020-05-04T10:31:24.000Z
#pragma once #include <cstdint> #include <vector> namespace dpfb { class Image { public: Image(std::uint8_t* data, int w, int h, int pitch); Image(int w, int h); int getWidth() const; int getHeight() const; int getPitch() const; std::uint8_t* getData(); const std::uint8_t* getData() const; private: int w; int h; int pitch; std::uint8_t* data; std::vector<std::uint8_t> ownData; }; }
13.84375
55
0.609481
[ "vector" ]
aed33bb0a8c3a4bdb8f6f109b2b4ded8fb132604
90,874
c
C
deps/mozjs/incs/nss/nss/lib/softoken/fipstest.c
ktrzeciaknubisa/jxcore-binary-packaging
5759df084be10a259a4a4f1b38c214c6084a7c0f
[ "Apache-2.0" ]
2,494
2015-02-11T04:34:13.000Z
2022-03-31T14:21:47.000Z
deps/mozjs/incs/nss/nss/lib/softoken/fipstest.c
ktrzeciaknubisa/jxcore-binary-packaging
5759df084be10a259a4a4f1b38c214c6084a7c0f
[ "Apache-2.0" ]
685
2015-02-11T17:14:26.000Z
2021-04-13T09:58:39.000Z
deps/mozjs/incs/nss/nss/lib/softoken/fipstest.c
ktrzeciaknubisa/jxcore-binary-packaging
5759df084be10a259a4a4f1b38c214c6084a7c0f
[ "Apache-2.0" ]
442
2015-02-12T13:45:46.000Z
2022-03-21T05:28:05.000Z
/* * PKCS #11 FIPS Power-Up Self Test. * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include "softoken.h" /* Required for RC2-ECB, RC2-CBC, RC4, DES-ECB, */ /* DES-CBC, DES3-ECB, DES3-CBC, RSA */ /* and DSA. */ #include "seccomon.h" /* Required for RSA and DSA. */ #include "lowkeyi.h" /* Required for RSA and DSA. */ #include "pkcs11.h" /* Required for PKCS #11. */ #include "secerr.h" #ifndef NSS_DISABLE_ECC #include "ec.h" /* Required for ECDSA */ #endif /* FIPS preprocessor directives for RC2-ECB and RC2-CBC. */ #define FIPS_RC2_KEY_LENGTH 5 /* 40-bits */ #define FIPS_RC2_ENCRYPT_LENGTH 8 /* 64-bits */ #define FIPS_RC2_DECRYPT_LENGTH 8 /* 64-bits */ /* FIPS preprocessor directives for RC4. */ #define FIPS_RC4_KEY_LENGTH 5 /* 40-bits */ #define FIPS_RC4_ENCRYPT_LENGTH 8 /* 64-bits */ #define FIPS_RC4_DECRYPT_LENGTH 8 /* 64-bits */ /* FIPS preprocessor directives for DES-ECB and DES-CBC. */ #define FIPS_DES_ENCRYPT_LENGTH 8 /* 64-bits */ #define FIPS_DES_DECRYPT_LENGTH 8 /* 64-bits */ /* FIPS preprocessor directives for DES3-CBC and DES3-ECB. */ #define FIPS_DES3_ENCRYPT_LENGTH 8 /* 64-bits */ #define FIPS_DES3_DECRYPT_LENGTH 8 /* 64-bits */ /* FIPS preprocessor directives for AES-ECB and AES-CBC. */ #define FIPS_AES_BLOCK_SIZE 16 /* 128-bits */ #define FIPS_AES_ENCRYPT_LENGTH 16 /* 128-bits */ #define FIPS_AES_DECRYPT_LENGTH 16 /* 128-bits */ #define FIPS_AES_128_KEY_SIZE 16 /* 128-bits */ #define FIPS_AES_192_KEY_SIZE 24 /* 192-bits */ #define FIPS_AES_256_KEY_SIZE 32 /* 256-bits */ /* FIPS preprocessor directives for message digests */ #define FIPS_KNOWN_HASH_MESSAGE_LENGTH 64 /* 512-bits */ /* FIPS preprocessor directives for RSA. 
*/ #define FIPS_RSA_TYPE siBuffer #define FIPS_RSA_PUBLIC_EXPONENT_LENGTH 3 /* 24-bits */ #define FIPS_RSA_PRIVATE_VERSION_LENGTH 1 /* 8-bits */ #define FIPS_RSA_MESSAGE_LENGTH 256 /* 2048-bits */ #define FIPS_RSA_COEFFICIENT_LENGTH 128 /* 1024-bits */ #define FIPS_RSA_PRIME0_LENGTH 128 /* 1024-bits */ #define FIPS_RSA_PRIME1_LENGTH 128 /* 1024-bits */ #define FIPS_RSA_EXPONENT0_LENGTH 128 /* 1024-bits */ #define FIPS_RSA_EXPONENT1_LENGTH 128 /* 1024-bits */ #define FIPS_RSA_PRIVATE_EXPONENT_LENGTH 256 /* 2048-bits */ #define FIPS_RSA_ENCRYPT_LENGTH 256 /* 2048-bits */ #define FIPS_RSA_DECRYPT_LENGTH 256 /* 2048-bits */ #define FIPS_RSA_SIGNATURE_LENGTH 256 /* 2048-bits */ #define FIPS_RSA_MODULUS_LENGTH 256 /* 2048-bits */ /* FIPS preprocessor directives for DSA. */ #define FIPS_DSA_TYPE siBuffer #define FIPS_DSA_DIGEST_LENGTH 20 /* 160-bits */ #define FIPS_DSA_SUBPRIME_LENGTH 20 /* 160-bits */ #define FIPS_DSA_SIGNATURE_LENGTH 40 /* 320-bits */ #define FIPS_DSA_PRIME_LENGTH 128 /* 1024-bits */ #define FIPS_DSA_BASE_LENGTH 128 /* 1024-bits */ /* FIPS preprocessor directives for RNG. */ #define FIPS_RNG_XKEY_LENGTH 32 /* 256-bits */ static CK_RV sftk_fips_RC2_PowerUpSelfTest( void ) { /* RC2 Known Key (40-bits). */ static const PRUint8 rc2_known_key[] = { "RSARC" }; /* RC2-CBC Known Initialization Vector (64-bits). */ static const PRUint8 rc2_cbc_known_initialization_vector[] = {"Security"}; /* RC2 Known Plaintext (64-bits). */ static const PRUint8 rc2_ecb_known_plaintext[] = {"Netscape"}; static const PRUint8 rc2_cbc_known_plaintext[] = {"Netscape"}; /* RC2 Known Ciphertext (64-bits). */ static const PRUint8 rc2_ecb_known_ciphertext[] = { 0x1a,0x71,0x33,0x54,0x8d,0x5c,0xd2,0x30}; static const PRUint8 rc2_cbc_known_ciphertext[] = { 0xff,0x41,0xdb,0x94,0x8a,0x4c,0x33,0xb3}; /* RC2 variables. 
*/ PRUint8 rc2_computed_ciphertext[FIPS_RC2_ENCRYPT_LENGTH]; PRUint8 rc2_computed_plaintext[FIPS_RC2_DECRYPT_LENGTH]; RC2Context * rc2_context; unsigned int rc2_bytes_encrypted; unsigned int rc2_bytes_decrypted; SECStatus rc2_status; /******************************************************/ /* RC2-ECB Single-Round Known Answer Encryption Test: */ /******************************************************/ rc2_context = RC2_CreateContext( rc2_known_key, FIPS_RC2_KEY_LENGTH, NULL, NSS_RC2, FIPS_RC2_KEY_LENGTH ); if( rc2_context == NULL ) return( CKR_HOST_MEMORY ); rc2_status = RC2_Encrypt( rc2_context, rc2_computed_ciphertext, &rc2_bytes_encrypted, FIPS_RC2_ENCRYPT_LENGTH, rc2_ecb_known_plaintext, FIPS_RC2_DECRYPT_LENGTH ); RC2_DestroyContext( rc2_context, PR_TRUE ); if( ( rc2_status != SECSuccess ) || ( rc2_bytes_encrypted != FIPS_RC2_ENCRYPT_LENGTH ) || ( PORT_Memcmp( rc2_computed_ciphertext, rc2_ecb_known_ciphertext, FIPS_RC2_ENCRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /******************************************************/ /* RC2-ECB Single-Round Known Answer Decryption Test: */ /******************************************************/ rc2_context = RC2_CreateContext( rc2_known_key, FIPS_RC2_KEY_LENGTH, NULL, NSS_RC2, FIPS_RC2_KEY_LENGTH ); if( rc2_context == NULL ) return( CKR_HOST_MEMORY ); rc2_status = RC2_Decrypt( rc2_context, rc2_computed_plaintext, &rc2_bytes_decrypted, FIPS_RC2_DECRYPT_LENGTH, rc2_ecb_known_ciphertext, FIPS_RC2_ENCRYPT_LENGTH ); RC2_DestroyContext( rc2_context, PR_TRUE ); if( ( rc2_status != SECSuccess ) || ( rc2_bytes_decrypted != FIPS_RC2_DECRYPT_LENGTH ) || ( PORT_Memcmp( rc2_computed_plaintext, rc2_ecb_known_plaintext, FIPS_RC2_DECRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /******************************************************/ /* RC2-CBC Single-Round Known Answer Encryption Test: */ /******************************************************/ rc2_context = RC2_CreateContext( rc2_known_key, FIPS_RC2_KEY_LENGTH, 
rc2_cbc_known_initialization_vector, NSS_RC2_CBC, FIPS_RC2_KEY_LENGTH ); if( rc2_context == NULL ) return( CKR_HOST_MEMORY ); rc2_status = RC2_Encrypt( rc2_context, rc2_computed_ciphertext, &rc2_bytes_encrypted, FIPS_RC2_ENCRYPT_LENGTH, rc2_cbc_known_plaintext, FIPS_RC2_DECRYPT_LENGTH ); RC2_DestroyContext( rc2_context, PR_TRUE ); if( ( rc2_status != SECSuccess ) || ( rc2_bytes_encrypted != FIPS_RC2_ENCRYPT_LENGTH ) || ( PORT_Memcmp( rc2_computed_ciphertext, rc2_cbc_known_ciphertext, FIPS_RC2_ENCRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /******************************************************/ /* RC2-CBC Single-Round Known Answer Decryption Test: */ /******************************************************/ rc2_context = RC2_CreateContext( rc2_known_key, FIPS_RC2_KEY_LENGTH, rc2_cbc_known_initialization_vector, NSS_RC2_CBC, FIPS_RC2_KEY_LENGTH ); if( rc2_context == NULL ) return( CKR_HOST_MEMORY ); rc2_status = RC2_Decrypt( rc2_context, rc2_computed_plaintext, &rc2_bytes_decrypted, FIPS_RC2_DECRYPT_LENGTH, rc2_cbc_known_ciphertext, FIPS_RC2_ENCRYPT_LENGTH ); RC2_DestroyContext( rc2_context, PR_TRUE ); if( ( rc2_status != SECSuccess ) || ( rc2_bytes_decrypted != FIPS_RC2_DECRYPT_LENGTH ) || ( PORT_Memcmp( rc2_computed_plaintext, rc2_ecb_known_plaintext, FIPS_RC2_DECRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); return( CKR_OK ); } static CK_RV sftk_fips_RC4_PowerUpSelfTest( void ) { /* RC4 Known Key (40-bits). */ static const PRUint8 rc4_known_key[] = { "RSARC" }; /* RC4 Known Plaintext (64-bits). */ static const PRUint8 rc4_known_plaintext[] = { "Netscape" }; /* RC4 Known Ciphertext (64-bits). */ static const PRUint8 rc4_known_ciphertext[] = { 0x29,0x33,0xc7,0x9a,0x9d,0x6c,0x09,0xdd}; /* RC4 variables. 
*/ PRUint8 rc4_computed_ciphertext[FIPS_RC4_ENCRYPT_LENGTH]; PRUint8 rc4_computed_plaintext[FIPS_RC4_DECRYPT_LENGTH]; RC4Context * rc4_context; unsigned int rc4_bytes_encrypted; unsigned int rc4_bytes_decrypted; SECStatus rc4_status; /**************************************************/ /* RC4 Single-Round Known Answer Encryption Test: */ /**************************************************/ rc4_context = RC4_CreateContext( rc4_known_key, FIPS_RC4_KEY_LENGTH ); if( rc4_context == NULL ) return( CKR_HOST_MEMORY ); rc4_status = RC4_Encrypt( rc4_context, rc4_computed_ciphertext, &rc4_bytes_encrypted, FIPS_RC4_ENCRYPT_LENGTH, rc4_known_plaintext, FIPS_RC4_DECRYPT_LENGTH ); RC4_DestroyContext( rc4_context, PR_TRUE ); if( ( rc4_status != SECSuccess ) || ( rc4_bytes_encrypted != FIPS_RC4_ENCRYPT_LENGTH ) || ( PORT_Memcmp( rc4_computed_ciphertext, rc4_known_ciphertext, FIPS_RC4_ENCRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /**************************************************/ /* RC4 Single-Round Known Answer Decryption Test: */ /**************************************************/ rc4_context = RC4_CreateContext( rc4_known_key, FIPS_RC4_KEY_LENGTH ); if( rc4_context == NULL ) return( CKR_HOST_MEMORY ); rc4_status = RC4_Decrypt( rc4_context, rc4_computed_plaintext, &rc4_bytes_decrypted, FIPS_RC4_DECRYPT_LENGTH, rc4_known_ciphertext, FIPS_RC4_ENCRYPT_LENGTH ); RC4_DestroyContext( rc4_context, PR_TRUE ); if( ( rc4_status != SECSuccess ) || ( rc4_bytes_decrypted != FIPS_RC4_DECRYPT_LENGTH ) || ( PORT_Memcmp( rc4_computed_plaintext, rc4_known_plaintext, FIPS_RC4_DECRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); return( CKR_OK ); } static CK_RV sftk_fips_DES_PowerUpSelfTest( void ) { /* DES Known Key (56-bits). */ static const PRUint8 des_known_key[] = { "ANSI DES" }; /* DES-CBC Known Initialization Vector (64-bits). */ static const PRUint8 des_cbc_known_initialization_vector[] = { "Security" }; /* DES Known Plaintext (64-bits). 
*/ static const PRUint8 des_ecb_known_plaintext[] = { "Netscape" }; static const PRUint8 des_cbc_known_plaintext[] = { "Netscape" }; /* DES Known Ciphertext (64-bits). */ static const PRUint8 des_ecb_known_ciphertext[] = { 0x26,0x14,0xe9,0xc3,0x28,0x80,0x50,0xb0}; static const PRUint8 des_cbc_known_ciphertext[] = { 0x5e,0x95,0x94,0x5d,0x76,0xa2,0xd3,0x7d}; /* DES variables. */ PRUint8 des_computed_ciphertext[FIPS_DES_ENCRYPT_LENGTH]; PRUint8 des_computed_plaintext[FIPS_DES_DECRYPT_LENGTH]; DESContext * des_context; unsigned int des_bytes_encrypted; unsigned int des_bytes_decrypted; SECStatus des_status; /******************************************************/ /* DES-ECB Single-Round Known Answer Encryption Test: */ /******************************************************/ des_context = DES_CreateContext( des_known_key, NULL, NSS_DES, PR_TRUE ); if( des_context == NULL ) return( CKR_HOST_MEMORY ); des_status = DES_Encrypt( des_context, des_computed_ciphertext, &des_bytes_encrypted, FIPS_DES_ENCRYPT_LENGTH, des_ecb_known_plaintext, FIPS_DES_DECRYPT_LENGTH ); DES_DestroyContext( des_context, PR_TRUE ); if( ( des_status != SECSuccess ) || ( des_bytes_encrypted != FIPS_DES_ENCRYPT_LENGTH ) || ( PORT_Memcmp( des_computed_ciphertext, des_ecb_known_ciphertext, FIPS_DES_ENCRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /******************************************************/ /* DES-ECB Single-Round Known Answer Decryption Test: */ /******************************************************/ des_context = DES_CreateContext( des_known_key, NULL, NSS_DES, PR_FALSE ); if( des_context == NULL ) return( CKR_HOST_MEMORY ); des_status = DES_Decrypt( des_context, des_computed_plaintext, &des_bytes_decrypted, FIPS_DES_DECRYPT_LENGTH, des_ecb_known_ciphertext, FIPS_DES_ENCRYPT_LENGTH ); DES_DestroyContext( des_context, PR_TRUE ); if( ( des_status != SECSuccess ) || ( des_bytes_decrypted != FIPS_DES_DECRYPT_LENGTH ) || ( PORT_Memcmp( des_computed_plaintext, des_ecb_known_plaintext, 
FIPS_DES_DECRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /******************************************************/ /* DES-CBC Single-Round Known Answer Encryption Test. */ /******************************************************/ des_context = DES_CreateContext( des_known_key, des_cbc_known_initialization_vector, NSS_DES_CBC, PR_TRUE ); if( des_context == NULL ) return( CKR_HOST_MEMORY ); des_status = DES_Encrypt( des_context, des_computed_ciphertext, &des_bytes_encrypted, FIPS_DES_ENCRYPT_LENGTH, des_cbc_known_plaintext, FIPS_DES_DECRYPT_LENGTH ); DES_DestroyContext( des_context, PR_TRUE ); if( ( des_status != SECSuccess ) || ( des_bytes_encrypted != FIPS_DES_ENCRYPT_LENGTH ) || ( PORT_Memcmp( des_computed_ciphertext, des_cbc_known_ciphertext, FIPS_DES_ENCRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /******************************************************/ /* DES-CBC Single-Round Known Answer Decryption Test. */ /******************************************************/ des_context = DES_CreateContext( des_known_key, des_cbc_known_initialization_vector, NSS_DES_CBC, PR_FALSE ); if( des_context == NULL ) return( CKR_HOST_MEMORY ); des_status = DES_Decrypt( des_context, des_computed_plaintext, &des_bytes_decrypted, FIPS_DES_DECRYPT_LENGTH, des_cbc_known_ciphertext, FIPS_DES_ENCRYPT_LENGTH ); DES_DestroyContext( des_context, PR_TRUE ); if( ( des_status != SECSuccess ) || ( des_bytes_decrypted != FIPS_DES_DECRYPT_LENGTH ) || ( PORT_Memcmp( des_computed_plaintext, des_cbc_known_plaintext, FIPS_DES_DECRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); return( CKR_OK ); } static CK_RV sftk_fips_DES3_PowerUpSelfTest( void ) { /* DES3 Known Key (56-bits). */ static const PRUint8 des3_known_key[] = { "ANSI Triple-DES Key Data" }; /* DES3-CBC Known Initialization Vector (64-bits). */ static const PRUint8 des3_cbc_known_initialization_vector[] = { "Security" }; /* DES3 Known Plaintext (64-bits). 
*/ static const PRUint8 des3_ecb_known_plaintext[] = { "Netscape" }; static const PRUint8 des3_cbc_known_plaintext[] = { "Netscape" }; /* DES3 Known Ciphertext (64-bits). */ static const PRUint8 des3_ecb_known_ciphertext[] = { 0x55,0x8e,0xad,0x3c,0xee,0x49,0x69,0xbe}; static const PRUint8 des3_cbc_known_ciphertext[] = { 0x43,0xdc,0x6a,0xc1,0xaf,0xa6,0x32,0xf5}; /* DES3 variables. */ PRUint8 des3_computed_ciphertext[FIPS_DES3_ENCRYPT_LENGTH]; PRUint8 des3_computed_plaintext[FIPS_DES3_DECRYPT_LENGTH]; DESContext * des3_context; unsigned int des3_bytes_encrypted; unsigned int des3_bytes_decrypted; SECStatus des3_status; /*******************************************************/ /* DES3-ECB Single-Round Known Answer Encryption Test. */ /*******************************************************/ des3_context = DES_CreateContext( des3_known_key, NULL, NSS_DES_EDE3, PR_TRUE ); if( des3_context == NULL ) return( CKR_HOST_MEMORY ); des3_status = DES_Encrypt( des3_context, des3_computed_ciphertext, &des3_bytes_encrypted, FIPS_DES3_ENCRYPT_LENGTH, des3_ecb_known_plaintext, FIPS_DES3_DECRYPT_LENGTH ); DES_DestroyContext( des3_context, PR_TRUE ); if( ( des3_status != SECSuccess ) || ( des3_bytes_encrypted != FIPS_DES3_ENCRYPT_LENGTH ) || ( PORT_Memcmp( des3_computed_ciphertext, des3_ecb_known_ciphertext, FIPS_DES3_ENCRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /*******************************************************/ /* DES3-ECB Single-Round Known Answer Decryption Test. 
*/ /*******************************************************/ des3_context = DES_CreateContext( des3_known_key, NULL, NSS_DES_EDE3, PR_FALSE ); if( des3_context == NULL ) return( CKR_HOST_MEMORY ); des3_status = DES_Decrypt( des3_context, des3_computed_plaintext, &des3_bytes_decrypted, FIPS_DES3_DECRYPT_LENGTH, des3_ecb_known_ciphertext, FIPS_DES3_ENCRYPT_LENGTH ); DES_DestroyContext( des3_context, PR_TRUE ); if( ( des3_status != SECSuccess ) || ( des3_bytes_decrypted != FIPS_DES3_DECRYPT_LENGTH ) || ( PORT_Memcmp( des3_computed_plaintext, des3_ecb_known_plaintext, FIPS_DES3_DECRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /*******************************************************/ /* DES3-CBC Single-Round Known Answer Encryption Test. */ /*******************************************************/ des3_context = DES_CreateContext( des3_known_key, des3_cbc_known_initialization_vector, NSS_DES_EDE3_CBC, PR_TRUE ); if( des3_context == NULL ) return( CKR_HOST_MEMORY ); des3_status = DES_Encrypt( des3_context, des3_computed_ciphertext, &des3_bytes_encrypted, FIPS_DES3_ENCRYPT_LENGTH, des3_cbc_known_plaintext, FIPS_DES3_DECRYPT_LENGTH ); DES_DestroyContext( des3_context, PR_TRUE ); if( ( des3_status != SECSuccess ) || ( des3_bytes_encrypted != FIPS_DES3_ENCRYPT_LENGTH ) || ( PORT_Memcmp( des3_computed_ciphertext, des3_cbc_known_ciphertext, FIPS_DES3_ENCRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /*******************************************************/ /* DES3-CBC Single-Round Known Answer Decryption Test. 
*/ /*******************************************************/ des3_context = DES_CreateContext( des3_known_key, des3_cbc_known_initialization_vector, NSS_DES_EDE3_CBC, PR_FALSE ); if( des3_context == NULL ) return( CKR_HOST_MEMORY ); des3_status = DES_Decrypt( des3_context, des3_computed_plaintext, &des3_bytes_decrypted, FIPS_DES3_DECRYPT_LENGTH, des3_cbc_known_ciphertext, FIPS_DES3_ENCRYPT_LENGTH ); DES_DestroyContext( des3_context, PR_TRUE ); if( ( des3_status != SECSuccess ) || ( des3_bytes_decrypted != FIPS_DES3_DECRYPT_LENGTH ) || ( PORT_Memcmp( des3_computed_plaintext, des3_cbc_known_plaintext, FIPS_DES3_DECRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); return( CKR_OK ); } /* AES self-test for 128-bit, 192-bit, or 256-bit key sizes*/ static CK_RV sftk_fips_AES_PowerUpSelfTest( int aes_key_size ) { /* AES Known Key (up to 256-bits). */ static const PRUint8 aes_known_key[] = { "AES-128 RIJNDAELLEADNJIR 821-SEA" }; /* AES-CBC Known Initialization Vector (128-bits). */ static const PRUint8 aes_cbc_known_initialization_vector[] = { "SecurityytiruceS" }; /* AES Known Plaintext (128-bits). (blocksize is 128-bits) */ static const PRUint8 aes_known_plaintext[] = { "NetscapeepacsteN" }; /* AES Known Ciphertext (128-bit key). */ static const PRUint8 aes_ecb128_known_ciphertext[] = { 0x3c,0xa5,0x96,0xf3,0x34,0x6a,0x96,0xc1, 0x03,0x88,0x16,0x7b,0x20,0xbf,0x35,0x47 }; static const PRUint8 aes_cbc128_known_ciphertext[] = { 0xcf,0x15,0x1d,0x4f,0x96,0xe4,0x4f,0x63, 0x15,0x54,0x14,0x1d,0x4e,0xd8,0xd5,0xea }; /* AES Known Ciphertext (192-bit key). */ static const PRUint8 aes_ecb192_known_ciphertext[] = { 0xa0,0x18,0x62,0xed,0x88,0x19,0xcb,0x62, 0x88,0x1d,0x4d,0xfe,0x84,0x02,0x89,0x0e }; static const PRUint8 aes_cbc192_known_ciphertext[] = { 0x83,0xf7,0xa4,0x76,0xd1,0x6f,0x07,0xbe, 0x07,0xbc,0x43,0x2f,0x6d,0xad,0x29,0xe1 }; /* AES Known Ciphertext (256-bit key). 
*/ static const PRUint8 aes_ecb256_known_ciphertext[] = { 0xdb,0xa6,0x52,0x01,0x8a,0x70,0xae,0x66, 0x3a,0x99,0xd8,0x95,0x7f,0xfb,0x01,0x67 }; static const PRUint8 aes_cbc256_known_ciphertext[] = { 0x37,0xea,0x07,0x06,0x31,0x1c,0x59,0x27, 0xc5,0xc5,0x68,0x71,0x6e,0x34,0x40,0x16 }; const PRUint8 *aes_ecb_known_ciphertext = ( aes_key_size == FIPS_AES_128_KEY_SIZE) ? aes_ecb128_known_ciphertext : ( aes_key_size == FIPS_AES_192_KEY_SIZE) ? aes_ecb192_known_ciphertext : aes_ecb256_known_ciphertext; const PRUint8 *aes_cbc_known_ciphertext = ( aes_key_size == FIPS_AES_128_KEY_SIZE) ? aes_cbc128_known_ciphertext : ( aes_key_size == FIPS_AES_192_KEY_SIZE) ? aes_cbc192_known_ciphertext : aes_cbc256_known_ciphertext; /* AES variables. */ PRUint8 aes_computed_ciphertext[FIPS_AES_ENCRYPT_LENGTH]; PRUint8 aes_computed_plaintext[FIPS_AES_DECRYPT_LENGTH]; AESContext * aes_context; unsigned int aes_bytes_encrypted; unsigned int aes_bytes_decrypted; SECStatus aes_status; /*check if aes_key_size is 128, 192, or 256 bits */ if ((aes_key_size != FIPS_AES_128_KEY_SIZE) && (aes_key_size != FIPS_AES_192_KEY_SIZE) && (aes_key_size != FIPS_AES_256_KEY_SIZE)) return( CKR_DEVICE_ERROR ); /******************************************************/ /* AES-ECB Single-Round Known Answer Encryption Test: */ /******************************************************/ aes_context = AES_CreateContext( aes_known_key, NULL, NSS_AES, PR_TRUE, aes_key_size, FIPS_AES_BLOCK_SIZE ); if( aes_context == NULL ) return( CKR_HOST_MEMORY ); aes_status = AES_Encrypt( aes_context, aes_computed_ciphertext, &aes_bytes_encrypted, FIPS_AES_ENCRYPT_LENGTH, aes_known_plaintext, FIPS_AES_DECRYPT_LENGTH ); AES_DestroyContext( aes_context, PR_TRUE ); if( ( aes_status != SECSuccess ) || ( aes_bytes_encrypted != FIPS_AES_ENCRYPT_LENGTH ) || ( PORT_Memcmp( aes_computed_ciphertext, aes_ecb_known_ciphertext, FIPS_AES_ENCRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /******************************************************/ /* 
AES-ECB Single-Round Known Answer Decryption Test: */ /******************************************************/ aes_context = AES_CreateContext( aes_known_key, NULL, NSS_AES, PR_FALSE, aes_key_size, FIPS_AES_BLOCK_SIZE ); if( aes_context == NULL ) return( CKR_HOST_MEMORY ); aes_status = AES_Decrypt( aes_context, aes_computed_plaintext, &aes_bytes_decrypted, FIPS_AES_DECRYPT_LENGTH, aes_ecb_known_ciphertext, FIPS_AES_ENCRYPT_LENGTH ); AES_DestroyContext( aes_context, PR_TRUE ); if( ( aes_status != SECSuccess ) || ( aes_bytes_decrypted != FIPS_AES_DECRYPT_LENGTH ) || ( PORT_Memcmp( aes_computed_plaintext, aes_known_plaintext, FIPS_AES_DECRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /******************************************************/ /* AES-CBC Single-Round Known Answer Encryption Test. */ /******************************************************/ aes_context = AES_CreateContext( aes_known_key, aes_cbc_known_initialization_vector, NSS_AES_CBC, PR_TRUE, aes_key_size, FIPS_AES_BLOCK_SIZE ); if( aes_context == NULL ) return( CKR_HOST_MEMORY ); aes_status = AES_Encrypt( aes_context, aes_computed_ciphertext, &aes_bytes_encrypted, FIPS_AES_ENCRYPT_LENGTH, aes_known_plaintext, FIPS_AES_DECRYPT_LENGTH ); AES_DestroyContext( aes_context, PR_TRUE ); if( ( aes_status != SECSuccess ) || ( aes_bytes_encrypted != FIPS_AES_ENCRYPT_LENGTH ) || ( PORT_Memcmp( aes_computed_ciphertext, aes_cbc_known_ciphertext, FIPS_AES_ENCRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /******************************************************/ /* AES-CBC Single-Round Known Answer Decryption Test. 
*/ /******************************************************/ aes_context = AES_CreateContext( aes_known_key, aes_cbc_known_initialization_vector, NSS_AES_CBC, PR_FALSE, aes_key_size, FIPS_AES_BLOCK_SIZE ); if( aes_context == NULL ) return( CKR_HOST_MEMORY ); aes_status = AES_Decrypt( aes_context, aes_computed_plaintext, &aes_bytes_decrypted, FIPS_AES_DECRYPT_LENGTH, aes_cbc_known_ciphertext, FIPS_AES_ENCRYPT_LENGTH ); AES_DestroyContext( aes_context, PR_TRUE ); if( ( aes_status != SECSuccess ) || ( aes_bytes_decrypted != FIPS_AES_DECRYPT_LENGTH ) || ( PORT_Memcmp( aes_computed_plaintext, aes_known_plaintext, FIPS_AES_DECRYPT_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); return( CKR_OK ); } /* Known Hash Message (512-bits). Used for all hashes (incl. SHA-N [N>1]). */ static const PRUint8 known_hash_message[] = { "The test message for the MD2, MD5, and SHA-1 hashing algorithms." }; static CK_RV sftk_fips_MD2_PowerUpSelfTest( void ) { /* MD2 Known Digest Message (128-bits). */ static const PRUint8 md2_known_digest[] = { 0x41,0x5a,0x12,0xb2,0x3f,0x28,0x97,0x17, 0x0c,0x71,0x4e,0xcc,0x40,0xc8,0x1d,0x1b}; /* MD2 variables. */ MD2Context * md2_context; unsigned int md2_bytes_hashed; PRUint8 md2_computed_digest[MD2_LENGTH]; /***********************************************/ /* MD2 Single-Round Known Answer Hashing Test. */ /***********************************************/ md2_context = MD2_NewContext(); if( md2_context == NULL ) return( CKR_HOST_MEMORY ); MD2_Begin( md2_context ); MD2_Update( md2_context, known_hash_message, FIPS_KNOWN_HASH_MESSAGE_LENGTH ); MD2_End( md2_context, md2_computed_digest, &md2_bytes_hashed, MD2_LENGTH ); MD2_DestroyContext( md2_context , PR_TRUE ); if( ( md2_bytes_hashed != MD2_LENGTH ) || ( PORT_Memcmp( md2_computed_digest, md2_known_digest, MD2_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); return( CKR_OK ); } static CK_RV sftk_fips_MD5_PowerUpSelfTest( void ) { /* MD5 Known Digest Message (128-bits). 
*/ static const PRUint8 md5_known_digest[] = { 0x25,0xc8,0xc0,0x10,0xc5,0x6e,0x68,0x28, 0x28,0xa4,0xa5,0xd2,0x98,0x9a,0xea,0x2d}; /* MD5 variables. */ PRUint8 md5_computed_digest[MD5_LENGTH]; SECStatus md5_status; /***********************************************/ /* MD5 Single-Round Known Answer Hashing Test. */ /***********************************************/ md5_status = MD5_HashBuf( md5_computed_digest, known_hash_message, FIPS_KNOWN_HASH_MESSAGE_LENGTH ); if( ( md5_status != SECSuccess ) || ( PORT_Memcmp( md5_computed_digest, md5_known_digest, MD5_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); return( CKR_OK ); } /****************************************************/ /* Single Round HMAC SHA-X test */ /****************************************************/ static SECStatus sftk_fips_HMAC(unsigned char *hmac_computed, const PRUint8 *secret_key, unsigned int secret_key_length, const PRUint8 *message, unsigned int message_length, HASH_HashType hashAlg ) { SECStatus hmac_status = SECFailure; HMACContext *cx = NULL; SECHashObject *hashObj = NULL; unsigned int bytes_hashed = 0; hashObj = (SECHashObject *) HASH_GetRawHashObject(hashAlg); if (!hashObj) return( SECFailure ); cx = HMAC_Create(hashObj, secret_key, secret_key_length, PR_TRUE); /* PR_TRUE for in FIPS mode */ if (cx == NULL) return( SECFailure ); HMAC_Begin(cx); HMAC_Update(cx, message, message_length); hmac_status = HMAC_Finish(cx, hmac_computed, &bytes_hashed, hashObj->length); HMAC_Destroy(cx, PR_TRUE); return( hmac_status ); } static CK_RV sftk_fips_HMAC_PowerUpSelfTest( void ) { static const PRUint8 HMAC_known_secret_key[] = { "Firefox and ThunderBird are awesome!"}; static const PRUint8 HMAC_known_secret_key_length = sizeof HMAC_known_secret_key; /* known SHA1 hmac (20 bytes) */ static const PRUint8 known_SHA1_hmac[] = { 0xd5, 0x85, 0xf6, 0x5b, 0x39, 0xfa, 0xb9, 0x05, 0x3b, 0x57, 0x1d, 0x61, 0xe7, 0xb8, 0x84, 0x1e, 0x5d, 0x0e, 0x1e, 0x11}; /* known SHA224 hmac (28 bytes) */ static const PRUint8 
known_SHA224_hmac[] = { 0x1c, 0xc3, 0x06, 0x8e, 0xce, 0x37, 0x68, 0xfb, 0x1a, 0x82, 0x4a, 0xbe, 0x2b, 0x00, 0x51, 0xf8, 0x9d, 0xb6, 0xe0, 0x90, 0x0d, 0x00, 0xc9, 0x64, 0x9a, 0xb8, 0x98, 0x4e}; /* known SHA256 hmac (32 bytes) */ static const PRUint8 known_SHA256_hmac[] = { 0x05, 0x75, 0x9a, 0x9e, 0x70, 0x5e, 0xe7, 0x44, 0xe2, 0x46, 0x4b, 0x92, 0x22, 0x14, 0x22, 0xe0, 0x1b, 0x92, 0x8a, 0x0c, 0xfe, 0xf5, 0x49, 0xe9, 0xa7, 0x1b, 0x56, 0x7d, 0x1d, 0x29, 0x40, 0x48}; /* known SHA384 hmac (48 bytes) */ static const PRUint8 known_SHA384_hmac[] = { 0xcd, 0x56, 0x14, 0xec, 0x05, 0x53, 0x06, 0x2b, 0x7e, 0x9c, 0x8a, 0x18, 0x5e, 0xea, 0xf3, 0x91, 0x33, 0xfb, 0x64, 0xf6, 0xe3, 0x9f, 0x89, 0x0b, 0xaf, 0xbe, 0x83, 0x4d, 0x3f, 0x3c, 0x43, 0x4d, 0x4a, 0x0c, 0x56, 0x98, 0xf8, 0xca, 0xb4, 0xaa, 0x9a, 0xf4, 0x0a, 0xaf, 0x4f, 0x69, 0xca, 0x87}; /* known SHA512 hmac (64 bytes) */ static const PRUint8 known_SHA512_hmac[] = { 0xf6, 0x0e, 0x97, 0x12, 0x00, 0x67, 0x6e, 0xb9, 0x0c, 0xb2, 0x63, 0xf0, 0x60, 0xac, 0x75, 0x62, 0x70, 0x95, 0x2a, 0x52, 0x22, 0xee, 0xdd, 0xd2, 0x71, 0xb1, 0xe8, 0x26, 0x33, 0xd3, 0x13, 0x27, 0xcb, 0xff, 0x44, 0xef, 0x87, 0x97, 0x16, 0xfb, 0xd3, 0x0b, 0x48, 0xbe, 0x12, 0x4e, 0xda, 0xb1, 0x89, 0x90, 0xfb, 0x06, 0x0c, 0xbe, 0xe5, 0xc4, 0xff, 0x24, 0x37, 0x3d, 0xc7, 0xe4, 0xe4, 0x37}; SECStatus hmac_status; PRUint8 hmac_computed[HASH_LENGTH_MAX]; /***************************************************/ /* HMAC SHA-1 Single-Round Known Answer HMAC Test. */ /***************************************************/ hmac_status = sftk_fips_HMAC(hmac_computed, HMAC_known_secret_key, HMAC_known_secret_key_length, known_hash_message, FIPS_KNOWN_HASH_MESSAGE_LENGTH, HASH_AlgSHA1); if( ( hmac_status != SECSuccess ) || ( PORT_Memcmp( hmac_computed, known_SHA1_hmac, SHA1_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /***************************************************/ /* HMAC SHA-224 Single-Round Known Answer Test.
*/ /***************************************************/ hmac_status = sftk_fips_HMAC(hmac_computed, HMAC_known_secret_key, HMAC_known_secret_key_length, known_hash_message, FIPS_KNOWN_HASH_MESSAGE_LENGTH, HASH_AlgSHA224); if( ( hmac_status != SECSuccess ) || ( PORT_Memcmp( hmac_computed, known_SHA224_hmac, SHA224_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /***************************************************/ /* HMAC SHA-256 Single-Round Known Answer Test. */ /***************************************************/ hmac_status = sftk_fips_HMAC(hmac_computed, HMAC_known_secret_key, HMAC_known_secret_key_length, known_hash_message, FIPS_KNOWN_HASH_MESSAGE_LENGTH, HASH_AlgSHA256); if( ( hmac_status != SECSuccess ) || ( PORT_Memcmp( hmac_computed, known_SHA256_hmac, SHA256_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /***************************************************/ /* HMAC SHA-384 Single-Round Known Answer Test. */ /***************************************************/ hmac_status = sftk_fips_HMAC(hmac_computed, HMAC_known_secret_key, HMAC_known_secret_key_length, known_hash_message, FIPS_KNOWN_HASH_MESSAGE_LENGTH, HASH_AlgSHA384); if( ( hmac_status != SECSuccess ) || ( PORT_Memcmp( hmac_computed, known_SHA384_hmac, SHA384_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /***************************************************/ /* HMAC SHA-512 Single-Round Known Answer Test. */ /***************************************************/ hmac_status = sftk_fips_HMAC(hmac_computed, HMAC_known_secret_key, HMAC_known_secret_key_length, known_hash_message, FIPS_KNOWN_HASH_MESSAGE_LENGTH, HASH_AlgSHA512); if( ( hmac_status != SECSuccess ) || ( PORT_Memcmp( hmac_computed, known_SHA512_hmac, SHA512_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); return( CKR_OK ); }
/*
 * sftk_fips_SHA_PowerUpSelfTest: FIPS power-up known-answer tests for the
 * SHA-1/224/256/384/512 message digests.  Each round hashes the
 * module-wide known_hash_message with <ALG>_HashBuf and compares the
 * digest against a fixed vector.  Returns CKR_OK when all digests match,
 * CKR_DEVICE_ERROR on the first hash failure or mismatch.
 */
static CK_RV sftk_fips_SHA_PowerUpSelfTest( void ) { /* SHA-1 Known Digest Message (160-bits).
*/ static const PRUint8 sha1_known_digest[] = { 0x0a,0x6d,0x07,0xba,0x1e,0xbd,0x8a,0x1b, 0x72,0xf6,0xc7,0x22,0xf1,0x27,0x9f,0xf0, 0xe0,0x68,0x47,0x7a}; /* SHA-224 Known Digest Message (224-bits). */ static const PRUint8 sha224_known_digest[] = { 0x89,0x5e,0x7f,0xfd,0x0e,0xd8,0x35,0x6f, 0x64,0x6d,0xf2,0xde,0x5e,0xed,0xa6,0x7f, 0x29,0xd1,0x12,0x73,0x42,0x84,0x95,0x4f, 0x8e,0x08,0xe5,0xcb}; /* SHA-256 Known Digest Message (256-bits). */ static const PRUint8 sha256_known_digest[] = { 0x38,0xa9,0xc1,0xf0,0x35,0xf6,0x5d,0x61, 0x11,0xd4,0x0b,0xdc,0xce,0x35,0x14,0x8d, 0xf2,0xdd,0xaf,0xaf,0xcf,0xb7,0x87,0xe9, 0x96,0xa5,0xd2,0x83,0x62,0x46,0x56,0x79}; /* SHA-384 Known Digest Message (384-bits). */ static const PRUint8 sha384_known_digest[] = { 0x11,0xfe,0x1c,0x00,0x89,0x48,0xde,0xb3, 0x99,0xee,0x1c,0x18,0xb4,0x10,0xfb,0xfe, 0xe3,0xa8,0x2c,0xf3,0x04,0xb0,0x2f,0xc8, 0xa3,0xc4,0x5e,0xea,0x7e,0x60,0x48,0x7b, 0xce,0x2c,0x62,0xf7,0xbc,0xa7,0xe8,0xa3, 0xcf,0x24,0xce,0x9c,0xe2,0x8b,0x09,0x72}; /* SHA-512 Known Digest Message (512-bits). */ static const PRUint8 sha512_known_digest[] = { 0xc8,0xb3,0x27,0xf9,0x0b,0x24,0xc8,0xbf, 0x4c,0xba,0x33,0x54,0xf2,0x31,0xbf,0xdb, 0xab,0xfd,0xb3,0x15,0xd7,0xfa,0x48,0x99, 0x07,0x60,0x0f,0x57,0x41,0x1a,0xdd,0x28, 0x12,0x55,0x25,0xac,0xba,0x3a,0x99,0x12, 0x2c,0x7a,0x8f,0x75,0x3a,0xe1,0x06,0x6f, 0x30,0x31,0xc9,0x33,0xc6,0x1b,0x90,0x1a, 0x6c,0x98,0x9a,0x87,0xd0,0xb2,0xf8,0x07}; /* SHA-X variables. */ PRUint8 sha_computed_digest[HASH_LENGTH_MAX]; SECStatus sha_status; /*************************************************/ /* SHA-1 Single-Round Known Answer Hashing Test.
*/ /*************************************************/ sha_status = SHA1_HashBuf( sha_computed_digest, known_hash_message, FIPS_KNOWN_HASH_MESSAGE_LENGTH ); if( ( sha_status != SECSuccess ) || ( PORT_Memcmp( sha_computed_digest, sha1_known_digest, SHA1_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /***************************************************/ /* SHA-224 Single-Round Known Answer Hashing Test. */ /***************************************************/ sha_status = SHA224_HashBuf( sha_computed_digest, known_hash_message, FIPS_KNOWN_HASH_MESSAGE_LENGTH ); if( ( sha_status != SECSuccess ) || ( PORT_Memcmp( sha_computed_digest, sha224_known_digest, SHA224_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /***************************************************/ /* SHA-256 Single-Round Known Answer Hashing Test. */ /***************************************************/ sha_status = SHA256_HashBuf( sha_computed_digest, known_hash_message, FIPS_KNOWN_HASH_MESSAGE_LENGTH ); if( ( sha_status != SECSuccess ) || ( PORT_Memcmp( sha_computed_digest, sha256_known_digest, SHA256_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /***************************************************/ /* SHA-384 Single-Round Known Answer Hashing Test. */ /***************************************************/ sha_status = SHA384_HashBuf( sha_computed_digest, known_hash_message, FIPS_KNOWN_HASH_MESSAGE_LENGTH ); if( ( sha_status != SECSuccess ) || ( PORT_Memcmp( sha_computed_digest, sha384_known_digest, SHA384_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); /***************************************************/ /* SHA-512 Single-Round Known Answer Hashing Test.
*/ /***************************************************/ sha_status = SHA512_HashBuf( sha_computed_digest, known_hash_message, FIPS_KNOWN_HASH_MESSAGE_LENGTH ); if( ( sha_status != SECSuccess ) || ( PORT_Memcmp( sha_computed_digest, sha512_known_digest, SHA512_LENGTH ) != 0 ) ) return( CKR_DEVICE_ERROR ); return( CKR_OK ); } /* * Single round RSA Signature Known Answer Test */
/*
 * sftk_fips_RSA_PowerUpSigSelfTest: one RSA sign/verify known-answer
 * round for the hash given by shaAlg (SHA-1/256/384/512 supported; any
 * other value fails).  Hashes rsa_known_msg, signs the digest with
 * rsa_private_key via RSA_HashSign, compares the signature against
 * rsa_known_signature (FIPS_RSA_SIGNATURE_LENGTH bytes), then verifies it
 * with rsa_public_key via RSA_HashCheckSign.  Returns SECSuccess only if
 * sign, compare, and verify all succeed; SECFailure otherwise.
 */
static SECStatus sftk_fips_RSA_PowerUpSigSelfTest (HASH_HashType shaAlg, NSSLOWKEYPublicKey *rsa_public_key, NSSLOWKEYPrivateKey *rsa_private_key, const unsigned char *rsa_known_msg, const unsigned int rsa_kmsg_length, const unsigned char *rsa_known_signature) { SECOidTag shaOid; /* SHA OID */ unsigned char sha[HASH_LENGTH_MAX]; /* SHA digest */ unsigned int shaLength = 0; /* length of SHA */ unsigned int rsa_bytes_signed; unsigned char rsa_computed_signature[FIPS_RSA_SIGNATURE_LENGTH]; SECStatus rv; if (shaAlg == HASH_AlgSHA1) { if (SHA1_HashBuf(sha, rsa_known_msg, rsa_kmsg_length) != SECSuccess) { goto loser; } shaLength = SHA1_LENGTH; shaOid = SEC_OID_SHA1; } else if (shaAlg == HASH_AlgSHA256) { if (SHA256_HashBuf(sha, rsa_known_msg, rsa_kmsg_length) != SECSuccess) { goto loser; } shaLength = SHA256_LENGTH; shaOid = SEC_OID_SHA256; } else if (shaAlg == HASH_AlgSHA384) { if (SHA384_HashBuf(sha, rsa_known_msg, rsa_kmsg_length) != SECSuccess) { goto loser; } shaLength = SHA384_LENGTH; shaOid = SEC_OID_SHA384; } else if (shaAlg == HASH_AlgSHA512) { if (SHA512_HashBuf(sha, rsa_known_msg, rsa_kmsg_length) != SECSuccess) { goto loser; } shaLength = SHA512_LENGTH; shaOid = SEC_OID_SHA512; } else { goto loser; } /*************************************************/ /* RSA Single-Round Known Answer Signature Test. */ /*************************************************/ /* Perform RSA signature with the RSA private key.
*/ rv = RSA_HashSign( shaOid, rsa_private_key, rsa_computed_signature, &rsa_bytes_signed, FIPS_RSA_SIGNATURE_LENGTH, sha, shaLength); if( ( rv != SECSuccess ) || ( rsa_bytes_signed != FIPS_RSA_SIGNATURE_LENGTH ) || ( PORT_Memcmp( rsa_computed_signature, rsa_known_signature, FIPS_RSA_SIGNATURE_LENGTH ) != 0 ) ) { goto loser; } /****************************************************/ /* RSA Single-Round Known Answer Verification Test. */ /****************************************************/ /* Perform RSA verification with the RSA public key. */ rv = RSA_HashCheckSign( shaOid, rsa_public_key, rsa_computed_signature, rsa_bytes_signed, sha, shaLength); if( rv != SECSuccess ) { goto loser; } return( SECSuccess ); loser: return( SECFailure ); }
/*
 * sftk_fips_RSA_PowerUpSelfTest: FIPS power-up known-answer tests for RSA.
 * Builds a fixed 2048-bit RSA key pair from the constant key material
 * below, then runs: (1) a public-key encryption KAT against
 * rsa_known_ciphertext, (2) a private-key decryption KAT against
 * rsa_known_plaintext_msg, and (3) sign/verify KATs via
 * sftk_fips_RSA_PowerUpSigSelfTest for SHA-256, SHA-384 and SHA-512.
 * Returns CKR_OK when every KAT passes; CKR_DEVICE_ERROR on any mismatch
 * or failure; CKR_HOST_MEMORY only on the CREATE_TEMP_ARENAS allocation
 * path.  All byte arrays below are fixed FIPS test vectors — do not edit.
 */
static CK_RV sftk_fips_RSA_PowerUpSelfTest( void ) { /* RSA Known Modulus used in both Public/Private Key Values (2048-bits). */ static const PRUint8 rsa_modulus[FIPS_RSA_MODULUS_LENGTH] = { 0xb8, 0x15, 0x00, 0x33, 0xda, 0x0c, 0x9d, 0xa5, 0x14, 0x8c, 0xde, 0x1f, 0x23, 0x07, 0x54, 0xe2, 0xc6, 0xb9, 0x51, 0x04, 0xc9, 0x65, 0x24, 0x6e, 0x0a, 0x46, 0x34, 0x5c, 0x37, 0x86, 0x6b, 0x88, 0x24, 0x27, 0xac, 0xa5, 0x02, 0x79, 0xfb, 0xed, 0x75, 0xc5, 0x3f, 0x6e, 0xdf, 0x05, 0x5f, 0x0f, 0x20, 0x70, 0xa0, 0x5b, 0x85, 0xdb, 0xac, 0xb9, 0x5f, 0x02, 0xc2, 0x64, 0x1e, 0x84, 0x5b, 0x3e, 0xad, 0xbf, 0xf6, 0x2e, 0x51, 0xd6, 0xad, 0xf7, 0xa7, 0x86, 0x75, 0x86, 0xec, 0xa7, 0xe1, 0xf7, 0x08, 0xbf, 0xdc, 0x56, 0xb1, 0x3b, 0xca, 0xd8, 0xfc, 0x51, 0xdf, 0x9a, 0x2a, 0x37, 0x06, 0xf2, 0xd1, 0x6b, 0x9a, 0x5e, 0x2a, 0xe5, 0x20, 0x57, 0x35, 0x9f, 0x1f, 0x98, 0xcf, 0x40, 0xc7, 0xd6, 0x98, 0xdb, 0xde, 0xf5, 0x64, 0x53, 0xf7, 0x9d, 0x45, 0xf3, 0xd6, 0x78, 0xb9, 0xe3, 0xa3, 0x20, 0xcd, 0x79, 0x43, 0x35, 0xef, 0xd7, 0xfb, 0xb9, 0x80, 0x88, 0x27, 0x2f, 0x63, 0xa8, 0x67, 0x3d, 0x4a, 0xfa, 0x06, 0xc6, 0xd2, 0x86, 0x0b, 0xa7, 0x28, 0xfd, 0xe0, 0x1e, 0x93, 0x4b, 0x17, 0x2e, 0xb0, 0x11, 0x6f, 0xc6, 0x2b, 0x98, 0x0f, 0x15, 0xe3, 0x87, 0x16, 0x7a, 0x7c, 0x67, 0x3e, 0x12,
0x2b, 0xf8, 0xbe, 0x48, 0xc1, 0x97, 0x47, 0xf4, 0x1f, 0x81, 0x80, 0x12, 0x28, 0xe4, 0x7b, 0x1e, 0xb7, 0x00, 0xa4, 0xde, 0xaa, 0xfb, 0x0f, 0x77, 0x84, 0xa3, 0xd6, 0xb2, 0x03, 0x48, 0xdd, 0x53, 0x8b, 0x46, 0x41, 0x28, 0x52, 0xc4, 0x53, 0xf0, 0x1c, 0x95, 0xd9, 0x36, 0xe0, 0x0f, 0x26, 0x46, 0x9c, 0x61, 0x0e, 0x80, 0xca, 0x86, 0xaf, 0x39, 0x95, 0xe5, 0x60, 0x43, 0x61, 0x3e, 0x2b, 0xb4, 0xe8, 0xbd, 0x8d, 0x77, 0x62, 0xf5, 0x32, 0x43, 0x2f, 0x4b, 0x65, 0x82, 0x14, 0xdd, 0x29, 0x5b}; /* RSA Known Public Key Values (24-bits). */ static const PRUint8 rsa_public_exponent[FIPS_RSA_PUBLIC_EXPONENT_LENGTH] = { 0x01, 0x00, 0x01 }; /* RSA Known Private Key Values (version is 8-bits), */ /* (private exponent is 2048-bits), */ /* (private prime0 is 1024-bits), */ /* (private prime1 is 1024-bits), */ /* (private prime exponent0 is 1024-bits), */ /* (private prime exponent1 is 1024-bits), */ /* and (private coefficient is 1024-bits). */ static const PRUint8 rsa_version[] = { 0x00 }; static const PRUint8 rsa_private_exponent[FIPS_RSA_PRIVATE_EXPONENT_LENGTH] = {0x29, 0x08, 0x05, 0x53, 0x89, 0x76, 0xe6, 0x6c, 0xb5, 0x77, 0xf0, 0xca, 0xdf, 0xf3, 0xf2, 0x67, 0xda, 0x03, 0xd4, 0x9b, 0x4c, 0x88, 0xce, 0xe5, 0xf8, 0x44, 0x4d, 0xc7, 0x80, 0x58, 0xe5, 0xff, 0x22, 0x8f, 0xf5, 0x5b, 0x92, 0x81, 0xbe, 0x35, 0xdf, 0xda, 0x67, 0x99, 0x3e, 0xfc, 0xe3, 0x83, 0x6b, 0xa7, 0xaf, 0x16, 0xb7, 0x6f, 0x8f, 0xc0, 0x81, 0xfd, 0x0b, 0x77, 0x65, 0x95, 0xfb, 0x00, 0xad, 0x99, 0xec, 0x35, 0xc6, 0xe8, 0x23, 0x3e, 0xe0, 0x88, 0x88, 0x09, 0xdb, 0x16, 0x50, 0xb7, 0xcf, 0xab, 0x74, 0x61, 0x9e, 0x7f, 0xc5, 0x67, 0x38, 0x56, 0xc7, 0x90, 0x85, 0x78, 0x5e, 0x84, 0x21, 0x49, 0xea, 0xce, 0xb2, 0xa0, 0xff, 0xe4, 0x70, 0x7f, 0x57, 0x7b, 0xa8, 0x36, 0xb8, 0x54, 0x8d, 0x1d, 0xf5, 0x44, 0x9d, 0x68, 0x59, 0xf9, 0x24, 0x6e, 0x85, 0x8f, 0xc3, 0x5f, 0x8a, 0x2c, 0x94, 0xb7, 0xbc, 0x0e, 0xa5, 0xef, 0x93, 0x06, 0x38, 0xcd, 0x07, 0x0c, 0xae, 0xb8, 0x44, 0x1a, 0xd8, 0xe7, 0xf5, 0x9a, 0x1e, 0x9c, 0x18, 0xc7, 0x6a, 0xc2, 0x7f, 0x28, 0x01,
0x4f, 0xb4, 0xb8, 0x90, 0x97, 0x5a, 0x43, 0x38, 0xad, 0xe8, 0x95, 0x68, 0x83, 0x1a, 0x1b, 0x10, 0x07, 0xe6, 0x02, 0x52, 0x1f, 0xbf, 0x76, 0x6b, 0x46, 0xd6, 0xfb, 0xc3, 0xbe, 0xb5, 0xac, 0x52, 0x53, 0x01, 0x1c, 0xf3, 0xc5, 0xeb, 0x64, 0xf2, 0x1e, 0xc4, 0x38, 0xe9, 0xaa, 0xd9, 0xc3, 0x72, 0x51, 0xa5, 0x44, 0x58, 0x69, 0x0b, 0x1b, 0x98, 0x7f, 0xf2, 0x23, 0xff, 0xeb, 0xf0, 0x75, 0x24, 0xcf, 0xc5, 0x1e, 0xb8, 0x6a, 0xc5, 0x2f, 0x4f, 0x23, 0x50, 0x7d, 0x15, 0x9d, 0x19, 0x7a, 0x0b, 0x82, 0xe0, 0x21, 0x5b, 0x5f, 0x9d, 0x50, 0x2b, 0x83, 0xe4, 0x48, 0xcc, 0x39, 0xe5, 0xfb, 0x13, 0x7b, 0x6f, 0x81 }; static const PRUint8 rsa_prime0[FIPS_RSA_PRIME0_LENGTH] = { 0xe4, 0xbf, 0x21, 0x62, 0x9b, 0xa9, 0x77, 0x40, 0x8d, 0x2a, 0xce, 0xa1, 0x67, 0x5a, 0x4c, 0x96, 0x45, 0x98, 0x67, 0xbd, 0x75, 0x22, 0x33, 0x6f, 0xe6, 0xcb, 0x77, 0xde, 0x9e, 0x97, 0x7d, 0x96, 0x8c, 0x5e, 0x5d, 0x34, 0xfb, 0x27, 0xfc, 0x6d, 0x74, 0xdb, 0x9d, 0x2e, 0x6d, 0xf6, 0xea, 0xfc, 0xce, 0x9e, 0xda, 0xa7, 0x25, 0xa2, 0xf4, 0x58, 0x6d, 0x0a, 0x3f, 0x01, 0xc2, 0xb4, 0xab, 0x38, 0xc1, 0x14, 0x85, 0xb6, 0xfa, 0x94, 0xc3, 0x85, 0xf9, 0x3c, 0x2e, 0x96, 0x56, 0x01, 0xe7, 0xd6, 0x14, 0x71, 0x4f, 0xfb, 0x4c, 0x85, 0x52, 0xc4, 0x61, 0x1e, 0xa5, 0x1e, 0x96, 0x13, 0x0d, 0x8f, 0x66, 0xae, 0xa0, 0xcd, 0x7d, 0x25, 0x66, 0x19, 0x15, 0xc2, 0xcf, 0xc3, 0x12, 0x3c, 0xe8, 0xa4, 0x52, 0x4c, 0xcb, 0x28, 0x3c, 0xc4, 0xbf, 0x95, 0x33, 0xe3, 0x81, 0xea, 0x0c, 0x6c, 0xa2, 0x05}; static const PRUint8 rsa_prime1[FIPS_RSA_PRIME1_LENGTH] = { 0xce, 0x03, 0x94, 0xf4, 0xa9, 0x2c, 0x1e, 0x06, 0xe7, 0x40, 0x30, 0x01, 0xf7, 0xbb, 0x68, 0x8c, 0x27, 0xd2, 0x15, 0xe3, 0x28, 0x49, 0x5b, 0xa8, 0xc1, 0x9a, 0x42, 0x7e, 0x31, 0xf9, 0x08, 0x34, 0x81, 0xa2, 0x0f, 0x04, 0x61, 0x34, 0xe3, 0x36, 0x92, 0xb1, 0x09, 0x2b, 0xe9, 0xef, 0x84, 0x88, 0xbe, 0x9c, 0x98, 0x60, 0xa6, 0x60, 0x84, 0xe9, 0x75, 0x6f, 0xcc, 0x81, 0xd1, 0x96, 0xef, 0xdd, 0x2e, 0xca, 0xc4, 0xf5, 0x42, 0xfb, 0x13, 0x2b, 0x57, 0xbf, 0x14, 0x5e, 0xc2, 0x7f, 0x77, 0x35, 0x29, 0xc4, 0xe5, 0xe0, 0xf9,
0x6d, 0x15, 0x4a, 0x42, 0x56, 0x1c, 0x3e, 0x0c, 0xc5, 0xce, 0x70, 0x08, 0x63, 0x1e, 0x73, 0xdb, 0x7e, 0x74, 0x05, 0x32, 0x01, 0xc6, 0x36, 0x32, 0x75, 0x6b, 0xed, 0x9d, 0xfe, 0x7c, 0x7e, 0xa9, 0x57, 0xb4, 0xe9, 0x22, 0xe4, 0xe7, 0xfe, 0x36, 0x07, 0x9b, 0xdf}; static const PRUint8 rsa_exponent0[FIPS_RSA_EXPONENT0_LENGTH] = { 0x04, 0x5a, 0x3a, 0xa9, 0x64, 0xaa, 0xd9, 0xd1, 0x09, 0x9e, 0x99, 0xe5, 0xea, 0x50, 0x86, 0x8a, 0x89, 0x72, 0x77, 0xee, 0xdb, 0xee, 0xb5, 0xa9, 0xd8, 0x6b, 0x60, 0xb1, 0x84, 0xb4, 0xff, 0x37, 0xc1, 0x1d, 0xfe, 0x8a, 0x06, 0x89, 0x61, 0x3d, 0x37, 0xef, 0x01, 0xd3, 0xa3, 0x56, 0x02, 0x6c, 0xa3, 0x05, 0xd4, 0xc5, 0x3f, 0x6b, 0x15, 0x59, 0x25, 0x61, 0xff, 0x86, 0xea, 0x0c, 0x84, 0x01, 0x85, 0x72, 0xfd, 0x84, 0x58, 0xca, 0x41, 0xda, 0x27, 0xbe, 0xe4, 0x68, 0x09, 0xe4, 0xe9, 0x63, 0x62, 0x6a, 0x31, 0x8a, 0x67, 0x8f, 0x55, 0xde, 0xd4, 0xb6, 0x3f, 0x90, 0x10, 0x6c, 0xf6, 0x62, 0x17, 0x23, 0x15, 0x7e, 0x33, 0x76, 0x65, 0xb5, 0xee, 0x7b, 0x11, 0x76, 0xf5, 0xbe, 0xe0, 0xf2, 0x57, 0x7a, 0x8c, 0x97, 0x0c, 0x68, 0xf5, 0xf8, 0x41, 0xcf, 0x7f, 0x66, 0x53, 0xac, 0x31, 0x7d}; static const PRUint8 rsa_exponent1[FIPS_RSA_EXPONENT1_LENGTH] = { 0x93, 0x54, 0x14, 0x6e, 0x73, 0x9d, 0x4d, 0x4b, 0xfa, 0x8c, 0xf8, 0xc8, 0x2f, 0x76, 0x22, 0xea, 0x38, 0x80, 0x11, 0x8f, 0x05, 0xfc, 0x90, 0x44, 0x3b, 0x50, 0x2a, 0x45, 0x3d, 0x4f, 0xaf, 0x02, 0x7d, 0xc2, 0x7b, 0xa2, 0xd2, 0x31, 0x94, 0x5c, 0x2e, 0xc3, 0xd4, 0x9f, 0x47, 0x09, 0x37, 0x6a, 0xe3, 0x85, 0xf1, 0xa3, 0x0c, 0xd8, 0xf1, 0xb4, 0x53, 0x7b, 0xc4, 0x71, 0x02, 0x86, 0x42, 0xbb, 0x96, 0xff, 0x03, 0xa3, 0xb2, 0x67, 0x03, 0xea, 0x77, 0x31, 0xfb, 0x4b, 0x59, 0x24, 0xf7, 0x07, 0x59, 0xfb, 0xa9, 0xba, 0x1e, 0x26, 0x58, 0x97, 0x66, 0xa1, 0x56, 0x49, 0x39, 0xb1, 0x2c, 0x55, 0x0a, 0x6a, 0x78, 0x18, 0xba, 0xdb, 0xcf, 0xf4, 0xf7, 0x32, 0x35, 0xa2, 0x04, 0xab, 0xdc, 0xa7, 0x6d, 0xd9, 0xd5, 0x06, 0x6f, 0xec, 0x7d, 0x40, 0x4c, 0xe8, 0x0e, 0xd0, 0xc9, 0xaa, 0xdf, 0x59}; static const PRUint8 rsa_coefficient[FIPS_RSA_COEFFICIENT_LENGTH] = {
0x17, 0xd7, 0xf5, 0x0a, 0xf0, 0x68, 0x97, 0x96, 0xc4, 0x29, 0x18, 0x77, 0x9a, 0x1f, 0xe3, 0xf3, 0x12, 0x13, 0x0f, 0x7e, 0x7b, 0xb9, 0xc1, 0x91, 0xf9, 0xc7, 0x08, 0x56, 0x5c, 0xa4, 0xbc, 0x83, 0x71, 0xf9, 0x78, 0xd9, 0x2b, 0xec, 0xfe, 0x6b, 0xdc, 0x2f, 0x63, 0xc9, 0xcd, 0x50, 0x14, 0x5b, 0xd3, 0x6e, 0x85, 0x4d, 0x0c, 0xa2, 0x0b, 0xa0, 0x09, 0xb6, 0xca, 0x34, 0x9c, 0xc2, 0xc1, 0x4a, 0xb0, 0xbc, 0x45, 0x93, 0xa5, 0x7e, 0x99, 0xb5, 0xbd, 0xe4, 0x69, 0x29, 0x08, 0x28, 0xd2, 0xcd, 0xab, 0x24, 0x78, 0x48, 0x41, 0x26, 0x0b, 0x37, 0xa3, 0x43, 0xd1, 0x95, 0x1a, 0xd6, 0xee, 0x22, 0x1c, 0x00, 0x0b, 0xc2, 0xb7, 0xa4, 0xa3, 0x21, 0xa9, 0xcd, 0xe4, 0x69, 0xd3, 0x45, 0x02, 0xb1, 0xb7, 0x3a, 0xbf, 0x51, 0x35, 0x1b, 0x78, 0xc2, 0xcf, 0x0c, 0x0d, 0x60, 0x09, 0xa9, 0x44, 0x02}; /* RSA Known Plaintext Message (1024-bits). */ static const PRUint8 rsa_known_plaintext_msg[FIPS_RSA_MESSAGE_LENGTH] = { "Known plaintext message utilized" "for RSA Encryption & Decryption" "blocks SHA256, SHA384 and " "SHA512 RSA Signature KAT tests. " "Known plaintext message utilized" "for RSA Encryption & Decryption" "blocks SHA256, SHA384 and " "SHA512 RSA Signature KAT tests."}; /* RSA Known Ciphertext (2048-bits).
*/ static const PRUint8 rsa_known_ciphertext[] = { 0x04, 0x12, 0x46, 0xe3, 0x6a, 0xee, 0xde, 0xdd, 0x49, 0xa1, 0xd9, 0x83, 0xf7, 0x35, 0xf9, 0x70, 0x88, 0x03, 0x2d, 0x01, 0x8b, 0xd1, 0xbf, 0xdb, 0xe5, 0x1c, 0x85, 0xbe, 0xb5, 0x0b, 0x48, 0x45, 0x7a, 0xf0, 0xa0, 0xe3, 0xa2, 0xbb, 0x4b, 0xf6, 0x27, 0xd0, 0x1b, 0x12, 0xe3, 0x77, 0x52, 0x34, 0x9e, 0x8e, 0x03, 0xd2, 0xf8, 0x79, 0x6e, 0x39, 0x79, 0x53, 0x3c, 0x44, 0x14, 0x94, 0xbb, 0x8d, 0xaa, 0x14, 0x44, 0xa0, 0x7b, 0xa5, 0x8c, 0x93, 0x5f, 0x99, 0xa4, 0xa3, 0x6e, 0x7a, 0x38, 0x40, 0x78, 0xfa, 0x36, 0x91, 0x5e, 0x9a, 0x9c, 0xba, 0x1e, 0xd4, 0xf9, 0xda, 0x4b, 0x0f, 0xa8, 0xa3, 0x1c, 0xf3, 0x3a, 0xd1, 0xa5, 0xb4, 0x51, 0x16, 0xed, 0x4b, 0xcf, 0xec, 0x93, 0x7b, 0x90, 0x21, 0xbc, 0x3a, 0xf4, 0x0b, 0xd1, 0x3a, 0x2b, 0xba, 0xa6, 0x7d, 0x5b, 0x53, 0xd8, 0x64, 0xf9, 0x29, 0x7b, 0x7f, 0x77, 0x3e, 0x51, 0x4c, 0x9a, 0x94, 0xd2, 0x4b, 0x4a, 0x8d, 0x61, 0x74, 0x97, 0xae, 0x53, 0x6a, 0xf4, 0x90, 0xc2, 0x2c, 0x49, 0xe2, 0xfa, 0xeb, 0x91, 0xc5, 0xe5, 0x83, 0x13, 0xc9, 0x44, 0x4b, 0x95, 0x2c, 0x57, 0x70, 0x15, 0x5c, 0x64, 0x8d, 0x1a, 0xfd, 0x2a, 0xc7, 0xb2, 0x9c, 0x5c, 0x99, 0xd3, 0x4a, 0xfd, 0xdd, 0xf6, 0x82, 0x87, 0x8c, 0x5a, 0xc4, 0xa8, 0x0d, 0x2a, 0xef, 0xc3, 0xa2, 0x7e, 0x8e, 0x67, 0x9f, 0x6f, 0x63, 0xdb, 0xbb, 0x1d, 0x31, 0xc4, 0xbb, 0xbc, 0x13, 0x3f, 0x54, 0xc6, 0xf6, 0xc5, 0x28, 0x32, 0xab, 0x96, 0x42, 0x10, 0x36, 0x40, 0x92, 0xbb, 0x57, 0x55, 0x38, 0xf5, 0x43, 0x7e, 0x43, 0xc4, 0x65, 0x47, 0x64, 0xaa, 0x0f, 0x4c, 0xe9, 0x49, 0x16, 0xec, 0x6a, 0x50, 0xfd, 0x14, 0x49, 0xca, 0xdb, 0x44, 0x54, 0xca, 0xbe, 0xa3, 0x0e, 0x5f, 0xef}; /* RSA Known Signed Hash (2048-bits).
*/ static const PRUint8 rsa_known_sha256_signature[] = { 0x8c, 0x2d, 0x2e, 0xfb, 0x37, 0xb5, 0x6f, 0x38, 0x9f, 0x06, 0x5a, 0xf3, 0x8c, 0xa0, 0xd0, 0x7a, 0xde, 0xcf, 0xf9, 0x14, 0x95, 0x59, 0xd3, 0x5f, 0x51, 0x5d, 0x5d, 0xad, 0xd8, 0x71, 0x33, 0x50, 0x1d, 0x03, 0x3b, 0x3a, 0x32, 0x00, 0xb4, 0xde, 0x7f, 0xe4, 0xb1, 0xe5, 0x6b, 0x83, 0xf4, 0x80, 0x10, 0x3b, 0xb8, 0x8a, 0xdb, 0xe8, 0x0a, 0x42, 0x9e, 0x8d, 0xd7, 0xbe, 0xed, 0xde, 0x5a, 0x3d, 0xc6, 0xdb, 0xfe, 0x49, 0x6a, 0xe9, 0x1e, 0x75, 0x66, 0xf1, 0x3f, 0x9e, 0x3f, 0xff, 0x05, 0x65, 0xde, 0xca, 0x62, 0x62, 0xf3, 0xec, 0x53, 0x09, 0xa0, 0x37, 0xd5, 0x66, 0x62, 0x72, 0x14, 0xb6, 0x51, 0x32, 0x67, 0x50, 0xc1, 0xe1, 0x2f, 0x9e, 0x98, 0x4e, 0x53, 0x96, 0x55, 0x4b, 0xc4, 0x92, 0xc3, 0xb4, 0x80, 0xf0, 0x35, 0xc9, 0x00, 0x4b, 0x5c, 0x85, 0x92, 0xb1, 0xe8, 0x6e, 0xa5, 0x51, 0x38, 0x9f, 0xc9, 0x11, 0xb6, 0x14, 0xdf, 0x34, 0x64, 0x40, 0x82, 0x82, 0xde, 0x16, 0x69, 0x93, 0x89, 0x4e, 0x5c, 0x32, 0xf2, 0x0a, 0x4e, 0x9e, 0xbd, 0x63, 0x99, 0x4f, 0xf3, 0x15, 0x90, 0xc2, 0xfe, 0x6f, 0xb7, 0xf4, 0xad, 0xd4, 0x8e, 0x0b, 0xd2, 0xf5, 0x22, 0xd2, 0x71, 0x65, 0x13, 0xf7, 0x82, 0x7b, 0x75, 0xb6, 0xc1, 0xb4, 0x45, 0xbd, 0x8f, 0x95, 0xcf, 0x5b, 0x95, 0x32, 0xef, 0x18, 0x5f, 0xd3, 0xdf, 0x7e, 0x22, 0xdd, 0x25, 0xeb, 0xe1, 0xbf, 0x3b, 0x9a, 0x55, 0x75, 0x4f, 0x3c, 0x38, 0x67, 0x57, 0x04, 0x04, 0x57, 0x27, 0xf6, 0x34, 0x0e, 0x57, 0x8a, 0x7c, 0xff, 0x7d, 0xca, 0x8c, 0x06, 0xf8, 0x9d, 0xdb, 0xe4, 0xd8, 0x19, 0xdd, 0x4d, 0xfd, 0x8f, 0xa0, 0x06, 0x53, 0xe8, 0x33, 0x00, 0x70, 0x3f, 0x6b, 0xc3, 0xbd, 0x9a, 0x78, 0xb5, 0xa9, 0xef, 0x6d, 0xda, 0x67, 0x92}; /* RSA Known Signed Hash (2048-bits).
*/ static const PRUint8 rsa_known_sha384_signature[] = { 0x20, 0x2d, 0x21, 0x3a, 0xaa, 0x1e, 0x05, 0x15, 0x5c, 0xca, 0x84, 0x86, 0xc0, 0x15, 0x81, 0xdf, 0xd4, 0x06, 0x9f, 0xe0, 0xc1, 0xed, 0xef, 0x0f, 0xfe, 0xb3, 0xc3, 0xbb, 0x28, 0xa5, 0x56, 0xbf, 0xe3, 0x11, 0x5c, 0xc2, 0xc0, 0x0b, 0xfa, 0xfa, 0x3d, 0xd3, 0x06, 0x20, 0xe2, 0xc9, 0xe4, 0x66, 0x28, 0xb7, 0xc0, 0x3b, 0x3c, 0x96, 0xc6, 0x49, 0x3b, 0xcf, 0x86, 0x49, 0x31, 0xaf, 0x5b, 0xa3, 0xec, 0x63, 0x10, 0xdf, 0xda, 0x2f, 0x68, 0xac, 0x7b, 0x3a, 0x49, 0xfa, 0xe6, 0x0d, 0xfe, 0x37, 0x17, 0x56, 0x8e, 0x5c, 0x48, 0x97, 0x43, 0xf7, 0xa0, 0xbc, 0xe3, 0x4b, 0x42, 0xde, 0x58, 0x1d, 0xd9, 0x5d, 0xb3, 0x08, 0x35, 0xbd, 0xa4, 0xe1, 0x80, 0xc3, 0x64, 0xab, 0x21, 0x97, 0xad, 0xfb, 0x71, 0xee, 0xa3, 0x3d, 0x9c, 0xaa, 0xfa, 0x16, 0x60, 0x46, 0x32, 0xda, 0x44, 0x2e, 0x10, 0x92, 0x20, 0xd8, 0x98, 0x80, 0x84, 0x75, 0x5b, 0x70, 0x91, 0x00, 0x33, 0x19, 0x69, 0xc9, 0x2a, 0xec, 0x3d, 0xe5, 0x5f, 0x0f, 0x9a, 0xa7, 0x97, 0x1f, 0x79, 0xc3, 0x1d, 0x65, 0x74, 0x62, 0xc5, 0xa1, 0x23, 0x65, 0x4b, 0x84, 0xa1, 0x03, 0x98, 0xf3, 0xf1, 0x02, 0x24, 0xca, 0xe5, 0xd4, 0xc8, 0xa2, 0x30, 0xad, 0x72, 0x7d, 0x29, 0x60, 0x1a, 0x8e, 0x6f, 0x23, 0xa4, 0xda, 0x68, 0xa4, 0x45, 0x9c, 0x39, 0x70, 0x44, 0x18, 0x4b, 0x73, 0xfe, 0xf8, 0x33, 0x53, 0x1d, 0x7e, 0x93, 0x93, 0xac, 0xc7, 0x1e, 0x6e, 0x6b, 0xfd, 0x9e, 0xba, 0xa6, 0x71, 0x70, 0x47, 0x6a, 0xd6, 0x82, 0x32, 0xa2, 0x6e, 0x20, 0x72, 0xb0, 0xba, 0xec, 0x91, 0xbb, 0x6b, 0xcc, 0x84, 0x0a, 0x33, 0x2b, 0x8a, 0x8d, 0xeb, 0x71, 0xcd, 0xca, 0x67, 0x1b, 0xad, 0x10, 0xd4, 0xce, 0x4f, 0xc0, 0x29, 0xec, 0xfa, 0xed, 0xfa}; /* RSA Known Signed Hash (2048-bits).
*/ static const PRUint8 rsa_known_sha512_signature[] = { 0x35, 0x0e, 0x74, 0x9d, 0xeb, 0xc7, 0x67, 0x31, 0x9f, 0xff, 0x0b, 0xbb, 0x5e, 0x66, 0xb4, 0x2f, 0xbf, 0x72, 0x60, 0x4f, 0xe9, 0xbd, 0xec, 0xc8, 0x17, 0x79, 0x5f, 0x39, 0x83, 0xb4, 0x54, 0x2e, 0x01, 0xb9, 0xd3, 0x20, 0x47, 0xcb, 0xd4, 0x42, 0xf2, 0x6e, 0x36, 0xc1, 0x97, 0xad, 0xef, 0x8e, 0xe6, 0x51, 0xee, 0x5e, 0x9e, 0x88, 0xb4, 0x9d, 0xda, 0x3e, 0x77, 0x4b, 0xe8, 0xae, 0x48, 0x53, 0x2c, 0xc4, 0xd3, 0x25, 0x6b, 0x23, 0xb7, 0x54, 0x3c, 0x95, 0x8f, 0xfb, 0x6f, 0x6d, 0xc5, 0x56, 0x39, 0x69, 0x28, 0x0e, 0x74, 0x9b, 0x31, 0xe8, 0x76, 0x77, 0x2b, 0xc1, 0x44, 0x89, 0x81, 0x93, 0xfc, 0xf6, 0xec, 0x5f, 0x8f, 0x89, 0xfc, 0x1d, 0xa4, 0x53, 0x58, 0x8c, 0xe9, 0xc0, 0xc0, 0x26, 0xe6, 0xdf, 0x6d, 0x27, 0xb1, 0x8e, 0x3e, 0xb6, 0x47, 0xe1, 0x02, 0x96, 0xc2, 0x5f, 0x7f, 0x3d, 0xc5, 0x6c, 0x2f, 0xea, 0xaa, 0x5e, 0x39, 0xfc, 0x77, 0xca, 0x00, 0x02, 0x5c, 0x64, 0x7c, 0xce, 0x7d, 0x63, 0x82, 0x05, 0xed, 0xf7, 0x5b, 0x55, 0x58, 0xc0, 0xeb, 0x76, 0xd7, 0x95, 0x55, 0x37, 0x85, 0x7d, 0x17, 0xad, 0xd2, 0x11, 0xfd, 0x97, 0x48, 0xb5, 0xc2, 0x5e, 0xc7, 0x62, 0xc0, 0xe0, 0x68, 0xa8, 0x61, 0x14, 0x41, 0xca, 0x25, 0x3a, 0xec, 0x48, 0x54, 0x22, 0x83, 0x2b, 0x69, 0x54, 0xfd, 0xc8, 0x99, 0x9a, 0xee, 0x37, 0x03, 0xa3, 0x8f, 0x0f, 0x32, 0xb0, 0xaa, 0x74, 0x39, 0x04, 0x7c, 0xd9, 0xc2, 0x8f, 0xbe, 0xf2, 0xc4, 0xbe, 0xdd, 0x7a, 0x7a, 0x7f, 0x72, 0xd3, 0x80, 0x59, 0x18, 0xa0, 0xa1, 0x2d, 0x6f, 0xa3, 0xa9, 0x48, 0xed, 0x20, 0xa6, 0xea, 0xaa, 0x10, 0x83, 0x98, 0x0c, 0x13, 0x69, 0x6e, 0xcd, 0x31, 0x6b, 0xd0, 0x66, 0xa6, 0x5e, 0x30, 0x0c, 0x82, 0xd5, 0x81}; static const RSAPublicKey bl_public_key = { NULL, { FIPS_RSA_TYPE, (unsigned char *)rsa_modulus, FIPS_RSA_MODULUS_LENGTH }, { FIPS_RSA_TYPE, (unsigned char *)rsa_public_exponent, FIPS_RSA_PUBLIC_EXPONENT_LENGTH } }; static const RSAPrivateKey bl_private_key = { NULL, { FIPS_RSA_TYPE, (unsigned char *)rsa_version, FIPS_RSA_PRIVATE_VERSION_LENGTH }, { FIPS_RSA_TYPE, (unsigned char *)rsa_modulus,
FIPS_RSA_MODULUS_LENGTH }, { FIPS_RSA_TYPE, (unsigned char *)rsa_public_exponent, FIPS_RSA_PUBLIC_EXPONENT_LENGTH }, { FIPS_RSA_TYPE, (unsigned char *)rsa_private_exponent, FIPS_RSA_PRIVATE_EXPONENT_LENGTH }, { FIPS_RSA_TYPE, (unsigned char *)rsa_prime0, FIPS_RSA_PRIME0_LENGTH }, { FIPS_RSA_TYPE, (unsigned char *)rsa_prime1, FIPS_RSA_PRIME1_LENGTH }, { FIPS_RSA_TYPE, (unsigned char *)rsa_exponent0, FIPS_RSA_EXPONENT0_LENGTH }, { FIPS_RSA_TYPE, (unsigned char *)rsa_exponent1, FIPS_RSA_EXPONENT1_LENGTH }, { FIPS_RSA_TYPE, (unsigned char *)rsa_coefficient, FIPS_RSA_COEFFICIENT_LENGTH } }; /* RSA variables. */ #ifdef CREATE_TEMP_ARENAS PLArenaPool * rsa_public_arena; PLArenaPool * rsa_private_arena; #endif NSSLOWKEYPublicKey * rsa_public_key; NSSLOWKEYPrivateKey * rsa_private_key; SECStatus rsa_status; NSSLOWKEYPublicKey low_public_key = { NULL, NSSLOWKEYRSAKey, }; NSSLOWKEYPrivateKey low_private_key = { NULL, NSSLOWKEYRSAKey, }; PRUint8 rsa_computed_ciphertext[FIPS_RSA_ENCRYPT_LENGTH]; PRUint8 rsa_computed_plaintext[FIPS_RSA_DECRYPT_LENGTH]; /****************************************/ /* Compose RSA Public/Private Key Pair. */ /****************************************/ low_public_key.u.rsa = bl_public_key; low_private_key.u.rsa = bl_private_key; rsa_public_key = &low_public_key; rsa_private_key = &low_private_key; #ifdef CREATE_TEMP_ARENAS /* Create some space for the RSA public key. */ rsa_public_arena = PORT_NewArena( NSS_SOFTOKEN_DEFAULT_CHUNKSIZE ); if( rsa_public_arena == NULL ) { PORT_SetError( SEC_ERROR_NO_MEMORY ); return( CKR_HOST_MEMORY ); } /* Create some space for the RSA private key.
*/ rsa_private_arena = PORT_NewArena( NSS_SOFTOKEN_DEFAULT_CHUNKSIZE ); if( rsa_private_arena == NULL ) { PORT_FreeArena( rsa_public_arena, PR_TRUE ); PORT_SetError( SEC_ERROR_NO_MEMORY ); return( CKR_HOST_MEMORY ); } rsa_public_key->arena = rsa_public_arena; rsa_private_key->arena = rsa_private_arena; #endif /**************************************************/ /* RSA Single-Round Known Answer Encryption Test. */ /**************************************************/ /* Perform RSA Public Key Encryption. */ rsa_status = RSA_PublicKeyOp(&rsa_public_key->u.rsa, rsa_computed_ciphertext, rsa_known_plaintext_msg); if( ( rsa_status != SECSuccess ) || ( PORT_Memcmp( rsa_computed_ciphertext, rsa_known_ciphertext, FIPS_RSA_ENCRYPT_LENGTH ) != 0 ) ) goto rsa_loser; /**************************************************/ /* RSA Single-Round Known Answer Decryption Test. */ /**************************************************/ /* Perform RSA Private Key Decryption. */ rsa_status = RSA_PrivateKeyOp(&rsa_private_key->u.rsa, rsa_computed_plaintext, rsa_known_ciphertext); if( ( rsa_status != SECSuccess ) || ( PORT_Memcmp( rsa_computed_plaintext, rsa_known_plaintext_msg, FIPS_RSA_DECRYPT_LENGTH ) != 0 ) ) goto rsa_loser; rsa_status = sftk_fips_RSA_PowerUpSigSelfTest (HASH_AlgSHA256, rsa_public_key, rsa_private_key, rsa_known_plaintext_msg, FIPS_RSA_MESSAGE_LENGTH, rsa_known_sha256_signature); if( rsa_status != SECSuccess ) goto rsa_loser; rsa_status = sftk_fips_RSA_PowerUpSigSelfTest (HASH_AlgSHA384, rsa_public_key, rsa_private_key, rsa_known_plaintext_msg, FIPS_RSA_MESSAGE_LENGTH, rsa_known_sha384_signature); if( rsa_status != SECSuccess ) goto rsa_loser; rsa_status = sftk_fips_RSA_PowerUpSigSelfTest (HASH_AlgSHA512, rsa_public_key, rsa_private_key, rsa_known_plaintext_msg, FIPS_RSA_MESSAGE_LENGTH, rsa_known_sha512_signature); if( rsa_status != SECSuccess ) goto rsa_loser; /* Dispose of all RSA key material.
*/ nsslowkey_DestroyPublicKey( rsa_public_key ); nsslowkey_DestroyPrivateKey( rsa_private_key ); return( CKR_OK ); rsa_loser: nsslowkey_DestroyPublicKey( rsa_public_key ); nsslowkey_DestroyPrivateKey( rsa_private_key ); return( CKR_DEVICE_ERROR ); } #ifndef NSS_DISABLE_ECC
/*
 * sftk_fips_ECDSA_Test: one ECDSA sign/verify known-answer round on the
 * curve described by `encodedParams` (DER-encoded EC parameters).  A key
 * pair is derived deterministically from ecdsa_Known_Seed via
 * EC_NewKeyFromSeed, the SHA-1 digest of a fixed message is signed with
 * the same seed (ECDSA_SignDigestWithSeed), the signature is compared
 * against `knownSignature` (knownSignatureLen bytes), and then verified
 * with ECDSA_VerifyDigest.  Returns CKR_OK on full success,
 * CKR_DEVICE_ERROR on any failure; the private-key arena is freed on all
 * paths after key creation.
 */
static CK_RV sftk_fips_ECDSA_Test(const PRUint8 *encodedParams, unsigned int encodedParamsLen, const PRUint8 *knownSignature, unsigned int knownSignatureLen) { /* ECDSA Known Seed info for curves nistp256 and nistk283 */ static const PRUint8 ecdsa_Known_Seed[] = { 0x6a, 0x9b, 0xf6, 0xf7, 0xce, 0xed, 0x79, 0x11, 0xf0, 0xc7, 0xc8, 0x9a, 0xa5, 0xd1, 0x57, 0xb1, 0x7b, 0x5a, 0x3b, 0x76, 0x4e, 0x7b, 0x7c, 0xbc, 0xf2, 0x76, 0x1c, 0x1c, 0x7f, 0xc5, 0x53, 0x2f}; static const PRUint8 msg[] = { "Firefox and ThunderBird are awesome!"}; unsigned char sha1[SHA1_LENGTH]; /* SHA-1 hash (160 bits) */ unsigned char sig[2*MAX_ECKEY_LEN]; SECItem signature, digest; SECItem encodedparams; ECParams *ecparams = NULL; ECPrivateKey *ecdsa_private_key = NULL; ECPublicKey ecdsa_public_key; SECStatus ecdsaStatus = SECSuccess; /* construct the ECDSA private/public key pair */ encodedparams.type = siBuffer; encodedparams.data = (unsigned char *) encodedParams; encodedparams.len = encodedParamsLen; if (EC_DecodeParams(&encodedparams, &ecparams) != SECSuccess) { return( CKR_DEVICE_ERROR ); } /* Generates a new EC key pair. The private key is a supplied * random value (in seed) and the public key is the result of * performing a scalar point multiplication of that value with * the curve's base point. */ ecdsaStatus = EC_NewKeyFromSeed(ecparams, &ecdsa_private_key, ecdsa_Known_Seed, sizeof(ecdsa_Known_Seed)); /* free the ecparams they are no longer needed */ PORT_FreeArena(ecparams->arena, PR_FALSE); ecparams = NULL; if (ecdsaStatus != SECSuccess) { return ( CKR_DEVICE_ERROR ); } /* construct public key from private key.
*/ ecdsaStatus = EC_CopyParams(ecdsa_private_key->ecParams.arena, &ecdsa_public_key.ecParams, &ecdsa_private_key->ecParams); if (ecdsaStatus != SECSuccess) { goto loser; } ecdsa_public_key.publicValue = ecdsa_private_key->publicValue; /* validate public key value */ ecdsaStatus = EC_ValidatePublicKey(&ecdsa_public_key.ecParams, &ecdsa_public_key.publicValue); if (ecdsaStatus != SECSuccess) { goto loser; } /* validate public key value */ ecdsaStatus = EC_ValidatePublicKey(&ecdsa_private_key->ecParams, &ecdsa_private_key->publicValue); if (ecdsaStatus != SECSuccess) { goto loser; } /***************************************************/ /* ECDSA Single-Round Known Answer Signature Test. */ /***************************************************/ ecdsaStatus = SHA1_HashBuf(sha1, msg, sizeof msg); if (ecdsaStatus != SECSuccess) { goto loser; } digest.type = siBuffer; digest.data = sha1; digest.len = SHA1_LENGTH; memset(sig, 0, sizeof sig); signature.type = siBuffer; signature.data = sig; signature.len = sizeof sig; ecdsaStatus = ECDSA_SignDigestWithSeed(ecdsa_private_key, &signature, &digest, ecdsa_Known_Seed, sizeof ecdsa_Known_Seed); if (ecdsaStatus != SECSuccess) { goto loser; } if( ( signature.len != knownSignatureLen ) || ( PORT_Memcmp( signature.data, knownSignature, knownSignatureLen ) != 0 ) ) { ecdsaStatus = SECFailure; goto loser; } /******************************************************/ /* ECDSA Single-Round Known Answer Verification Test. */ /******************************************************/ /* Perform ECDSA verification process.
*/ ecdsaStatus = ECDSA_VerifyDigest(&ecdsa_public_key, &signature, &digest); loser: /* free the memory for the private key arena*/ if (ecdsa_private_key != NULL) { PORT_FreeArena(ecdsa_private_key->ecParams.arena, PR_FALSE); } if (ecdsaStatus != SECSuccess) { return CKR_DEVICE_ERROR ; } return( CKR_OK ); }
/*
 * sftk_fips_ECDSA_PowerUpSelfTest: FIPS power-up known-answer tests for
 * ECDSA.  Always runs the GF(p) nistp256 curve test; additionally runs
 * the GF(2m) nistk283 test when NSS_ECC_MORE_THAN_SUITE_B is defined.
 * Returns CKR_OK if all enabled curve tests pass, CKR_DEVICE_ERROR
 * otherwise.
 */
static CK_RV sftk_fips_ECDSA_PowerUpSelfTest() { /* ECDSA Known curve nistp256 == SEC_OID_SECG_EC_SECP256R1 params */ static const PRUint8 ecdsa_known_P256_EncodedParams[] = { 0x06,0x08,0x2a,0x86,0x48,0xce,0x3d,0x03, 0x01,0x07}; static const PRUint8 ecdsa_known_P256_signature[] = { 0x07,0xb1,0xcb,0x57,0x20,0xa7,0x10,0xd6, 0x9d,0x37,0x4b,0x1c,0xdc,0x35,0x90,0xff, 0x1a,0x2d,0x98,0x95,0x1b,0x2f,0xeb,0x7f, 0xbb,0x81,0xca,0xc0,0x69,0x75,0xea,0xc5, 0x59,0x6a,0x62,0x49,0x3d,0x50,0xc9,0xe1, 0x27,0x3b,0xff,0x9b,0x13,0x66,0x67,0xdd, 0x7d,0xd1,0x0d,0x2d,0x7c,0x44,0x04,0x1b, 0x16,0x21,0x12,0xc5,0xcb,0xbd,0x9e,0x75}; #ifdef NSS_ECC_MORE_THAN_SUITE_B /* ECDSA Known curve nistk283 == SEC_OID_SECG_EC_SECT283K1 params */ static const PRUint8 ecdsa_known_K283_EncodedParams[] = { 0x06,0x05,0x2b,0x81,0x04,0x00,0x10}; static const PRUint8 ecdsa_known_K283_signature[] = { 0x00,0x45,0x88,0xc0,0x79,0x09,0x07,0xd1, 0x4e,0x88,0xe6,0xd5,0x2f,0x22,0x04,0x74, 0x35,0x24,0x65,0xe8,0x15,0xde,0x90,0x66, 0x94,0x70,0xdd,0x3a,0x14,0x70,0x02,0xd1, 0xef,0x86,0xbd,0x15,0x00,0xd9,0xdc,0xfc, 0x87,0x2e,0x7c,0x99,0xe2,0xe3,0x79,0xb8, 0xd9,0x10,0x49,0x78,0x4b,0x59,0x8b,0x05, 0x77,0xec,0x6c,0xe8,0x35,0xe6,0x2e,0xa9, 0xf9,0x77,0x1f,0x71,0x86,0xa5,0x4a,0xd0}; #endif CK_RV crv; /* ECDSA GF(p) prime field curve test */ crv = sftk_fips_ECDSA_Test(ecdsa_known_P256_EncodedParams, sizeof ecdsa_known_P256_EncodedParams, ecdsa_known_P256_signature, sizeof ecdsa_known_P256_signature ); if (crv != CKR_OK) { return( CKR_DEVICE_ERROR ); } #ifdef NSS_ECC_MORE_THAN_SUITE_B /* ECDSA GF(2m) binary field curve test */ crv = sftk_fips_ECDSA_Test(ecdsa_known_K283_EncodedParams, sizeof ecdsa_known_K283_EncodedParams,
ecdsa_known_K283_signature, sizeof ecdsa_known_K283_signature ); if (crv != CKR_OK) { return( CKR_DEVICE_ERROR ); } #endif return( CKR_OK ); } #endif /* NSS_DISABLE_ECC */
/*
 * sftk_fips_DSA_PowerUpSelfTest: FIPS power-up known-answer test for DSA.
 * Derives a key pair from fixed PQG parameters and a fixed seed
 * (DSA_NewKeyFromSeed), signs a fixed digest with a fixed per-signature
 * seed (DSA_SignDigestWithSeed), compares the result against
 * dsa_known_signature, then verifies it with DSA_VerifyDigest.  Returns
 * CKR_OK on success, CKR_HOST_MEMORY if key generation fails, and
 * CKR_DEVICE_ERROR on signature mismatch or verification failure.  The
 * P/Q/G and known-answer arrays below are fixed test vectors.
 */
static CK_RV sftk_fips_DSA_PowerUpSelfTest( void ) { /* DSA Known P (1024-bits), Q (160-bits), and G (1024-bits) Values. */ static const PRUint8 dsa_P[] = { 0x80,0xb0,0xd1,0x9d,0x6e,0xa4,0xf3,0x28, 0x9f,0x24,0xa9,0x8a,0x49,0xd0,0x0c,0x63, 0xe8,0x59,0x04,0xf9,0x89,0x4a,0x5e,0xc0, 0x6d,0xd2,0x67,0x6b,0x37,0x81,0x83,0x0c, 0xfe,0x3a,0x8a,0xfd,0xa0,0x3b,0x08,0x91, 0x1c,0xcb,0xb5,0x63,0xb0,0x1c,0x70,0xd0, 0xae,0xe1,0x60,0x2e,0x12,0xeb,0x54,0xc7, 0xcf,0xc6,0xcc,0xae,0x97,0x52,0x32,0x63, 0xd3,0xeb,0x55,0xea,0x2f,0x4c,0xd5,0xd7, 0x3f,0xda,0xec,0x49,0x27,0x0b,0x14,0x56, 0xc5,0x09,0xbe,0x4d,0x09,0x15,0x75,0x2b, 0xa3,0x42,0x0d,0x03,0x71,0xdf,0x0f,0xf4, 0x0e,0xe9,0x0c,0x46,0x93,0x3d,0x3f,0xa6, 0x6c,0xdb,0xca,0xe5,0xac,0x96,0xc8,0x64, 0x5c,0xec,0x4b,0x35,0x65,0xfc,0xfb,0x5a, 0x1b,0x04,0x1b,0xa1,0x0e,0xfd,0x88,0x15}; static const PRUint8 dsa_Q[] = { 0xad,0x22,0x59,0xdf,0xe5,0xec,0x4c,0x6e, 0xf9,0x43,0xf0,0x4b,0x2d,0x50,0x51,0xc6, 0x91,0x99,0x8b,0xcf}; static const PRUint8 dsa_G[] = { 0x78,0x6e,0xa9,0xd8,0xcd,0x4a,0x85,0xa4, 0x45,0xb6,0x6e,0x5d,0x21,0x50,0x61,0xf6, 0x5f,0xdf,0x5c,0x7a,0xde,0x0d,0x19,0xd3, 0xc1,0x3b,0x14,0xcc,0x8e,0xed,0xdb,0x17, 0xb6,0xca,0xba,0x86,0xa9,0xea,0x51,0x2d, 0xc1,0xa9,0x16,0xda,0xf8,0x7b,0x59,0x8a, 0xdf,0xcb,0xa4,0x67,0x00,0x44,0xea,0x24, 0x73,0xe5,0xcb,0x4b,0xaf,0x2a,0x31,0x25, 0x22,0x28,0x3f,0x16,0x10,0x82,0xf7,0xeb, 0x94,0x0d,0xdd,0x09,0x22,0x14,0x08,0x79, 0xba,0x11,0x0b,0xf1,0xff,0x2d,0x67,0xac, 0xeb,0xb6,0x55,0x51,0x69,0x97,0xa7,0x25, 0x6b,0x9c,0xa0,0x9b,0xd5,0x08,0x9b,0x27, 0x42,0x1c,0x7a,0x69,0x57,0xe6,0x2e,0xed, 0xa9,0x5b,0x25,0xe8,0x1f,0xd2,0xed,0x1f, 0xdf,0xe7,0x80,0x17,0xba,0x0d,0x4d,0x38}; /* DSA Known Random Values (known random key block is 160-bits) */ /* and (known random signature block is 160-bits).
*/ static const PRUint8 dsa_known_random_key_block[] = { "Mozilla Rules World!"}; static const PRUint8 dsa_known_random_signature_block[] = { "Random DSA Signature"}; /* DSA Known Digest (160-bits) */ static const PRUint8 dsa_known_digest[] = { "DSA Signature Digest" }; /* DSA Known Signature (320-bits). */ static const PRUint8 dsa_known_signature[] = { 0x25,0x7c,0x3a,0x79,0x32,0x45,0xb7,0x32, 0x70,0xca,0x62,0x63,0x2b,0xf6,0x29,0x2c, 0x22,0x2a,0x03,0xce,0x48,0x15,0x11,0x72, 0x7b,0x7e,0xf5,0x7a,0xf3,0x10,0x3b,0xde, 0x34,0xc1,0x9e,0xd7,0x27,0x9e,0x77,0x38}; /* DSA variables. */ DSAPrivateKey * dsa_private_key; SECStatus dsa_status; SECItem dsa_signature_item; SECItem dsa_digest_item; DSAPublicKey dsa_public_key; PRUint8 dsa_computed_signature[FIPS_DSA_SIGNATURE_LENGTH]; static const PQGParams dsa_pqg = { NULL, { FIPS_DSA_TYPE, (unsigned char *)dsa_P, FIPS_DSA_PRIME_LENGTH }, { FIPS_DSA_TYPE, (unsigned char *)dsa_Q, FIPS_DSA_SUBPRIME_LENGTH }, { FIPS_DSA_TYPE, (unsigned char *)dsa_G, FIPS_DSA_BASE_LENGTH }}; /*******************************************/ /* Generate a DSA public/private key pair. */ /*******************************************/ /* Generate a DSA public/private key pair. */ dsa_status = DSA_NewKeyFromSeed(&dsa_pqg, dsa_known_random_key_block, &dsa_private_key); if( dsa_status != SECSuccess ) return( CKR_HOST_MEMORY ); /* construct public key from private key. */ dsa_public_key.params = dsa_private_key->params; dsa_public_key.publicValue = dsa_private_key->publicValue; /*************************************************/ /* DSA Single-Round Known Answer Signature Test. */ /*************************************************/ dsa_signature_item.data = dsa_computed_signature; dsa_signature_item.len = sizeof dsa_computed_signature; dsa_digest_item.data = (unsigned char *)dsa_known_digest; dsa_digest_item.len = SHA1_LENGTH; /* Perform DSA signature process.
*/ dsa_status = DSA_SignDigestWithSeed( dsa_private_key, &dsa_signature_item, &dsa_digest_item, dsa_known_random_signature_block ); if( ( dsa_status != SECSuccess ) || ( dsa_signature_item.len != FIPS_DSA_SIGNATURE_LENGTH ) || ( PORT_Memcmp( dsa_computed_signature, dsa_known_signature, FIPS_DSA_SIGNATURE_LENGTH ) != 0 ) ) { dsa_status = SECFailure; } else { /****************************************************/ /* DSA Single-Round Known Answer Verification Test. */ /****************************************************/ /* Perform DSA verification process. */ dsa_status = DSA_VerifyDigest( &dsa_public_key, &dsa_signature_item, &dsa_digest_item); } PORT_FreeArena(dsa_private_key->params.arena, PR_TRUE); /* Don't free public key, it uses same arena as private key */ /* Verify DSA signature. */ if( dsa_status != SECSuccess ) return( CKR_DEVICE_ERROR ); return( CKR_OK ); }
/*
 * sftk_fips_RNG_PowerUpSelfTest: FIPS power-up self-test for the RNG.
 * Runs the SP 800-90 health tests (PRNGTEST_RunHealthTests) and then a
 * DSA X-generation check against a fixed known answer (continues beyond
 * this chunk).
 */
static CK_RV sftk_fips_RNG_PowerUpSelfTest( void ) { static const PRUint8 Q[] = { 0x85,0x89,0x9c,0x77,0xa3,0x79,0xff,0x1a, 0x86,0x6f,0x2f,0x3e,0x2e,0xf9,0x8c,0x9c, 0x9d,0xef,0xeb,0xed}; static const PRUint8 GENX[] = { 0x65,0x48,0xe3,0xca,0xac,0x64,0x2d,0xf7, 0x7b,0xd3,0x4e,0x79,0xc9,0x7d,0xa6,0xa8, 0xa2,0xc2,0x1f,0x8f,0xe9,0xb9,0xd3,0xa1, 0x3f,0xf7,0x0c,0xcd,0xa6,0xca,0xbf,0xce, 0x84,0x0e,0xb6,0xf1,0x0d,0xbe,0xa9,0xa3}; static const PRUint8 rng_known_DSAX[] = { 0x7a,0x86,0xf1,0x7f,0xbd,0x4e,0x6e,0xd9, 0x0a,0x26,0x21,0xd0,0x19,0xcb,0x86,0x73, 0x10,0x1f,0x60,0xd7}; SECStatus rng_status = SECSuccess; PRUint8 DSAX[FIPS_DSA_SUBPRIME_LENGTH]; /*******************************************/ /* Run the SP 800-90 Health tests */ /*******************************************/ rng_status = PRNGTEST_RunHealthTests(); if (rng_status != SECSuccess) { return (CKR_DEVICE_ERROR); } /*******************************************/ /* Generate DSAX for given Q.
*/ /*******************************************/ rng_status = FIPS186Change_ReduceModQForDSA(GENX, Q, DSAX); /* Verify DSAX to perform the RNG integrity check */ if( ( rng_status != SECSuccess ) || ( PORT_Memcmp( DSAX, rng_known_DSAX, (FIPS_DSA_SUBPRIME_LENGTH) ) != 0 ) ) return( CKR_DEVICE_ERROR ); return( CKR_OK ); } static CK_RV sftk_fipsSoftwareIntegrityTest(void) { CK_RV crv = CKR_OK; /* make sure that our check file signatures are OK */ if( !BLAPI_VerifySelf( NULL ) || !BLAPI_SHVerify( SOFTOKEN_LIB_NAME, (PRFuncPtr) sftk_fips_HMAC ) ) { crv = CKR_DEVICE_ERROR; /* better error code? checksum error? */ } return crv; } CK_RV sftk_fipsPowerUpSelfTest( void ) { CK_RV rv; /* RC2 Power-Up SelfTest(s). */ rv = sftk_fips_RC2_PowerUpSelfTest(); if( rv != CKR_OK ) return rv; /* RC4 Power-Up SelfTest(s). */ rv = sftk_fips_RC4_PowerUpSelfTest(); if( rv != CKR_OK ) return rv; /* DES Power-Up SelfTest(s). */ rv = sftk_fips_DES_PowerUpSelfTest(); if( rv != CKR_OK ) return rv; /* DES3 Power-Up SelfTest(s). */ rv = sftk_fips_DES3_PowerUpSelfTest(); if( rv != CKR_OK ) return rv; /* AES Power-Up SelfTest(s) for 128-bit key. */ rv = sftk_fips_AES_PowerUpSelfTest(FIPS_AES_128_KEY_SIZE); if( rv != CKR_OK ) return rv; /* AES Power-Up SelfTest(s) for 192-bit key. */ rv = sftk_fips_AES_PowerUpSelfTest(FIPS_AES_192_KEY_SIZE); if( rv != CKR_OK ) return rv; /* AES Power-Up SelfTest(s) for 256-bit key. */ rv = sftk_fips_AES_PowerUpSelfTest(FIPS_AES_256_KEY_SIZE); if( rv != CKR_OK ) return rv; /* MD2 Power-Up SelfTest(s). */ rv = sftk_fips_MD2_PowerUpSelfTest(); if( rv != CKR_OK ) return rv; /* MD5 Power-Up SelfTest(s). */ rv = sftk_fips_MD5_PowerUpSelfTest(); if( rv != CKR_OK ) return rv; /* SHA-X Power-Up SelfTest(s). */ rv = sftk_fips_SHA_PowerUpSelfTest(); if( rv != CKR_OK ) return rv; /* HMAC SHA-X Power-Up SelfTest(s). */ rv = sftk_fips_HMAC_PowerUpSelfTest(); if( rv != CKR_OK ) return rv; /* RSA Power-Up SelfTest(s). 
*/ rv = sftk_fips_RSA_PowerUpSelfTest(); if( rv != CKR_OK ) return rv; /* DSA Power-Up SelfTest(s). */ rv = sftk_fips_DSA_PowerUpSelfTest(); if( rv != CKR_OK ) return rv; /* RNG Power-Up SelfTest(s). */ rv = sftk_fips_RNG_PowerUpSelfTest(); if( rv != CKR_OK ) return rv; #ifndef NSS_DISABLE_ECC /* ECDSA Power-Up SelfTest(s). */ rv = sftk_fips_ECDSA_PowerUpSelfTest(); if( rv != CKR_OK ) return rv; #endif /* Software/Firmware Integrity Test. */ rv = sftk_fipsSoftwareIntegrityTest(); if( rv != CKR_OK ) return rv; /* Passed Power-Up SelfTest(s). */ return( CKR_OK ); }
43.252737
81
0.523252
[ "vector" ]
aed60d028240762a44fb6ec29e5db0a5e88a1fb5
6,524
h
C
cairo/src/cairo-truetype-subset-private.h
Classicmods/libretro-2048
0d04e3c7a9f4cfd48b9b10ccaded79a77f8b8f85
[ "Unlicense" ]
87
2015-01-04T13:57:18.000Z
2022-03-21T16:07:05.000Z
cairo/src/cairo-truetype-subset-private.h
Classicmods/libretro-2048
0d04e3c7a9f4cfd48b9b10ccaded79a77f8b8f85
[ "Unlicense" ]
12
2017-01-18T04:18:15.000Z
2022-03-20T20:52:07.000Z
cairo/src/cairo-truetype-subset-private.h
Classicmods/libretro-2048
0d04e3c7a9f4cfd48b9b10ccaded79a77f8b8f85
[ "Unlicense" ]
60
2015-01-15T06:16:39.000Z
2022-03-31T22:23:12.000Z
/* cairo - a vector graphics library with display and print output * * Copyright © 2006 Red Hat, Inc * * This library is free software; you can redistribute it and/or * modify it either under the terms of the GNU Lesser General Public * License version 2.1 as published by the Free Software Foundation * (the "LGPL") or, at your option, under the terms of the Mozilla * Public License Version 1.1 (the "MPL"). If you do not alter this * notice, a recipient may use your version of this file under either * the MPL or the LGPL. * * You should have received a copy of the LGPL along with this library * in the file COPYING-LGPL-2.1; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Suite 500, Boston, MA 02110-1335, USA * You should have received a copy of the MPL along with this library * in the file COPYING-MPL-1.1 * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY * OF ANY KIND, either express or implied. See the LGPL or the MPL for * the specific language governing rights and limitations. * * The Original Code is the cairo graphics library. * * The Initial Developer of the Original Code is Red Hat, Inc. * * Contributor(s): * Kristian Høgsberg <krh@redhat.com> * Adrian Johnson <ajohnson@redneon.com> */ #ifndef CAIRO_TRUETYPE_SUBSET_PRIVATE_H #define CAIRO_TRUETYPE_SUBSET_PRIVATE_H #include "cairoint.h" #if CAIRO_HAS_FONT_SUBSET /* The structs defined here should strictly follow the TrueType * specification and not be padded. We use only 16-bit integer * in their definition to guarantee that. The fields of type * "FIXED" in the TT spec are broken into two *_1 and *_2 16-bit * parts, and 64-bit members are broken into four. 
* * The test truetype-tables in the test suite makes sure that * these tables have the right size. Please update that test * if you add new tables/structs that should be packed. */ #define MAKE_TT_TAG(a, b, c, d) (a<<24 | b<<16 | c<<8 | d) #define TT_TAG_CFF MAKE_TT_TAG('C','F','F',' ') #define TT_TAG_cmap MAKE_TT_TAG('c','m','a','p') #define TT_TAG_cvt MAKE_TT_TAG('c','v','t',' ') #define TT_TAG_fpgm MAKE_TT_TAG('f','p','g','m') #define TT_TAG_glyf MAKE_TT_TAG('g','l','y','f') #define TT_TAG_head MAKE_TT_TAG('h','e','a','d') #define TT_TAG_hhea MAKE_TT_TAG('h','h','e','a') #define TT_TAG_hmtx MAKE_TT_TAG('h','m','t','x') #define TT_TAG_loca MAKE_TT_TAG('l','o','c','a') #define TT_TAG_maxp MAKE_TT_TAG('m','a','x','p') #define TT_TAG_name MAKE_TT_TAG('n','a','m','e') #define TT_TAG_post MAKE_TT_TAG('p','o','s','t') #define TT_TAG_prep MAKE_TT_TAG('p','r','e','p') /* All tt_* structs are big-endian */ typedef struct _tt_cmap_index { uint16_t platform; uint16_t encoding; uint32_t offset; } tt_cmap_index_t; typedef struct _tt_cmap { uint16_t version; uint16_t num_tables; tt_cmap_index_t index[1]; } tt_cmap_t; typedef struct _segment_map { uint16_t format; uint16_t length; uint16_t version; uint16_t segCountX2; uint16_t searchRange; uint16_t entrySelector; uint16_t rangeShift; uint16_t endCount[1]; } tt_segment_map_t; typedef struct _tt_head { int16_t version_1; int16_t version_2; int16_t revision_1; int16_t revision_2; uint16_t checksum_1; uint16_t checksum_2; uint16_t magic_1; uint16_t magic_2; uint16_t flags; uint16_t units_per_em; int16_t created_1; int16_t created_2; int16_t created_3; int16_t created_4; int16_t modified_1; int16_t modified_2; int16_t modified_3; int16_t modified_4; int16_t x_min; /* FWORD */ int16_t y_min; /* FWORD */ int16_t x_max; /* FWORD */ int16_t y_max; /* FWORD */ uint16_t mac_style; uint16_t lowest_rec_pppem; int16_t font_direction_hint; int16_t index_to_loc_format; int16_t glyph_data_format; } tt_head_t; typedef struct _tt_hhea { int16_t 
version_1; int16_t version_2; int16_t ascender; /* FWORD */ int16_t descender; /* FWORD */ int16_t line_gap; /* FWORD */ uint16_t advance_max_width; /* UFWORD */ int16_t min_left_side_bearing; /* FWORD */ int16_t min_right_side_bearing; /* FWORD */ int16_t x_max_extent; /* FWORD */ int16_t caret_slope_rise; int16_t caret_slope_run; int16_t reserved[5]; int16_t metric_data_format; uint16_t num_hmetrics; } tt_hhea_t; typedef struct _tt_maxp { int16_t version_1; int16_t version_2; uint16_t num_glyphs; uint16_t max_points; uint16_t max_contours; uint16_t max_composite_points; uint16_t max_composite_contours; uint16_t max_zones; uint16_t max_twilight_points; uint16_t max_storage; uint16_t max_function_defs; uint16_t max_instruction_defs; uint16_t max_stack_elements; uint16_t max_size_of_instructions; uint16_t max_component_elements; uint16_t max_component_depth; } tt_maxp_t; typedef struct _tt_name_record { uint16_t platform; uint16_t encoding; uint16_t language; uint16_t name; uint16_t length; uint16_t offset; } tt_name_record_t; typedef struct _tt_name { uint16_t format; uint16_t num_records; uint16_t strings_offset; tt_name_record_t records[1]; } tt_name_t; /* composite_glyph_t flags */ #define TT_ARG_1_AND_2_ARE_WORDS 0x0001 #define TT_WE_HAVE_A_SCALE 0x0008 #define TT_MORE_COMPONENTS 0x0020 #define TT_WE_HAVE_AN_X_AND_Y_SCALE 0x0040 #define TT_WE_HAVE_A_TWO_BY_TWO 0x0080 typedef struct _tt_composite_glyph { uint16_t flags; uint16_t index; uint16_t args[6]; /* 1 to 6 arguments depending on value of flags */ } tt_composite_glyph_t; typedef struct _tt_glyph_data { int16_t num_contours; int8_t data[8]; tt_composite_glyph_t glyph; } tt_glyph_data_t; #endif /* CAIRO_HAS_FONT_SUBSET */ #endif /* CAIRO_TRUETYPE_SUBSET_PRIVATE_H */
32.62
78
0.674739
[ "vector" ]
aedb9b475009c4dd6999b0b248dab9511a693b02
3,857
h
C
Dependencies/GorillaAudio/include/gorilla/common/gc_thread.h
JRBonilla/Slate
9bcb3befced30d8f9ffb2dcce0a0209ba76093e4
[ "MIT" ]
1,303
2015-02-15T05:12:55.000Z
2022-03-18T18:23:28.000Z
Dependencies/gorilla-audio/src/common/gc_thread.h
WildFire212/Sparky
a679d0834e37eb3570dff18b01550210734cb97e
[ "Apache-2.0" ]
124
2015-04-02T14:15:05.000Z
2021-05-05T12:47:16.000Z
Dependencies/gorilla-audio/src/common/gc_thread.h
WildFire212/Sparky
a679d0834e37eb3570dff18b01550210734cb97e
[ "Apache-2.0" ]
538
2015-02-19T21:53:15.000Z
2022-03-11T06:18:05.000Z
/** Threads and Synchronization. * * \file gc_thread.h */ #ifndef _GORILLA_GC_THREAD_H #define _GORILLA_GC_THREAD_H #include "gc_types.h" #ifdef __cplusplus extern "C" { #endif /* __cplusplus */ /************/ /* Thread */ /************/ /** Thread data structure and associated functions. * * \ingroup common * \defgroup gc_Thread Thread */ /** Enumerated thread priorities. * * \ingroup gc_Thread * \defgroup threadPrio Thread Priorities */ #define GC_THREAD_PRIORITY_NORMAL 0 /**< Normal thread priority. \ingroup threadPrio */ #define GC_THREAD_PRIORITY_LOW 1 /**< Low thread priority. \ingroup threadPrio */ #define GC_THREAD_PRIORITY_HIGH 2 /**< High thread priority. \ingroup threadPrio */ #define GC_THREAD_PRIORITY_HIGHEST 3 /**< Highest thread priority. \ingroup threadPrio */ /** Thread function callback. * * Threads execute functions. Those functions must match this prototype. * Thread functions should return non-zero values if they encounter an * error, zero if they terminate without error. * * \ingroup gc_Thread * \param in_context The user-specified thread context. * \return GC_SUCCESS if thread terminated without error. GC_ERROR_GENERIC * if not. */ typedef gc_int32 (*gc_ThreadFunc)(void* in_context); /** Thread data structure [\ref SINGLE_CLIENT]. * * \ingroup gc_Thread */ typedef struct gc_Thread { gc_ThreadFunc threadFunc; void* threadObj; void* context; gc_int32 id; gc_int32 priority; gc_int32 stackSize; } gc_Thread; /** Creates a new thread. * * The created thread will not run until gc_thread_run() is called on it. * * \ingroup gc_Thread */ gc_Thread* gc_thread_create(gc_ThreadFunc in_threadFunc, void* in_context, gc_int32 in_priority, gc_int32 in_stackSize); /** Runs a thread. * * \ingroup gc_Thread */ void gc_thread_run(gc_Thread* in_thread); /** Joins a thread with the current thread. * * \ingroup gc_Thread */ void gc_thread_join(gc_Thread* in_thread); /** Signals a thread to wait for a specified time interval. 
* * While the time interval is specified in milliseconds, different operating * systems have different guarantees about the minimum time interval provided. * If accurate sleep timings are desired, make sure the thread priority is set * to GC_THREAD_PRIORITY_HIGH or GC_THREAD_PRIORITY_HIGHEST. * * \ingroup gc_Thread */ void gc_thread_sleep(gc_uint32 in_ms); /** Destroys a thread object. * * \ingroup gc_Thread * \warning This should usually only be called once the the thread has * successfully joined with another thread. * \warning Never use a thread after it has been destroyed. */ void gc_thread_destroy(gc_Thread* in_thread); /***********/ /* Mutex */ /***********/ /** Mutual exclusion lock data structure and associated functions. * * \ingroup common * \defgroup gc_Mutex Mutex */ /** Mutual exclusion lock (mutex) thread synchronization primitive data structure [\ref SINGLE_CLIENT]. * * \ingroup gc_Mutex */ typedef struct gc_Mutex { void* mutex; } gc_Mutex; /** Creates a mutex. * * \ingroup gc_Mutex */ gc_Mutex* gc_mutex_create(); /** Locks a mutex. * * In general, any lock should have a matching unlock(). * * \ingroup gc_Mutex * \warning Do not lock a mutex on the same thread without first unlocking. */ void gc_mutex_lock(gc_Mutex* in_mutex); /** Unlocks a mutex. * * \ingroup gc_Mutex * \warning Do not unlock a mutex without first locking it. */ void gc_mutex_unlock(gc_Mutex* in_mutex); /** Destroys a mutex. * * \ingroup gc_Mutex * \warning Make sure the mutex is no longer in use before destroying it. * \warning Never use a mutex after it has been destroyed. */ void gc_mutex_destroy(gc_Mutex* in_mutex); #ifdef __cplusplus } #endif /* __cplusplus */ #endif /* _GORILLA_GC_H */
25.045455
103
0.70936
[ "object" ]
aedbd9540aef84714b55774e606fad3d7b72875d
9,372
h
C
GWToolboxdll/Windows/CompletionWindow.h
ChyprioteGW/GWToolboxpp
36a017dd073b05519213d4f79357f6a771b654d8
[ "MIT" ]
1
2020-08-11T20:15:25.000Z
2020-08-11T20:15:25.000Z
GWToolboxdll/Windows/CompletionWindow.h
necropola/GWToolboxpp
36a017dd073b05519213d4f79357f6a771b654d8
[ "MIT" ]
null
null
null
GWToolboxdll/Windows/CompletionWindow.h
necropola/GWToolboxpp
36a017dd073b05519213d4f79357f6a771b654d8
[ "MIT" ]
null
null
null
#pragma once #include "ToolboxWindow.h" #pragma once #include <Windows.h> #include <d3d9.h> #include <GWCA/Context/GameContext.h> #include <GWCA/Context/WorldContext.h> #include <GWCA/Constants/Constants.h> #include <GWCA/Utilities/Hook.h> #include <Modules/Resources.h> #include <Color.h> namespace Missions { struct MissionImage { const wchar_t* file_name; const int resource_id; IDirect3DTexture9* texture = nullptr; MissionImage(const wchar_t* _file_name, const int _resource_id) : file_name(_file_name), resource_id(_resource_id) {}; }; class Mission { protected: using MissionImageList = std::vector<MissionImage>; static Color is_daily_bg_color; static Color has_quest_bg_color; GuiUtils::EncString name; GW::Constants::MapID outpost; GW::Constants::MapID map_to; uint32_t zm_quest; const MissionImageList& normal_mode_textures; const MissionImageList& hard_mode_textures; public: Mission(GW::Constants::MapID, const MissionImageList&, const MissionImageList&, uint32_t); static ImVec2 icon_size; GW::Constants::MapID GetOutpost(); bool is_completed = false; bool bonus = false; bool map_unlocked = true; virtual const char* Name(); virtual bool Draw(IDirect3DDevice9*); virtual void OnClick(); virtual IDirect3DTexture9* GetMissionImage(); virtual bool IsDaily(); // True if this mission is ZM or ZB today virtual bool HasQuest(); // True if the ZM or ZB is in quest log virtual void CheckProgress(const std::wstring& player_name); }; class PvESkill : public Mission { protected: GW::Constants::SkillID skill_id; bool img_loaded = false; const wchar_t* image_url = 0; IDirect3DTexture9* skill_image = 0; public: uint32_t profession = 0; inline static MissionImageList dummy_var = {}; PvESkill(GW::Constants::SkillID _skill_id, const wchar_t* _image_url); virtual IDirect3DTexture9* GetMissionImage() override; bool IsDaily() override { return false; } bool HasQuest() override { return false; } virtual bool Draw(IDirect3DDevice9*) override; virtual void OnClick() override; virtual void 
CheckProgress(const std::wstring& player_name) override; }; class HeroUnlock : public PvESkill { public: HeroUnlock(GW::Constants::HeroID _hero_id); IDirect3DTexture9* GetMissionImage() override; void OnClick() override; virtual void CheckProgress(const std::wstring& player_name) override; const char* Name() override; }; class FactionsPvESkill : public PvESkill { protected: GW::Constants::SkillID skill_id2; public: FactionsPvESkill(GW::Constants::SkillID kurzick_id, GW::Constants::SkillID luxon_id, const wchar_t* _image_url); bool Draw(IDirect3DDevice9*) override; virtual void CheckProgress(const std::wstring& player_name) override; }; class PropheciesMission : public Mission { public: static MissionImageList normal_mode_images; static MissionImageList hard_mode_images; PropheciesMission(GW::Constants::MapID _outpost, uint32_t _zm_quest = 0) : Mission(_outpost, normal_mode_images, hard_mode_images, _zm_quest) {} }; class FactionsMission : public Mission { private: public: static MissionImageList normal_mode_images; static MissionImageList hard_mode_images; FactionsMission(GW::Constants::MapID _outpost, uint32_t _zm_quest = 0) : Mission(_outpost, normal_mode_images, hard_mode_images, _zm_quest) {} }; class NightfallMission : public Mission { private: protected: NightfallMission(GW::Constants::MapID _outpost, const MissionImageList& _normal_mode_images, const MissionImageList& _hard_mode_images, uint32_t _zm_quest) : Mission(_outpost, _normal_mode_images, _hard_mode_images, _zm_quest) {} public: static MissionImageList normal_mode_images; static MissionImageList hard_mode_images; NightfallMission(GW::Constants::MapID _outpost, uint32_t _zm_quest = 0) : Mission(_outpost, normal_mode_images, hard_mode_images, _zm_quest) {} }; class TormentMission : public NightfallMission { private: public: static MissionImageList normal_mode_images; static MissionImageList hard_mode_images; TormentMission(GW::Constants::MapID _outpost, uint32_t _zm_quest = 0) : 
NightfallMission(_outpost, normal_mode_images, hard_mode_images, _zm_quest) {} }; class Vanquish : public Mission { public: static MissionImageList hard_mode_images; Vanquish(GW::Constants::MapID _outpost, uint32_t _zm_quest = 0) : Mission(_outpost, hard_mode_images, hard_mode_images, _zm_quest) { } IDirect3DTexture9* GetMissionImage(); virtual void CheckProgress(const std::wstring& player_name) override; }; class EotNMission : public Mission { private: std::string name; protected: EotNMission(GW::Constants::MapID _outpost, const MissionImageList& _normal_mode_images, const MissionImageList& _hard_mode_images, uint32_t _zm_quest) : Mission(_outpost, _normal_mode_images, _hard_mode_images, _zm_quest) {} public: static MissionImageList normal_mode_images; static MissionImageList hard_mode_images; EotNMission(GW::Constants::MapID _outpost, uint32_t _zm_quest = 0) : Mission(_outpost, normal_mode_images, hard_mode_images, _zm_quest) {} IDirect3DTexture9* GetMissionImage(); virtual void CheckProgress(const std::wstring& player_name) override; }; class Dungeon : public EotNMission { private: std::vector<uint32_t> zb_quests; public: static MissionImageList normal_mode_images; static MissionImageList hard_mode_images; Dungeon(GW::Constants::MapID _outpost, std::vector<uint32_t> _zb_quests) : EotNMission(_outpost, normal_mode_images, hard_mode_images, 0), zb_quests(_zb_quests) {} Dungeon(GW::Constants::MapID _outpost, uint32_t _zb_quest = 0) : EotNMission(_outpost, normal_mode_images, hard_mode_images, 0), zb_quests({ _zb_quest }) {} bool IsDaily() override; bool HasQuest() override; }; } // class used to keep a list of hotkeys, capture keyboard event and fire hotkeys as needed class CompletionWindow : public ToolboxWindow { protected: bool hide_unlocked_skills = false; bool hide_completed_vanquishes = false; bool hide_completed_missions = false; bool pending_sort = true; const char* completion_ini_filename = "character_completion.ini"; bool hard_mode = false; enum 
CompletionType : uint8_t { Skills, Mission, MissionBonus, MissionHM, MissionBonusHM, Vanquishes, Heroes, MapsUnlocked }; struct Completion { GW::Constants::Profession profession; std::string name_str; std::vector<uint32_t> skills; std::vector<uint32_t> mission; std::vector<uint32_t> mission_bonus; std::vector<uint32_t> mission_hm; std::vector<uint32_t> mission_bonus_hm; std::vector<uint32_t> vanquishes; std::vector<uint32_t> heroes; std::vector<uint32_t> maps_unlocked; }; public: static CompletionWindow& Instance() { static CompletionWindow instance; return instance; } const char* Name() const override { return "Completion"; } const char* Icon() const override { return ICON_FA_BOOK; } const bool IsHardMode() { return hard_mode; } void Initialize() override; void Initialize_Prophecies(); void Initialize_Factions(); void Initialize_Nightfall(); void Initialize_EotN(); void Initialize_Dungeons(); void Terminate() override; void Draw(IDirect3DDevice9* pDevice) override; std::unordered_map<std::wstring, Completion*> character_completion; Completion* GetCharacterCompletion(const wchar_t* name, bool create_if_not_found = false); // IF character_name is null, parse current logged in char. CompletionWindow* ParseCompletionBuffer(CompletionType type, wchar_t* character_name = 0, uint32_t* buffer = 0, size_t len = 0); void DrawSettingInternal() override; void LoadSettings(CSimpleIni* ini) override; void SaveSettings(CSimpleIni* ini) override; // Check explicitly rather than every frame CompletionWindow* CheckProgress(); GW::HookEntry skills_unlocked_stoc_entry; std::map<GW::Constants::Campaign, std::vector<Missions::Mission*>> missions; std::map<GW::Constants::Campaign, std::vector<Missions::Mission*>> vanquishes; std::map<GW::Constants::Campaign, std::vector<Missions::PvESkill*>> elite_skills; std::map<GW::Constants::Campaign, std::vector<Missions::PvESkill*>> pve_skills; std::map<GW::Constants::Campaign, std::vector<Missions::HeroUnlock*>> heros; };
31.449664
132
0.679044
[ "vector" ]
aedcf8f5f6f113e607424c98a6e192acfbf47a30
1,369
h
C
include/configurable_control_hw/Actuator_Control_Interface.h
EbinPhilip/configurable_control_hw
7099df01abd4dbb4e3af795f0a53878a93cff839
[ "MIT" ]
null
null
null
include/configurable_control_hw/Actuator_Control_Interface.h
EbinPhilip/configurable_control_hw
7099df01abd4dbb4e3af795f0a53878a93cff839
[ "MIT" ]
null
null
null
include/configurable_control_hw/Actuator_Control_Interface.h
EbinPhilip/configurable_control_hw
7099df01abd4dbb4e3af795f0a53878a93cff839
[ "MIT" ]
null
null
null
#ifndef __ACTUATOR_CONTROL_INTERFACE_H__ #define __ACTUATOR_CONTROL_INTERFACE_H__ #include "Actuator_Properties.h" #include "Actuator_Controller.h" #include <string> #include <vector> #include <memory> #include <map> // composes various actuator controllers and presents a common interface for all class Actuator_Control_Interface : public Actuator_Controller { public: Actuator_Control_Interface(Actuator_Controller_Map controller_map, bool& stop_flag, const std::string& instance_name = "control interface"); ~Actuator_Control_Interface(); virtual void readState() override; virtual void writeCommand() override; virtual void enableActuators() override; virtual void disableActuators() override; virtual bool getErrorDetails(std::string& error_msg) override; virtual Actuator_Properties_Ptr getActuator(const std::string&) override; virtual void getActuatorNames(std::vector<std::string>&) override; protected: // map of controller names Actuator_Controller_Map controller_map_; std::string instance_name_; bool& stop_flag_; bool error_status_; // map of actuator name to controller name std::map<std::string, Actuator_Controller_Ptr> actuator_name_controller_map_; }; typedef std::shared_ptr<Actuator_Control_Interface> Actuator_Control_Interface_Ptr; #endif
30.422222
88
0.767714
[ "vector" ]
aede8ee03c193392bfb39984a912de7897869faa
2,427
h
C
src/SeccompFilterRewriter.h
yuyichao/rr
18f2ae57eee76e50c216066ad9163a90d0dfddb5
[ "BSD-1-Clause" ]
2
2018-11-07T00:33:54.000Z
2019-09-02T07:36:19.000Z
src/SeccompFilterRewriter.h
yuyichao/rr
18f2ae57eee76e50c216066ad9163a90d0dfddb5
[ "BSD-1-Clause" ]
4
2018-07-14T23:44:05.000Z
2018-11-28T00:04:30.000Z
src/SeccompFilterRewriter.h
yuyichao/rr
18f2ae57eee76e50c216066ad9163a90d0dfddb5
[ "BSD-1-Clause" ]
6
2018-06-07T02:28:36.000Z
2019-09-02T07:36:30.000Z
/* -*- Mode: C++; tab-width: 8; c-basic-offset: 2; indent-tabs-mode: nil; -*- */ #ifndef RR_SECCOMP_FILTER_REWRITER_H_ #define RR_SECCOMP_FILTER_REWRITER_H_ #include <assert.h> #include <cstdint> #include <unordered_map> #include <vector> /** * When seccomp decides not to execute a syscall the kernel returns to userspace * without modifying the registers. There is no negative return value to * indicate that whatever side effects the syscall would happen did not take * place. This is a problem for rr, because for syscalls that require special * handling, we'll be performing that handling even though the syscall didn't * actually happen. * * To get around this we can use the same mechanism that is used to skip the * syscall in the kernel to skip it ourselves: original_syscallno. We can't * use the traditional value of -1 though, because the kernel initializes * original_syscallno to -1 when delivering signals, and exiting sigreturn * will restore that. Not recording the side effects of sigreturn would be * bad. Instead we use -2, which still causes skipping the syscall when * given to the kernel as original_syscallno, but is never generated by the * kernel itself. */ #define SECCOMP_MAGIC_SKIP_ORIGINAL_SYSCALLNO -2 namespace rr { class RecordTask; /** * Object to support install_patched_seccomp_filter. */ class SeccompFilterRewriter { public: /** * Assuming |t| is set up for a prctl or seccomp syscall that * installs a seccomp-bpf filter, patch the filter to signal the tracer * instead of silently delivering an errno, and install it. */ void install_patched_seccomp_filter(RecordTask* t); uint32_t map_filter_data_to_real_result(uint16_t value) { assert(value < index_to_result.size()); return index_to_result[value]; } private: /** * Seccomp filters can return 32-bit result values. We need to map all of * them into a single 16 bit data field. 
Fortunately (so far) all the * filters we've seen return constants, so there aren't too many distinct * values we need to deal with. For each constant value that gets returned, * we'll add it as the key in |result_map|, with the corresponding value * being the 16-bit data value that our rewritten filter returns. */ std::unordered_map<uint32_t, uint16_t> result_to_index; std::vector<uint32_t> index_to_result; }; } // namespace rr #endif // RR_SECCOMP_FILTER_REWRITER_H_
35.691176
80
0.749897
[ "object", "vector" ]
aedf82a40bf4633deb2f39beee1d0a364c78e02b
4,100
h
C
src/kits/debugger/private/types/ValueLocation.h
stasinek/BHAPI
5d9aa61665ae2cc5c6e34415957d49a769325b2b
[ "BSD-3-Clause", "MIT" ]
3
2018-05-21T15:32:32.000Z
2019-03-21T13:34:55.000Z
src/kits/debugger/private/types/ValueLocation.h
stasinek/BHAPI
5d9aa61665ae2cc5c6e34415957d49a769325b2b
[ "BSD-3-Clause", "MIT" ]
null
null
null
src/kits/debugger/private/types/ValueLocation.h
stasinek/BHAPI
5d9aa61665ae2cc5c6e34415957d49a769325b2b
[ "BSD-3-Clause", "MIT" ]
null
null
null
/* * Copyright 2009-2012, Ingo Weinhold, ingo_weinhold@gmx.de. * Copyright 2013, Rene Gollent, rene@gollent.com. * Distributed under the terms of the MIT License. */ #ifndef VALUE_LOCATION_H #define VALUE_LOCATION_H #include <vector> #include <stdlib.h> #include <kits/support/String.h> #include <Referenceable.h> #include <Types.h> enum value_piece_location_type { VALUE_PIECE_LOCATION_INVALID, // structure is invalid VALUE_PIECE_LOCATION_UNKNOWN, // location unknown, but size is valid VALUE_PIECE_LOCATION_MEMORY, // piece is in memory VALUE_PIECE_LOCATION_REGISTER, // piece is in a register VALUE_PIECE_LOCATION_IMPLICIT // value isn't stored anywhere in memory but is known }; struct ValuePieceLocation { union { target_addr_t address; // memory address uint32 reg; // register number }; target_size_t size; // size in bytes (including // incomplete ones) uint64 bitSize; // total size in bits uint64 bitOffset; // bit offset (to the most // significant bit) value_piece_location_type type; void* value; // used for storing implicit values bool writable; // indicates if the piece is in a // location in the target team // where it can be modified ValuePieceLocation() : type(VALUE_PIECE_LOCATION_INVALID), value(NULL), writable(false) { } ValuePieceLocation(const ValuePieceLocation& other) { if (!Copy(other)) throw std::bad_alloc(); } ~ValuePieceLocation() { if (value != NULL) free(value); } ValuePieceLocation& operator=(const ValuePieceLocation& other) { if (!Copy(other)) throw std::bad_alloc(); return *this; } bool Copy(const ValuePieceLocation& other) { memcpy(this, &other, sizeof(ValuePieceLocation)); if (type == VALUE_PIECE_LOCATION_IMPLICIT) { void* tempValue = malloc(size); if (tempValue == NULL) { type = VALUE_PIECE_LOCATION_INVALID; return false; } memcpy(tempValue, value, other.size); value = tempValue; } return true; } bool IsValid() const { return type != VALUE_PIECE_LOCATION_INVALID; } void SetToUnknown() { type = VALUE_PIECE_LOCATION_UNKNOWN; } void 
SetToMemory(target_addr_t address) { type = VALUE_PIECE_LOCATION_MEMORY; this->address = address; this->writable = true; } void SetToRegister(uint32 reg) { type = VALUE_PIECE_LOCATION_REGISTER; this->reg = reg; this->writable = true; } void SetSize(target_size_t size) { this->size = size; this->bitSize = size * 8; this->bitOffset = 0; } void SetSize(uint64 bitSize, uint64 bitOffset) { this->size = (bitOffset + bitSize + 7) / 8; this->bitSize = bitSize; this->bitOffset = bitOffset; } bool SetToValue(const void* data, target_size_t size) { char* valueData = (char*)malloc(size); if (valueData == NULL) return false; memcpy(valueData, data, size); SetSize(size); type = VALUE_PIECE_LOCATION_IMPLICIT; value = valueData; writable = false; return true; } ValuePieceLocation& Normalize(bool bigEndian); }; class ValueLocation : public BReferenceable { public: ValueLocation(); ValueLocation(bool bigEndian); ValueLocation(bool bigEndian, const ValuePieceLocation& piece); ValueLocation(const ValueLocation& other); bool SetToByteOffset(const ValueLocation& other, uint64 byteffset, uint64 Size); bool SetTo(const ValueLocation& other, uint64 bitOffset, uint64 bitSize); void Clear(); bool IsBigEndian() const { return fBigEndian; } bool IsWritable() const { return fWritable; } bool AddPiece(const ValuePieceLocation& piece); int32 CountPieces() const; ValuePieceLocation PieceAt(int32 index) const; bool SetPieceAt(int32 index, const ValuePieceLocation& piece); ValueLocation& operator=(const ValueLocation& other); void Dump() const; private: typedef std::vector<ValuePieceLocation> PieceVector; private: PieceVector fPieces; bool fBigEndian; bool fWritable; }; #endif // VALUE_LOCATION_H
22.043011
84
0.692195
[ "vector" ]
aee2e2c363ea288e5cbb7c769469abc31015dfb9
2,178
h
C
Pods/Digits/iOS/DigitsKit.framework/Headers/DGTOAuthSigning.h
haithamkhedre/q-municate
ca7b430756566334391b1d4e22d05c6454d13864
[ "Apache-2.0" ]
592
2016-12-29T18:30:08.000Z
2022-03-23T09:55:33.000Z
Pods/Digits/iOS/DigitsKit.framework/Headers/DGTOAuthSigning.h
haithamkhedre/q-municate
ca7b430756566334391b1d4e22d05c6454d13864
[ "Apache-2.0" ]
179
2016-12-28T21:43:07.000Z
2020-07-30T12:04:30.000Z
Pods/Digits/iOS/DigitsKit.framework/Headers/DGTOAuthSigning.h
haithamkhedre/q-municate
ca7b430756566334391b1d4e22d05c6454d13864
[ "Apache-2.0" ]
101
2016-12-29T00:22:46.000Z
2021-08-08T10:16:13.000Z
// // DGTOAuthSigning.h // DigitsKit // // Copyright (c) 2015 Twitter Inc. All rights reserved. // #import <TwitterCore/TWTRCoreOAuthSigning.h> @class TWTRAuthConfig; @class DGTSession; @interface DGTOAuthSigning : NSObject <TWTRCoreOAuthSigning> /** * @name Initialization */ /** * Instantiate a `DGTOAuthSigning` object with the parameters it needs to generate the OAuth signatures. * * @param authConfig (required) Encapsulates credentials required to authenticate a Digits application. * @param authSession (required) Encapsulated credentials associated with a user session. * * @return An initialized `DGTOAuthSigning` object or nil if any of the parameters are missing. * * @see TWTRAuthConfig * @see DGTSession */ - (instancetype)initWithAuthConfig:(TWTRAuthConfig *)authConfig authSession:(DGTSession *)authSession NS_DESIGNATED_INITIALIZER; /** * Unavailable. Use `-initWithAuthConfig:authSession:` instead. */ - (instancetype)init __attribute__((unavailable("Use -initWithAuthConfig:authSession: instead."))); /** * This method provides you with the OAuth signature, as well as the formed URL with the query string, to send a request to `/sdk/account`. * * @param params (optional) Extra custom params to be added to the Request URL. These parameters will be part of the signature which authenticity is validated by the Digits' API. These extra parameters help as a Nonce between the client's session but they are ignored by the Digits' API. The params in the Request URL can be parsed and used by your server to validate that this header is not being reused by another one of your sessions. * * @return A dictionary with the fully formed Request URL under `TWTROAuthEchoRequestURLStringKey` (`NSString`), and the `Authorization` header in `TWTROAuthEchoAuthorizationHeaderKey` (`NSString`), to be used to sign the request. 
* * @see More information about OAuth Echo: https://dev.twitter.com/oauth/echo * @see More information about Verify Credentials: https://dev.twitter.com/twitter-kit/ios/oauth-echo */ - (NSDictionary *)OAuthEchoHeadersToVerifyCredentialsWithParams:(NSDictionary *)params; @end
43.56
448
0.758494
[ "object" ]
aef292fee7e804fb9180b67e847b2a6d48059756
2,573
h
C
nntrainer/layers/preprocess_flip_layer.h
lhs8928/nntrainer
f1c1e9b45043a2239369ebf41fd8e4189ed73c51
[ "Apache-2.0" ]
null
null
null
nntrainer/layers/preprocess_flip_layer.h
lhs8928/nntrainer
f1c1e9b45043a2239369ebf41fd8e4189ed73c51
[ "Apache-2.0" ]
null
null
null
nntrainer/layers/preprocess_flip_layer.h
lhs8928/nntrainer
f1c1e9b45043a2239369ebf41fd8e4189ed73c51
[ "Apache-2.0" ]
null
null
null
// SPDX-License-Identifier: Apache-2.0 /** * Copyright (C) 2020 Parichay Kapoor <pk.kapoor@samsung.com> * * @file preprocess_flip_layer.h * @date 20 January 2020 * @see https://github.com/nnstreamer/nntrainer * @author Parichay Kapoor <pk.kapoor@samsung.com> * @bug No known bugs except for NYI items * @brief This is Preprocess Random Flip Layer Class for Neural Network * */ #ifndef __PREPROCESS_FLIP_LAYER_H__ #define __PREPROCESS_FLIP_LAYER_H__ #ifdef __cplusplus #include <random> #include <layer_devel.h> namespace nntrainer { /** * @class Preprocess FLip Layer * @brief Preprocess FLip Layer */ class PreprocessFlipLayer : public Layer { public: /** * @brief Constructor of Preprocess FLip Layer */ PreprocessFlipLayer(); /** * @brief Destructor of Preprocess FLip Layer */ ~PreprocessFlipLayer() = default; /** * @brief Move constructor of PreprocessLayer. * @param[in] PreprocessLayer && */ PreprocessFlipLayer(PreprocessFlipLayer &&rhs) noexcept = default; /** * @brief Move assignment operator. * @parma[in] rhs PreprocessLayer to be moved. 
*/ PreprocessFlipLayer &operator=(PreprocessFlipLayer &&rhs) = default; /** * @copydoc Layer::finalize(InitLayerContext &context) */ void finalize(InitLayerContext &context) override; /** * @copydoc Layer::forwarding(RunLayerContext &context, bool training) */ void forwarding(RunLayerContext &context, bool training) override; /** * @copydoc Layer::calcDerivative(RunLayerContext &context) */ void calcDerivative(RunLayerContext &context) override; /** * @copydoc bool supportBackwarding() const */ bool supportBackwarding() const override { return false; }; /** * @copydoc Layer::exportTo(Exporter &exporter, ExportMethods method) */ void exportTo(Exporter &exporter, const ExportMethods &method) const override; /** * @copydoc Layer::getType() */ const std::string getType() const override { return PreprocessFlipLayer::type; }; /** * @copydoc Layer::setProperty(const std::vector<std::string> &values) */ void setProperty(const std::vector<std::string> &values) override; inline static const std::string type = "preprocess_flip"; private: std::mt19937 rng; /**< random number generator */ std::uniform_real_distribution<float> flip_dist; /**< uniform random distribution */ std::tuple<props::FlipDirection> preprocess_flip_props; }; } // namespace nntrainer #endif /* __cplusplus */ #endif /* __PREPROCESS_FLIP_LAYER_H__ */
25.22549
80
0.698407
[ "vector" ]
aef3e0ad7252e1353f7d0e5c5d8b7dc73b8b7266
9,181
c
C
College projects/Informatica Basica (C)/Pract 13/ej13.c
rafatyn/Proyectos
d080e040a2b1205b7b15f5ada82fb759e3cd2869
[ "MIT" ]
null
null
null
College projects/Informatica Basica (C)/Pract 13/ej13.c
rafatyn/Proyectos
d080e040a2b1205b7b15f5ada82fb759e3cd2869
[ "MIT" ]
null
null
null
College projects/Informatica Basica (C)/Pract 13/ej13.c
rafatyn/Proyectos
d080e040a2b1205b7b15f5ada82fb759e3cd2869
[ "MIT" ]
null
null
null
#include <stdio.h> #define MAXL 10 #define MAXF 50 typedef struct{ float temperatura; float precipitacion; float viento; }TDatos; TDatos Observacion[MAXL][MAXF]; int NumeroLugares=0; char NombreLugar[MAXL][50]; int NumeroFechas=0; typedef struct{ int dia; int mes; int anio; }Fecha; Fecha FechaObservacion[MAXF]; void nuevolugar (void){ int i; if(NumeroLugares>=MAXL){ printf("No puede introducir más lugares.");} else{ printf("Introduzca el nombre del nuevo lugar: "); getchar(); scanf("%[^\n]",NombreLugar[NumeroLugares]); for(i=0;i<NumeroFechas;i++){ printf("Introduzca los datos del día %d/%d/%d en %s",FechaObservacion[i].dia,FechaObservacion[i].mes,FechaObservacion[i].anio,NombreLugar[NumeroLugares]); printf("\nTemperatura: "); scanf("%f",&Observacion[NumeroLugares][i].temperatura); printf("\nPrecipitación: "); scanf("%f",&Observacion[NumeroLugares][i].precipitacion); printf("\nViento: "); scanf("%f",&Observacion[NumeroLugares][i].viento);} NumeroLugares++;}} void nuevafecha (void){ int i; if(NumeroFechas>=MAXF){ printf("No puede introducir más fechas.");} else{ printf("Introduzca la nueva fecha (dd mm aaaa): "); scanf("%d",&FechaObservacion[NumeroFechas].dia); scanf("%d",&FechaObservacion[NumeroFechas].mes); scanf("%d",&FechaObservacion[NumeroFechas].anio); for(i=0;i<NumeroLugares;i++){ printf("Introduzca los datos de %s: ",NombreLugar[i]); printf("\nTemperatura: "); scanf("%f",&Observacion[i][NumeroFechas].temperatura); printf("\nPrecipitación: "); scanf("%f",&Observacion[i][NumeroFechas].precipitacion); printf("\nViento: "); scanf("%f",&Observacion[i][NumeroFechas].viento);} NumeroFechas++;}} float Minimo (float Vector[],int nElementos,int *Posicion); float Media (float Vector[] ,int nElementos); int elegirTipoDato (void); float Maximo (float Vector[],int nElementos,int *Posicion); void mostrarMedia (int TipoDato){ int i,j; float med[NumeroFechas]; switch(TipoDato){ case 0: for(i=0;i<NumeroLugares;i++){ for(j=0;j<NumeroFechas;j++){ 
med[j]=Observacion[i][j].temperatura;} printf("La media de las temperaturas en %s fue de: %.2fºC.\n", NombreLugar[i], Media (med,NumeroFechas));}break; case 1: for(i=0;i<NumeroLugares;i++){ for(j=0;j<NumeroFechas;j++){ med[j]=Observacion[i][j].precipitacion;} printf("La media de las precipitaciones en %s fue de: %.2f mm.\n", NombreLugar[i], Media (med,NumeroFechas));}break; case 2: for(i=0;i<NumeroLugares;i++){ for(j=0;j<NumeroFechas;j++){ med[j]=Observacion[i][j].viento;} printf("La media del viento en %s fue de: %.2f km/h.\n", NombreLugar[i], Media (med,NumeroFechas));}break; default: printf("La opción seleccionada no existe, por favor seleccione otra."); mostrarMedia (elegirTipoDato());break;}} float Media (float Vector[],int nElementos){ int i; float media=0; for(i=0;i<nElementos;i++){ media+=Vector[i];} media=media/nElementos; return(media);} void mostrarMaximo (int TipoDato){ int i,j; int Posicion; float max[NumeroFechas]; switch(TipoDato){ case 0: for(i=0;i<NumeroLugares;i++){ for(j=0;j<NumeroFechas;j++){ max[j]=Observacion[i][j].temperatura;} printf("La temperatura máxima en %s se registró el día %d/%d/%d y fue de: %.2fºC.\n", NombreLugar[i], FechaObservacion[Posicion].dia, FechaObservacion[Posicion].mes, FechaObservacion[Posicion].anio, Maximo (max,NumeroFechas,&Posicion));}break; case 1: for(i=0;i<NumeroLugares;i++){ for(j=0;j<NumeroFechas;j++){ max[j]=Observacion[i][j].precipitacion;} printf("La precipitación máxima en %s se registró el día %d/%d/%d y fue de: %.2f mm.\n", NombreLugar[i], FechaObservacion[Posicion].dia, FechaObservacion[Posicion].mes, FechaObservacion[Posicion].anio, Maximo (max,NumeroFechas,&Posicion));}break; case 2: for(i=0;i<NumeroLugares;i++){ for(j=0;j<NumeroFechas;j++){ max[j]=Observacion[i][j].viento;} printf("El viento máximo en %s se registró el día %d/%d/%d y fue de: %.2f km/h.\n", NombreLugar[i], FechaObservacion[Posicion].dia, FechaObservacion[Posicion].mes, FechaObservacion[Posicion].anio, Maximo 
(max,NumeroFechas,&Posicion));}break; default: printf("La opción seleccionada no existe, por favor seleccione otra."); mostrarMaximo (elegirTipoDato());break;}} float Maximo (float Vector[],int nElementos,int *Posicion){ int i; float maximo; *Posicion=0; maximo=Vector[0]; for(i=1;i<nElementos;i++){ if(maximo<Vector[i]){ maximo=Vector[i]; *Posicion=i;}} return(maximo);} void mostrarMinimo (int TipoDato){ int i,j; int Posicion; float min[NumeroFechas]; switch(TipoDato){ case 0: for(i=0;i<NumeroLugares;i++){ for(j=0;j<NumeroFechas;j++){ min[j]=Observacion[i][j].temperatura;} printf("La temperatura mínima en %s se registró el día %d/%d/%d y fue de: %.2fºC.\n", NombreLugar[i], FechaObservacion[Posicion].dia, FechaObservacion[Posicion].mes, FechaObservacion[Posicion].anio, Minimo (min,NumeroFechas,&Posicion));}break; case 1: for(i=0;i<NumeroLugares;i++){ for(j=0;j<NumeroFechas;j++){ min[j]=Observacion[i][j].precipitacion;} printf("La precipitación máxima en %s se registró el día %d/%d/%d y fue de: %.2f mm.\n", NombreLugar[i], FechaObservacion[Posicion].dia, FechaObservacion[Posicion].mes, FechaObservacion[Posicion].anio, Minimo (min,NumeroFechas,&Posicion));}break; case 2: for(i=0;i<NumeroLugares;i++){ for(j=0;j<NumeroFechas;j++){ min[j]=Observacion[i][j].viento;} printf("El viento mínimo en %s se registró el día %d/%d/%d y fue de: %.2f km/h.\n", NombreLugar[i], FechaObservacion[Posicion].dia, FechaObservacion[Posicion].mes, FechaObservacion[Posicion].anio, Minimo (min,NumeroFechas,&Posicion));}break; default: printf("La opción seleccionada no existe, por favor seleccione otra."); mostrarMinimo (elegirTipoDato());break;}} float Minimo (float Vector[],int nElementos,int *Posicion){ int i; float minimo; *Posicion=0; minimo=Vector[0]; for(i=1;i<nElementos;i++){ if(minimo>Vector[i]){ minimo=Vector[i]; *Posicion=i;}} return(minimo);} int elegirTipoDato (void){ int opcion; printf("\nElija el tipo de dato: "); printf("\n\t0-Temperatura."); printf("\n\t1-Precipitaciones."); 
printf("\n\t2-Viento."); printf("\nTipo de dato elegido: "); scanf("%d",&opcion); return(opcion);} int main (void){ int opcion,x,i,j; char nombrefichero[50]; opcion=1; x=1; while(x!=0){ printf("\nIntroduzca la ruta o nombre del fichero si se encuentra en este mismo directorio(si no existe se creará): "); scanf("%s",nombrefichero); FILE *fich; fich=fopen(nombrefichero,"r"); if(fich!=NULL){ int feof(FILE *fich); fscanf(fich,"%d\n",&NumeroLugares); for(i=0;i<NumeroLugares;i++){ fscanf(fich," %[^\n]\n",NombreLugar[i]);} while(feof(fich)==0){ fscanf(fich,"%d %d %d\n",&FechaObservacion[NumeroFechas].dia,&FechaObservacion[NumeroFechas].mes,&FechaObservacion[NumeroFechas].anio); for(i=0;i<NumeroLugares;i++){ fscanf(fich,"%f %f %f\n",&Observacion[i][NumeroFechas].temperatura,&Observacion[i][NumeroFechas].precipitacion,&Observacion[i][NumeroFechas].viento);} NumeroFechas++;} fclose(fich); x=0;} else{ printf("\nNo existe el fichero introducido asi que se la estructura de datos se iniciará vacia"); fich=fopen(nombrefichero,"a"); if(fich!=NULL){ fclose(fich); x=0;} else{printf("\nNo tiene permisos para crear o abrir el fichero");}}} while(opcion!=0){ printf("\nOpciones: "); printf("\n\t0-Salir."); printf("\n\t1-Introducir nuevo lugar."); printf("\n\t2-Introducir nueva fecha."); printf("\n\t3-Mostrar valores medios."); printf("\n\t4-Mostrar valores máximos."); printf("\n\t5-Mostrar valores mínimos."); printf("\nOpción introducida: "); scanf("%d",&opcion); switch(opcion){ case 0:break; case 1:nuevolugar();break; case 2:nuevafecha();break; case 3:mostrarMedia (elegirTipoDato());break; case 4:mostrarMaximo (elegirTipoDato());break; case 5:mostrarMinimo (elegirTipoDato());break; default:printf("La opción seleccionada no existe.");break;}} x=1; while(x!=0){ FILE *fich; fich=fopen(nombrefichero,"w"); if(fich!=NULL){ int feof(FILE *fich); fprintf(fich,"%d\n",NumeroLugares); for(i=0;i<NumeroLugares;i++){ fprintf(fich,"%s\n",NombreLugar[i]);} for(j=0;j<NumeroFechas;j++){ fprintf(fich,"%d 
%d %d\n",FechaObservacion[j].dia,FechaObservacion[j].mes,FechaObservacion[j].anio); for(i=0;i<NumeroLugares;i++){ fprintf(fich,"%.1f %.1f %.1f\n",Observacion[i][j].temperatura,Observacion[i][j].precipitacion,Observacion[i][j].viento);}} x=0; fclose(fich);} else{ printf("\nNo tiene permisos para crear o modificar el fichero, introduzca una nueva ruta:"); scanf("%s",nombrefichero);}} printf("El fichero se ha sido guardado\n");}
38.902542
249
0.660059
[ "vector" ]
aefb0386c3dbc93a1b3c94e133d9ae5aa9e0e264
1,642
h
C
json/common_specificationparser.h
eth-srl/2Nice
4e02d0d37e25fbde06dbb07c4475110b95a40157
[ "Apache-2.0" ]
104
2018-06-05T01:51:15.000Z
2022-03-22T08:35:31.000Z
json/common_specificationparser.h
eth-srl/2Nice
4e02d0d37e25fbde06dbb07c4475110b95a40157
[ "Apache-2.0" ]
6
2018-11-22T12:50:39.000Z
2021-06-05T07:53:52.000Z
json/common_specificationparser.h
eth-srl/2Nice
4e02d0d37e25fbde06dbb07c4475110b95a40157
[ "Apache-2.0" ]
23
2018-06-15T13:12:11.000Z
2022-01-08T11:52:38.000Z
/************************************************************************* * libjson-rpc-cpp ************************************************************************* * @file specificationparser.h * @date 12.03.2013 * @author Peter Spiess-Knafl <peter.knafl@gmail.com> * @license See attached LICENSE.txt ************************************************************************/ #ifndef JSONRPC_CPP_SPECIFICATIONPARSER_H #define JSONRPC_CPP_SPECIFICATIONPARSER_H #include "json/common_procedure.h" #include "json/common_exception.h" namespace jsonrpc { class SpecificationParser { public: static std::vector<Procedure> GetProceduresFromFile(const std::string& filename) throw (JsonRpcException); static std::vector<Procedure> GetProceduresFromString(const std::string& spec) throw (JsonRpcException); static void GetFileContent (const std::string& filename, std::string& target); private: static void GetProcedure (Json::Value& val, Procedure &target); static void GetMethod (Json::Value& val, Procedure &target); static void GetNotification (Json::Value& val, Procedure &target); static jsontype_t toJsonType (Json::Value& val); static void GetPositionalParameters (Json::Value &val, Procedure &target); static void GetNamedParameters (Json::Value &val, Procedure &target); static std::string GetProcedureName (Json::Value &signature); }; } #endif // JSONRPC_CPP_SPECIFICATIONPARSER_H
42.102564
121
0.563946
[ "vector" ]
089b391079730a8c252c540ae073e63e7bcac7bb
35,853
h
C
src/spherical_advection_diffusion/spherical_advection_diffusion_elements.h
pkeuchel/oomph-lib
37c1c61425d6b9ea1c2ddceef63a68a228af6fa4
[ "RSA-MD" ]
4
2020-11-16T12:25:09.000Z
2021-06-29T08:53:25.000Z
src/spherical_advection_diffusion/spherical_advection_diffusion_elements.h
pkeuchel/oomph-lib
37c1c61425d6b9ea1c2ddceef63a68a228af6fa4
[ "RSA-MD" ]
2
2020-05-05T22:41:37.000Z
2020-05-10T14:14:17.000Z
src/spherical_advection_diffusion/spherical_advection_diffusion_elements.h
pkeuchel/oomph-lib
37c1c61425d6b9ea1c2ddceef63a68a228af6fa4
[ "RSA-MD" ]
3
2021-01-31T14:09:20.000Z
2021-06-07T07:20:51.000Z
// LIC// ==================================================================== // LIC// This file forms part of oomph-lib, the object-oriented, // LIC// multi-physics finite-element library, available // LIC// at http://www.oomph-lib.org. // LIC// // LIC// Copyright (C) 2006-2021 Matthias Heil and Andrew Hazel // LIC// // LIC// This library is free software; you can redistribute it and/or // LIC// modify it under the terms of the GNU Lesser General Public // LIC// License as published by the Free Software Foundation; either // LIC// version 2.1 of the License, or (at your option) any later version. // LIC// // LIC// This library is distributed in the hope that it will be useful, // LIC// but WITHOUT ANY WARRANTY; without even the implied warranty of // LIC// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // LIC// Lesser General Public License for more details. // LIC// // LIC// You should have received a copy of the GNU Lesser General Public // LIC// License along with this library; if not, write to the Free Software // LIC// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA // LIC// 02110-1301 USA. // LIC// // LIC// The authors may be contacted at oomph-lib@maths.man.ac.uk. // LIC// // LIC//==================================================================== // Header file for advection diffusion elements in a spherical polar coordinate // system #ifndef OOMPH_SPHERICAL_ADV_DIFF_ELEMENTS_HEADER #define OOMPH_SPHERICAL_ADV_DIFF_ELEMENTS_HEADER // Config header generated by autoconfig #ifdef HAVE_CONFIG_H #include <oomph-lib-config.h> #endif // OOMPH-LIB headers #include "../generic/nodes.h" #include "../generic/Qelements.h" #include "../generic/refineable_elements.h" #include "../generic/oomph_utilities.h" namespace oomph { //============================================================= /// A class for all elements that solve the /// Advection Diffusion equations in a spherical polar coordinate system /// using isoparametric elements. 
/// \f[ /// Pe \mathbf{w}\cdot(\mathbf{x}) \nabla u = /// \nabla \cdot \left( \nabla u \right) + f(\mathbf{x}) /// \f] /// This contains the generic maths. Shape functions, geometric /// mapping etc. must get implemented in derived class. //============================================================= class SphericalAdvectionDiffusionEquations : public virtual FiniteElement { public: /// Function pointer to source function fct(x,f(x)) -- /// x is a Vector! typedef void (*SphericalAdvectionDiffusionSourceFctPt)( const Vector<double>& x, double& f); /// Function pointer to wind function fct(x,w(x)) -- /// x is a Vector! typedef void (*SphericalAdvectionDiffusionWindFctPt)( const Vector<double>& x, Vector<double>& wind); /// Constructor: Initialise the Source_fct_pt and Wind_fct_pt /// to null and set (pointer to) Peclet number to default SphericalAdvectionDiffusionEquations() : Source_fct_pt(0), Wind_fct_pt(0) { // Set pointer to Peclet number to the default value zero Pe_pt = &Default_peclet_number; PeSt_pt = &Default_peclet_number; } /// Broken copy constructor SphericalAdvectionDiffusionEquations( const SphericalAdvectionDiffusionEquations& dummy) = delete; /// Broken assignment operator void operator=(const SphericalAdvectionDiffusionEquations&) = delete; /// Return the index at which the unknown value /// is stored. The default value, 0, is appropriate for single-physics /// problems, when there is only one variable, the value that satisfies /// the spherical advection-diffusion equation. /// In derived multi-physics elements, this function should be overloaded /// to reflect the chosen storage scheme. Note that these equations require /// that the unknown is always stored at the same index at each node. virtual inline unsigned u_index_spherical_adv_diff() const { return 0; } /// du/dt at local node n. /// Uses suitably interpolated value for hanging nodes. 
double du_dt_spherical_adv_diff(const unsigned& n) const { // Get the data's timestepper TimeStepper* time_stepper_pt = this->node_pt(n)->time_stepper_pt(); // Initialise dudt double dudt = 0.0; // Loop over the timesteps, if there is a non Steady timestepper if (!time_stepper_pt->is_steady()) { // Find the index at which the variable is stored const unsigned u_nodal_index = u_index_spherical_adv_diff(); // Number of timsteps (past & present) const unsigned n_time = time_stepper_pt->ntstorage(); for (unsigned t = 0; t < n_time; t++) { dudt += time_stepper_pt->weight(1, t) * nodal_value(t, n, u_nodal_index); } } return dudt; } /// Disable ALE -- empty overload to suppress warning. /// ALE isn't implemented anyway void disable_ALE() {} /// Output with default number of plot points void output(std::ostream& outfile) { unsigned nplot = 5; output(outfile, nplot); } /// Output FE representation of soln: r,z,u at /// nplot^2 plot points void output(std::ostream& outfile, const unsigned& nplot); /// C_style output with default number of plot points void output(FILE* file_pt) { unsigned n_plot = 5; output(file_pt, n_plot); } /// C-style output FE representation of soln: r,z,u at /// n_plot^2 plot points void output(FILE* file_pt, const unsigned& n_plot); /// Output exact soln: r,z,u_exact at nplot^2 plot points void output_fct(std::ostream& outfile, const unsigned& nplot, FiniteElement::SteadyExactSolutionFctPt exact_soln_pt); /// Get error against and norm of exact solution void compute_error(std::ostream& outfile, FiniteElement::SteadyExactSolutionFctPt exact_soln_pt, double& error, double& norm); /// Access function: Pointer to source function inline SphericalAdvectionDiffusionSourceFctPt& source_fct_pt() { return Source_fct_pt; } /// Access function: Pointer to source function. 
Const version inline SphericalAdvectionDiffusionSourceFctPt source_fct_pt() const { return Source_fct_pt; } /// Access function: Pointer to wind function inline SphericalAdvectionDiffusionWindFctPt& wind_fct_pt() { return Wind_fct_pt; } /// Access function: Pointer to wind function. Const version inline SphericalAdvectionDiffusionWindFctPt wind_fct_pt() const { return Wind_fct_pt; } // Access functions for the physical constants /// Peclet number inline const double& pe() const { return *Pe_pt; } /// Pointer to Peclet number inline double*& pe_pt() { return Pe_pt; } /// Peclet number multiplied by Strouhal number inline const double& pe_st() const { return *PeSt_pt; } /// Pointer to Peclet number multipled by Strouha number inline double*& pe_st_pt() { return PeSt_pt; } /// Get source term at (Eulerian) position x. This function is /// virtual to allow overloading in multi-physics problems where /// the strength of the source function might be determined by /// another system of equations inline virtual void get_source_spherical_adv_diff(const unsigned& ipt, const Vector<double>& x, double& source) const { // If no source function has been set, return zero if (Source_fct_pt == 0) { source = 0.0; } else { // Get source strength (*Source_fct_pt)(x, source); } } /// Get wind at (Eulerian) position x and/or local coordinate s. 
/// This function is /// virtual to allow overloading in multi-physics problems where /// the wind function might be determined by /// another system of equations inline virtual void get_wind_spherical_adv_diff(const unsigned& ipt, const Vector<double>& s, const Vector<double>& x, Vector<double>& wind) const { // If no wind function has been set, return zero if (Wind_fct_pt == 0) { for (unsigned i = 0; i < 3; i++) { wind[i] = 0.0; } } else { // Get wind (*Wind_fct_pt)(x, wind); } } /// Get flux: /// \f[ /// \mbox{flux}[i] = \nabla u = \mbox{d}u / \mbox{d} r /// + 1/r \mbox{d}u / \mbox{d} \theta /// \f] void get_flux(const Vector<double>& s, Vector<double>& flux) const { // Find out how many nodes there are in the element const unsigned n_node = nnode(); // Get the nodal index at which the unknown is stored const unsigned u_nodal_index = u_index_spherical_adv_diff(); // Set up memory for the shape and test functions Shape psi(n_node); DShape dpsidx(n_node, 2); // Call the derivatives of the shape and test functions dshape_eulerian(s, psi, dpsidx); // Initialise to zero for (unsigned j = 0; j < 2; j++) { flux[j] = 0.0; } // Loop over nodes for (unsigned l = 0; l < n_node; l++) { const double u_value = this->nodal_value(l, u_nodal_index); const double r = this->nodal_position(l, 0); // Add in the derivative directions flux[0] += u_value * dpsidx(l, 0); flux[1] += u_value * dpsidx(l, 1) / r; } } /// Add the element's contribution to its residual vector (wrapper) void fill_in_contribution_to_residuals(Vector<double>& residuals) { // Call the generic residuals function with flag set to 0 and using // a dummy matrix fill_in_generic_residual_contribution_spherical_adv_diff( residuals, GeneralisedElement::Dummy_matrix, GeneralisedElement::Dummy_matrix, 0); } /// Add the element's contribution to its residual vector and /// the element Jacobian matrix (wrapper) void fill_in_contribution_to_jacobian(Vector<double>& residuals, DenseMatrix<double>& jacobian) { // Call the 
generic routine with the flag set to 1 fill_in_generic_residual_contribution_spherical_adv_diff( residuals, jacobian, GeneralisedElement::Dummy_matrix, 1); } /// Add the element's contribution to its residual vector and /// the element Jacobian matrix (wrapper) and mass matrix void fill_in_contribution_to_jacobian_and_mass_matrix( Vector<double>& residuals, DenseMatrix<double>& jacobian, DenseMatrix<double>& mass_matrix) { // Call the generic routine with the flag set to 2 fill_in_generic_residual_contribution_spherical_adv_diff( residuals, jacobian, mass_matrix, 2); } /// Return FE representation of function value u(s) at local coordinate s inline double interpolated_u_spherical_adv_diff( const Vector<double>& s) const { // Find number of nodes const unsigned n_node = nnode(); // Get the nodal index at which the unknown is stored const unsigned u_nodal_index = u_index_spherical_adv_diff(); // Local shape function Shape psi(n_node); // Find values of shape function shape(s, psi); // Initialise value of u double interpolated_u = 0.0; // Loop over the local nodes and sum for (unsigned l = 0; l < n_node; l++) { interpolated_u += nodal_value(l, u_nodal_index) * psi[l]; } return (interpolated_u); } /// Return derivative of u at point s with respect to all data /// that can affect its value. /// In addition, return the global equation numbers corresponding to the /// data. 
This is virtual so that it can be overloaded in the /// refineable version virtual void dinterpolated_u_adv_diff_ddata( const Vector<double>& s, Vector<double>& du_ddata, Vector<unsigned>& global_eqn_number) { // Find number of nodes const unsigned n_node = nnode(); // Get the nodal index at which the unknown is stored const unsigned u_nodal_index = u_index_spherical_adv_diff(); // Local shape function Shape psi(n_node); // Find values of shape function shape(s, psi); // Find the number of dofs associated with interpolated u unsigned n_u_dof = 0; for (unsigned l = 0; l < n_node; l++) { int global_eqn = this->node_pt(l)->eqn_number(u_nodal_index); // If it's positive add to the count if (global_eqn >= 0) { ++n_u_dof; } } // Now resize the storage schemes du_ddata.resize(n_u_dof, 0.0); global_eqn_number.resize(n_u_dof, 0); // Loop over the nodes again and set the derivatives unsigned count = 0; for (unsigned l = 0; l < n_node; l++) { // Get the global equation number int global_eqn = this->node_pt(l)->eqn_number(u_nodal_index); // If it's positive if (global_eqn >= 0) { // Set the global equation number global_eqn_number[count] = global_eqn; // Set the derivative with respect to the unknown du_ddata[count] = psi[l]; // Increase the counter ++count; } } } /// Self-test: Return 0 for OK unsigned self_test(); protected: /// Shape/test functions and derivs w.r.t. to global coords at /// local coord. s; return Jacobian of mapping virtual double dshape_and_dtest_eulerian_spherical_adv_diff( const Vector<double>& s, Shape& psi, DShape& dpsidx, Shape& test, DShape& dtestdx) const = 0; /// Shape/test functions and derivs w.r.t. 
to global coords at /// integration point ipt; return Jacobian of mapping virtual double dshape_and_dtest_eulerian_at_knot_spherical_adv_diff( const unsigned& ipt, Shape& psi, DShape& dpsidx, Shape& test, DShape& dtestdx) const = 0; /// Add the element's contribution to its residual vector only /// (if flag=and/or element Jacobian matrix virtual void fill_in_generic_residual_contribution_spherical_adv_diff( Vector<double>& residuals, DenseMatrix<double>& jacobian, DenseMatrix<double>& mass_matrix, unsigned flag); // Physical constants /// Pointer to global Peclet number double* Pe_pt; /// Pointer to global Peclet number multiplied by Strouhal number double* PeSt_pt; /// Pointer to source function: SphericalAdvectionDiffusionSourceFctPt Source_fct_pt; /// Pointer to wind function: SphericalAdvectionDiffusionWindFctPt Wind_fct_pt; private: /// Static default value for the Peclet number static double Default_peclet_number; }; // End class SphericalAdvectionDiffusionEquations /// //////////////////////////////////////////////////////////////////////// /// //////////////////////////////////////////////////////////////////////// /// //////////////////////////////////////////////////////////////////////// //====================================================================== /// QSphericalAdvectionDiffusionElement elements are /// linear/quadrilateral/brick-shaped Axisymmetric Advection Diffusion /// elements with isoparametric interpolation for the function. 
//====================================================================== template<unsigned NNODE_1D> class QSphericalAdvectionDiffusionElement : public virtual QElement<2, NNODE_1D>, public virtual SphericalAdvectionDiffusionEquations { private: /// Static array of ints to hold number of variables at /// nodes: Initial_Nvalue[n] static const unsigned Initial_Nvalue; public: /// Constructor: Call constructors for QElement and /// Advection Diffusion equations QSphericalAdvectionDiffusionElement() : QElement<2, NNODE_1D>(), SphericalAdvectionDiffusionEquations() { } /// Broken copy constructor QSphericalAdvectionDiffusionElement( const QSphericalAdvectionDiffusionElement<NNODE_1D>& dummy) = delete; /// Broken assignment operator void operator=(const QSphericalAdvectionDiffusionElement<NNODE_1D>&) = delete; /// Required # of `values' (pinned or dofs) /// at node n inline unsigned required_nvalue(const unsigned& n) const { return Initial_Nvalue; } /// Output function: /// r,z,u void output(std::ostream& outfile) { SphericalAdvectionDiffusionEquations::output(outfile); } /// Output function: /// r,z,u at n_plot^2 plot points void output(std::ostream& outfile, const unsigned& n_plot) { SphericalAdvectionDiffusionEquations::output(outfile, n_plot); } /// C-style output function: /// r,z,u void output(FILE* file_pt) { SphericalAdvectionDiffusionEquations::output(file_pt); } /// C-style output function: /// r,z,u at n_plot^2 plot points void output(FILE* file_pt, const unsigned& n_plot) { SphericalAdvectionDiffusionEquations::output(file_pt, n_plot); } /// Output function for an exact solution: /// r,z,u_exact at n_plot^2 plot points void output_fct(std::ostream& outfile, const unsigned& n_plot, FiniteElement::SteadyExactSolutionFctPt exact_soln_pt) { SphericalAdvectionDiffusionEquations::output_fct( outfile, n_plot, exact_soln_pt); } protected: /// Shape, test functions & derivs. w.r.t. to global coords. Return /// Jacobian. 
inline double dshape_and_dtest_eulerian_spherical_adv_diff( const Vector<double>& s, Shape& psi, DShape& dpsidx, Shape& test, DShape& dtestdx) const; /// Shape, test functions & derivs. w.r.t. to global coords. at /// integration point ipt. Return Jacobian. inline double dshape_and_dtest_eulerian_at_knot_spherical_adv_diff( const unsigned& ipt, Shape& psi, DShape& dpsidx, Shape& test, DShape& dtestdx) const; }; // End class QSphericalAdvectionDiffusionElement // Inline functions: //====================================================================== /// Define the shape functions and test functions and derivatives /// w.r.t. global coordinates and return Jacobian of mapping. /// /// Galerkin: Test functions = shape functions //====================================================================== template<unsigned NNODE_1D> double QSphericalAdvectionDiffusionElement<NNODE_1D>:: dshape_and_dtest_eulerian_spherical_adv_diff(const Vector<double>& s, Shape& psi, DShape& dpsidx, Shape& test, DShape& dtestdx) const { // Call the geometrical shape functions and derivatives double J = this->dshape_eulerian(s, psi, dpsidx); // Loop over the test functions and derivatives and set them equal to the // shape functions for (unsigned i = 0; i < NNODE_1D; i++) { test[i] = psi[i]; for (unsigned j = 0; j < 2; j++) { dtestdx(i, j) = dpsidx(i, j); } } // Return the jacobian return J; } //====================================================================== /// Define the shape functions and test functions and derivatives /// w.r.t. global coordinates and return Jacobian of mapping. 
/// /// Galerkin: Test functions = shape functions //====================================================================== template<unsigned NNODE_1D> double QSphericalAdvectionDiffusionElement<NNODE_1D>:: dshape_and_dtest_eulerian_at_knot_spherical_adv_diff(const unsigned& ipt, Shape& psi, DShape& dpsidx, Shape& test, DShape& dtestdx) const { // Call the geometrical shape functions and derivatives double J = this->dshape_eulerian_at_knot(ipt, psi, dpsidx); // Set the test functions equal to the shape functions (pointer copy) test = psi; dtestdx = dpsidx; // Return the jacobian return J; } /// ///////////////////////////////////////////////////////////////////// /// ///////////////////////////////////////////////////////////////////// /// ///////////////////////////////////////////////////////////////////// template<unsigned NNODE_1D> class FaceGeometry<QSphericalAdvectionDiffusionElement<NNODE_1D>> : public virtual QElement<1, NNODE_1D> { public: /// Constructor: Call the constructor for the /// appropriate lower-dimensional QElement FaceGeometry() : QElement<1, NNODE_1D>() {} }; /// ///////////////////////////////////////////////////////////////////// /// ///////////////////////////////////////////////////////////////////// /// ///////////////////////////////////////////////////////////////////// //====================================================================== /// A class for elements that allow the imposition of an /// applied Robin boundary condition on the boundaries of Steady /// Axisymmnetric Advection Diffusion Flux elements. /// \f[ /// -\Delta u \cdot \mathbf{n} + \alpha(r,z) u = \beta(r,z) /// \f] /// The element geometry is obtained from the FaceGeometry<ELEMENT> /// policy class. 
//====================================================================== template<class ELEMENT> class SphericalAdvectionDiffusionFluxElement : public virtual FaceGeometry<ELEMENT>, public virtual FaceElement { public: /// Function pointer to the prescribed-beta function fct(x,beta(x)) /// -- x is a Vector! typedef void (*SphericalAdvectionDiffusionPrescribedBetaFctPt)( const Vector<double>& x, double& beta); /// Function pointer to the prescribed-alpha function fct(x,alpha(x)) /// -- x is a Vector! typedef void (*SphericalAdvectionDiffusionPrescribedAlphaFctPt)( const Vector<double>& x, double& alpha); /// Constructor, takes the pointer to the "bulk" element /// and the index of the face to be created SphericalAdvectionDiffusionFluxElement(FiniteElement* const& bulk_el_pt, const int& face_index); /// Broken empty constructor SphericalAdvectionDiffusionFluxElement() { throw OomphLibError("Don't call empty constructor for " "SphericalAdvectionDiffusionFluxElement", OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION); } /// Broken copy constructor SphericalAdvectionDiffusionFluxElement( const SphericalAdvectionDiffusionFluxElement& dummy) = delete; /// Broken assignment operator void operator=(const SphericalAdvectionDiffusionFluxElement&) = delete; /// Access function for the prescribed-beta function pointer SphericalAdvectionDiffusionPrescribedBetaFctPt& beta_fct_pt() { return Beta_fct_pt; } /// Access function for the prescribed-alpha function pointer SphericalAdvectionDiffusionPrescribedAlphaFctPt& alpha_fct_pt() { return Alpha_fct_pt; } /// Add the element's contribution to its residual vector inline void fill_in_contribution_to_residuals(Vector<double>& residuals) { // Call the generic residuals function with flag set to 0 // using a dummy matrix fill_in_generic_residual_contribution_spherical_adv_diff_flux( residuals, GeneralisedElement::Dummy_matrix, 0); } /// Add the element's contribution to its residual vector and /// its Jacobian matrix inline void 
fill_in_contribution_to_jacobian(Vector<double>& residuals, DenseMatrix<double>& jacobian) { // Call the generic routine with the flag set to 1 fill_in_generic_residual_contribution_spherical_adv_diff_flux( residuals, jacobian, 1); } /// Specify the value of nodal zeta from the face geometry /// The "global" intrinsic coordinate of the element when /// viewed as part of a geometric object should be given by /// the FaceElement representation, by default (needed to break /// indeterminacy if bulk element is SolidElement) double zeta_nodal(const unsigned& n, const unsigned& k, const unsigned& i) const { return FaceElement::zeta_nodal(n, k, i); } /// Output function -- forward to broken version in FiniteElement /// until somebody decides what exactly they want to plot here... void output(std::ostream& outfile) { FiniteElement::output(outfile); } /// Output function -- forward to broken version in FiniteElement /// until somebody decides what exactly they want to plot here... void output(std::ostream& outfile, const unsigned& nplot) { FiniteElement::output(outfile, nplot); } protected: /// Function to compute the shape and test functions and to return /// the Jacobian of mapping between local and global (Eulerian) /// coordinates inline double shape_and_test(const Vector<double>& s, Shape& psi, Shape& test) const { // Find number of nodes unsigned n_node = nnode(); // Get the shape functions shape(s, psi); // Set the test functions to be the same as the shape functions for (unsigned i = 0; i < n_node; i++) { test[i] = psi[i]; } // Return the value of the jacobian return J_eulerian(s); } /// Function to compute the shape and test functions and to return /// the Jacobian of mapping between local and global (Eulerian) /// coordinates inline double shape_and_test_at_knot(const unsigned& ipt, Shape& psi, Shape& test) const { // Find number of nodes unsigned n_node = nnode(); // Get the shape functions shape_at_knot(ipt, psi); // Set the test functions to be the same as the 
shape functions for (unsigned i = 0; i < n_node; i++) { test[i] = psi[i]; } // Return the value of the jacobian return J_eulerian_at_knot(ipt); } /// Function to calculate the prescribed beta at a given spatial /// position void get_beta(const Vector<double>& x, double& beta) { // If the function pointer is zero return zero if (Beta_fct_pt == 0) { beta = 0.0; } // Otherwise call the function else { (*Beta_fct_pt)(x, beta); } } /// Function to calculate the prescribed alpha at a given spatial /// position void get_alpha(const Vector<double>& x, double& alpha) { // If the function pointer is zero return zero if (Alpha_fct_pt == 0) { alpha = 0.0; } // Otherwise call the function else { (*Alpha_fct_pt)(x, alpha); } } private: /// Add the element's contribution to its residual vector. /// flag=1(or 0): do (or don't) compute the Jacobian as well. void fill_in_generic_residual_contribution_spherical_adv_diff_flux( Vector<double>& residuals, DenseMatrix<double>& jacobian, unsigned flag); /// Function pointer to the (global) prescribed-beta function SphericalAdvectionDiffusionPrescribedBetaFctPt Beta_fct_pt; /// Function pointer to the (global) prescribed-alpha function SphericalAdvectionDiffusionPrescribedAlphaFctPt Alpha_fct_pt; /// The index at which the unknown is stored at the nodes unsigned U_index_adv_diff; }; // End class SphericalAdvectionDiffusionFluxElement /// //////////////////////////////////////////////////////////////////// /// //////////////////////////////////////////////////////////////////// /// //////////////////////////////////////////////////////////////////// //=========================================================================== /// Constructor, takes the pointer to the "bulk" element and the index /// of the face to be created //=========================================================================== template<class ELEMENT> SphericalAdvectionDiffusionFluxElement<ELEMENT>:: SphericalAdvectionDiffusionFluxElement(FiniteElement* const& 
bulk_el_pt, const int& face_index) : FaceGeometry<ELEMENT>(), FaceElement() { // Let the bulk element build the FaceElement, i.e. setup the pointers // to its nodes (by referring to the appropriate nodes in the bulk // element), etc. bulk_el_pt->build_face_element(face_index, this); #ifdef PARANOID { // Check that the element is not a refineable 3d element ELEMENT* elem_pt = dynamic_cast<ELEMENT*>(bulk_el_pt); // If it's three-d if (elem_pt->dim() == 3) { // Is it refineable RefineableElement* ref_el_pt = dynamic_cast<RefineableElement*>(elem_pt); if (ref_el_pt != 0) { if (this->has_hanging_nodes()) { throw OomphLibError("This flux element will not work correctly if " "nodes are hanging\n", OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION); } } } } #endif // Initialise the prescribed-beta function pointer to zero Beta_fct_pt = 0; // Set up U_index_adv_diff. Initialise to zero, which probably won't change // in most cases, oh well, the price we pay for generality U_index_adv_diff = 0; // Cast to the appropriate AdvectionDiffusionEquation so that we can // find the index at which the variable is stored // We assume that the dimension of the full problem is the same // as the dimension of the node, if this is not the case you will have // to write custom elements, sorry SphericalAdvectionDiffusionEquations* eqn_pt = dynamic_cast<SphericalAdvectionDiffusionEquations*>(bulk_el_pt); // If the cast has failed die if (eqn_pt == 0) { std::string error_string = "Bulk element must inherit from SphericalAdvectionDiffusionEquations."; error_string += "Nodes are two dimensional, but cannot cast the bulk element to\n"; error_string += "SphericalAdvectionDiffusionEquations<2>\n."; error_string += "If you desire this functionality, you must implement it yourself\n"; throw OomphLibError( error_string, OOMPH_CURRENT_FUNCTION, OOMPH_EXCEPTION_LOCATION); } else { // Read the index from the (cast) bulk element. 
U_index_adv_diff = eqn_pt->u_index_spherical_adv_diff(); } } //=========================================================================== /// Compute the element's residual vector and the (zero) Jacobian /// matrix for the Robin boundary condition: /// \f[ /// \Delta u \cdot \mathbf{n} + \alpha (\mathbf{x}) = \beta (\mathbf{x}) /// \f] //=========================================================================== template<class ELEMENT> void SphericalAdvectionDiffusionFluxElement<ELEMENT>:: fill_in_generic_residual_contribution_spherical_adv_diff_flux( Vector<double>& residuals, DenseMatrix<double>& jacobian, unsigned flag) { // Find out how many nodes there are const unsigned n_node = nnode(); // Locally cache the index at which the variable is stored const unsigned u_index_spherical_adv_diff = U_index_adv_diff; // Set up memory for the shape and test functions Shape psif(n_node), testf(n_node); // Set the value of n_intpt const unsigned n_intpt = integral_pt()->nweight(); // Set the Vector to hold local coordinates Vector<double> s(1); // Integers used to store the local equation number and local unknown // indices for the residuals and jacobians int local_eqn = 0, local_unknown = 0; // Loop over the integration points //-------------------------------- for (unsigned ipt = 0; ipt < n_intpt; ipt++) { // Assign values of s for (unsigned i = 0; i < 1; i++) { s[i] = integral_pt()->knot(ipt, i); } // Get the integral weight double w = integral_pt()->weight(ipt); // Find the shape and test functions and return the Jacobian // of the mapping double J = shape_and_test(s, psif, testf); // Premultiply the weights and the Jacobian double W = w * J; // Calculate local values of the solution and its derivatives // Allocate double interpolated_u = 0.0; Vector<double> interpolated_x(2, 0.0); // Calculate position for (unsigned l = 0; l < n_node; l++) { // Get the value at the node double u_value = raw_nodal_value(l, u_index_spherical_adv_diff); interpolated_u += u_value * 
psif(l); // Loop over coordinate direction for (unsigned i = 0; i < 2; i++) { interpolated_x[i] += nodal_position(l, i) * psif(l); } } // Get the imposed beta (beta=flux when alpha=0.0) double beta; get_beta(interpolated_x, beta); // Get the imposed alpha double alpha; get_alpha(interpolated_x, alpha); // calculate the area weighting dS = r^{2} sin theta dr dtheta // r = x[0] and theta = x[1] double dS = interpolated_x[0] * interpolated_x[0] * sin(interpolated_x[1]); // Now add to the appropriate equations // Loop over the test functions for (unsigned l = 0; l < n_node; l++) { // Set the local equation number local_eqn = nodal_local_eqn(l, u_index_spherical_adv_diff); /*IF it's not a boundary condition*/ if (local_eqn >= 0) { // Add the prescribed beta terms residuals[local_eqn] -= dS * (beta - alpha * interpolated_u) * testf(l) * W; // Calculate the Jacobian //---------------------- if ((flag) && (alpha != 0.0)) { // Loop over the velocity shape functions again for (unsigned l2 = 0; l2 < n_node; l2++) { // Set the number of the unknown local_unknown = nodal_local_eqn(l2, u_index_spherical_adv_diff); // If at a non-zero degree of freedom add in the entry if (local_unknown >= 0) { jacobian(local_eqn, local_unknown) += dS * alpha * psif[l2] * testf[l] * W; } } } } } // end loop over test functions } // end loop over integration points } // end fill_in_generic_residual_contribution_adv_diff_flux } // namespace oomph #endif
33.197222
79
0.59426
[ "geometry", "object", "shape", "vector", "3d" ]