/* NOTE(review): the following four tokens are dataset-export metadata accidentally
   prepended to this C++ source file (column name / type / min / max of a string
   column). They are not part of the original file and should be deleted:
   text | stringlengths | 5 | 1.04M */
/* By downloading, copying, installing or using the software you agree to this license. If you do not agree to this license, do not download, install, copy or use the software. License Agreement For Open Source Computer Vision Library (3-clause BSD License) Copyright (C) 2013, OpenCV Foundation, all rights reserved. Third party copyrights are property of their respective owners. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the names of the copyright holders nor the names of the contributors may be used to endorse or promote products derived from this software without specific prior written permission. This software is provided by the copyright holders and contributors "as is" and any express or implied warranties, including, but not limited to, the implied warranties of merchantability and fitness for a particular purpose are disclaimed. In no event shall copyright holders or contributors be liable for any direct, indirect, incidental, special, exemplary, or consequential damages (including, but not limited to, procurement of substitute goods or services; loss of use, data, or profits; or business interruption) however caused and on any theory of liability, whether in contract, strict liability, or tort (including negligence or otherwise) arising in any way out of the use of this software, even if advised of the possibility of such damage. 
*/
#include "precomp.hpp"
#include "opencv2/aruco.hpp"
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

namespace cv {
namespace aruco {

using namespace std;

/**
 * @brief Default detector parameters. Values follow the original ArUco library defaults.
 */
DetectorParameters::DetectorParameters()
    : adaptiveThreshWinSizeMin(3), adaptiveThreshWinSizeMax(23), adaptiveThreshWinSizeStep(10),
      adaptiveThreshConstant(7), minMarkerPerimeterRate(0.03), maxMarkerPerimeterRate(4.),
      polygonalApproxAccuracyRate(0.03), minCornerDistanceRate(0.05), minDistanceToBorder(3),
      minMarkerDistanceRate(0.05), doCornerRefinement(false), cornerRefinementWinSize(5),
      cornerRefinementMaxIterations(30), cornerRefinementMinAccuracy(0.1), markerBorderBits(1),
      perspectiveRemovePixelPerCell(4), perspectiveRemoveIgnoredMarginPerCell(0.13),
      maxErroneousBitsInBorderRate(0.35), minOtsuStdDev(5.0), errorCorrectionRate(0.6) {}


/**
 * @brief Create a new set of DetectorParameters with default values.
 */
Ptr<DetectorParameters> DetectorParameters::create() {
    Ptr<DetectorParameters> params = makePtr<DetectorParameters>();
    return params;
}


/**
 * @brief Convert input image to gray if it is a 3-channels image
 *
 * Only 1-channel (already grey, copied through) and 8-bit 3-channel BGR inputs
 * are accepted.
 */
static void _convertToGrey(InputArray _in, OutputArray _out) {

    CV_Assert(_in.getMat().channels() == 1 || _in.getMat().channels() == 3);

    _out.create(_in.getMat().size(), CV_8UC1);
    if(_in.getMat().type() == CV_8UC3)
        cvtColor(_in.getMat(), _out.getMat(), COLOR_BGR2GRAY);
    else
        _in.getMat().copyTo(_out);
}


/**
 * @brief Threshold input image using adaptive thresholding
 *
 * Produces an inverted binary image (markers' black border becomes white),
 * which is what findContours expects downstream.
 */
static void _threshold(InputArray _in, OutputArray _out, int winSize, double constant) {

    CV_Assert(winSize >= 3);
    if(winSize % 2 == 0) winSize++; // win size must be odd
    adaptiveThreshold(_in, _out, 255, ADAPTIVE_THRESH_MEAN_C, THRESH_BINARY_INV, winSize, constant);
}


/**
 * @brief Given a thresholded image, find the contours, calculate their polygonal approximation
 * and take those that accomplish some conditions (square shape, convexity, size,
 * corner separation, distance to the image border)
 */
static void _findMarkerContours(InputArray _in, vector< vector< Point2f > > &candidates,
                                vector< vector<
                                Point > > &contoursOut, double minPerimeterRate,
                                double maxPerimeterRate, double accuracyRate,
                                double minCornerDistanceRate, int minDistanceToBorder) {

    CV_Assert(minPerimeterRate > 0 && maxPerimeterRate > 0 && accuracyRate > 0 &&
              minCornerDistanceRate >= 0 && minDistanceToBorder >= 0);

    // calculate maximum and minimum sizes in pixels
    // (perimeter rates are relative to the largest image dimension)
    unsigned int minPerimeterPixels =
        (unsigned int)(minPerimeterRate * max(_in.getMat().cols, _in.getMat().rows));
    unsigned int maxPerimeterPixels =
        (unsigned int)(maxPerimeterRate * max(_in.getMat().cols, _in.getMat().rows));

    Mat contoursImg;
    _in.getMat().copyTo(contoursImg); // findContours modifies its input, so work on a copy
    vector< vector< Point > > contours;
    findContours(contoursImg, contours, RETR_LIST, CHAIN_APPROX_NONE);

    // now filter list of contours
    for(unsigned int i = 0; i < contours.size(); i++) {
        // check perimeter
        if(contours[i].size() < minPerimeterPixels || contours[i].size() > maxPerimeterPixels)
            continue;

        // check is square and is convex
        vector< Point > approxCurve;
        approxPolyDP(contours[i], approxCurve, double(contours[i].size()) * accuracyRate, true);
        if(approxCurve.size() != 4 || !isContourConvex(approxCurve)) continue;

        // check min distance between corners (squared distance of each side)
        double minDistSq =
            max(contoursImg.cols, contoursImg.rows) * max(contoursImg.cols, contoursImg.rows);
        for(int j = 0; j < 4; j++) {
            double d = (double)(approxCurve[j].x - approxCurve[(j + 1) % 4].x) *
                           (double)(approxCurve[j].x - approxCurve[(j + 1) % 4].x) +
                       (double)(approxCurve[j].y - approxCurve[(j + 1) % 4].y) *
                           (double)(approxCurve[j].y - approxCurve[(j + 1) % 4].y);
            minDistSq = min(minDistSq, d);
        }
        double minCornerDistancePixels = double(contours[i].size()) * minCornerDistanceRate;
        if(minDistSq < minCornerDistancePixels * minCornerDistancePixels) continue;

        // check if it is too near to the image border
        bool tooNearBorder = false;
        for(int j = 0; j < 4; j++) {
            if(approxCurve[j].x < minDistanceToBorder || approxCurve[j].y < minDistanceToBorder ||
               approxCurve[j].x > contoursImg.cols - 1 - minDistanceToBorder ||
               approxCurve[j].y > contoursImg.rows - 1 - minDistanceToBorder)
                tooNearBorder = true;
        }
        if(tooNearBorder) continue;

        // if it passes all the test, add to candidates vector
        vector< Point2f > currentCandidate;
        currentCandidate.resize(4);
        for(int j = 0; j < 4; j++) {
            currentCandidate[j] = Point2f((float)approxCurve[j].x, (float)approxCurve[j].y);
        }
        candidates.push_back(currentCandidate);
        contoursOut.push_back(contours[i]);
    }
}


/**
 * @brief Assure order of candidate corners is clockwise direction
 *
 * Uses the sign of the cross product of the first two edges; a negative value
 * means counter-clockwise ordering, fixed by swapping corners 1 and 3.
 */
static void _reorderCandidatesCorners(vector< vector< Point2f > > &candidates) {

    for(unsigned int i = 0; i < candidates.size(); i++) {
        double dx1 = candidates[i][1].x - candidates[i][0].x;
        double dy1 = candidates[i][1].y - candidates[i][0].y;
        double dx2 = candidates[i][2].x - candidates[i][0].x;
        double dy2 = candidates[i][2].y - candidates[i][0].y;
        double crossProduct = (dx1 * dy2) - (dy1 * dx2);

        if(crossProduct < 0.0) { // not clockwise direction
            swap(candidates[i][1], candidates[i][3]);
        }
    }
}


/**
 * @brief Check candidates that are too close to each other and remove the smaller one
 */
static void _filterTooCloseCandidates(const vector< vector< Point2f > > &candidatesIn,
                                      vector< vector< Point2f > > &candidatesOut,
                                      const vector< vector< Point > > &contoursIn,
                                      vector< vector< Point > > &contoursOut,
                                      double minMarkerDistanceRate) {

    CV_Assert(minMarkerDistanceRate >= 0);

    vector< pair< int, int > > nearCandidates;
    for(unsigned int i = 0; i < candidatesIn.size(); i++) {
        for(unsigned int j = i + 1; j < candidatesIn.size(); j++) {

            int minimumPerimeter = min((int)contoursIn[i].size(), (int)contoursIn[j].size() );

            // fc is the first corner considered on one of the markers, 4 combinations are possible
            for(int fc = 0; fc < 4; fc++) {
                double distSq = 0;
                for(int c = 0; c < 4; c++) {
                    // modC is the corner considering first corner is fc
                    int modC = (c + fc) % 4;
                    distSq += (candidatesIn[i][modC].x - candidatesIn[j][c].x) *
                                  (candidatesIn[i][modC].x - candidatesIn[j][c].x) +
                              (candidatesIn[i][modC].y - candidatesIn[j][c].y) *
                                  (candidatesIn[i][modC].y - candidatesIn[j][c].y);
                }
                distSq /= 4.;

                // if mean square distance is too low, remove the smaller one of the two markers
                double minMarkerDistancePixels = double(minimumPerimeter) * minMarkerDistanceRate;
                if(distSq < minMarkerDistancePixels * minMarkerDistancePixels) {
                    nearCandidates.push_back(pair< int, int >(i, j));
                    break;
                }
            }
        }
    }

    // mark smaller one in pairs to remove
    vector< bool > toRemove(candidatesIn.size(), false);
    for(unsigned int i = 0; i < nearCandidates.size(); i++) {
        // if one of the markers has already been marked for removal, dont need to do anything
        if(toRemove[nearCandidates[i].first] || toRemove[nearCandidates[i].second]) continue;
        size_t perimeter1 = contoursIn[nearCandidates[i].first].size();
        size_t perimeter2 = contoursIn[nearCandidates[i].second].size();
        if(perimeter1 > perimeter2)
            toRemove[nearCandidates[i].second] = true;
        else
            toRemove[nearCandidates[i].first] = true;
    }

    // remove extra candidates
    candidatesOut.clear();
    unsigned long totalRemaining = 0;
    for(unsigned int i = 0; i < toRemove.size(); i++)
        if(!toRemove[i]) totalRemaining++;
    candidatesOut.resize(totalRemaining);
    contoursOut.resize(totalRemaining);
    for(unsigned int i = 0, currIdx = 0; i < candidatesIn.size(); i++) {
        if(toRemove[i]) continue;
        candidatesOut[currIdx] = candidatesIn[i];
        contoursOut[currIdx] = contoursIn[i];
        currIdx++;
    }
}


/**
 * ParallelLoopBody class for the parallelization of the basic candidate detections using
 * different threshold window sizes.
 * Called from function _detectInitialCandidates()
 */
class DetectInitialCandidatesParallel : public ParallelLoopBody {
    public:
    DetectInitialCandidatesParallel(const Mat *_grey,
                                    vector< vector< vector< Point2f > > > *_candidatesArrays,
                                    vector< vector< vector< Point > > > *_contoursArrays,
                                    const Ptr<DetectorParameters> &_params)
        : grey(_grey), candidatesArrays(_candidatesArrays), contoursArrays(_contoursArrays),
          params(_params) {}

    void operator()(const Range &range) const {
        const int begin = range.start;
        const int end = range.end;

        for(int i = begin; i < end; i++) {
            int currScale = params->adaptiveThreshWinSizeMin + i * params->adaptiveThreshWinSizeStep;
            // threshold
            Mat thresh;
            _threshold(*grey, thresh, currScale, params->adaptiveThreshConstant);

            // detect rectangles
            _findMarkerContours(thresh, (*candidatesArrays)[i], (*contoursArrays)[i],
                                params->minMarkerPerimeterRate, params->maxMarkerPerimeterRate,
                                params->polygonalApproxAccuracyRate,
                                params->minCornerDistanceRate, params->minDistanceToBorder);
        }
    }

    private:
    DetectInitialCandidatesParallel &operator=(const DetectInitialCandidatesParallel &);

    const Mat *grey;
    vector< vector< vector< Point2f > > > *candidatesArrays;
    vector< vector< vector< Point > > > *contoursArrays;
    // NOTE(review): reference member is safe only because parallel_for_ runs
    // synchronously within _detectInitialCandidates() — confirm if reused elsewhere.
    const Ptr<DetectorParameters> &params;
};


/**
 * @brief Initial steps on finding square candidates: adaptive threshold at each
 * window-size scale, then contour-based rectangle detection per scale.
 */
static void _detectInitialCandidates(const Mat &grey, vector< vector< Point2f > > &candidates,
                                     vector< vector< Point > > &contours,
                                     const Ptr<DetectorParameters> &params) {

    CV_Assert(params->adaptiveThreshWinSizeMin >= 3 && params->adaptiveThreshWinSizeMax >= 3);
    CV_Assert(params->adaptiveThreshWinSizeMax >= params->adaptiveThreshWinSizeMin);
    CV_Assert(params->adaptiveThreshWinSizeStep > 0);

    // number of window sizes (scales) to apply adaptive thresholding
    int nScales = (params->adaptiveThreshWinSizeMax - params->adaptiveThreshWinSizeMin) /
                      params->adaptiveThreshWinSizeStep + 1;

    vector< vector< vector< Point2f > > > candidatesArrays((size_t) nScales);
    vector< vector< vector< Point > > > contoursArrays((size_t) nScales);

    ////for each value in the interval of thresholding window sizes
    // for(int i = 0; i < nScales; i++) {
    //    int currScale = params.adaptiveThreshWinSizeMin + i*params.adaptiveThreshWinSizeStep;
    //    // treshold
    //    Mat thresh;
    //    _threshold(grey, thresh, currScale, params.adaptiveThreshConstant);
    //    // detect rectangles
    //    _findMarkerContours(thresh, candidatesArrays[i], contoursArrays[i],
    //                        params.minMarkerPerimeterRate,
    //                        params.maxMarkerPerimeterRate, params.polygonalApproxAccuracyRate,
    //                        params.minCornerDistance, params.minDistanceToBorder);
    //}

    // this is the parallel call for the previous commented loop (result is equivalent)
    parallel_for_(Range(0, nScales),
                  DetectInitialCandidatesParallel(&grey, &candidatesArrays, &contoursArrays,
                                                  params));

    // join candidates from all the scales into the single output vectors
    for(int i = 0; i < nScales; i++) {
        for(unsigned int j = 0; j < candidatesArrays[i].size(); j++) {
            candidates.push_back(candidatesArrays[i][j]);
            contours.push_back(contoursArrays[i][j]);
        }
    }
}


/**
 * @brief Detect square candidates in the input image
 */
static void _detectCandidates(InputArray _image, vector< vector< Point2f > >& candidatesOut,
                              vector< vector< Point > >& contoursOut,
                              const Ptr<DetectorParameters> &_params) {

    Mat image = _image.getMat();
    CV_Assert(image.total() != 0);

    /// 1. CONVERT TO GRAY
    Mat grey;
    _convertToGrey(image, grey);

    vector< vector< Point2f > > candidates;
    vector< vector< Point > > contours;
    /// 2. DETECT FIRST SET OF CANDIDATES
    _detectInitialCandidates(grey, candidates, contours, _params);

    /// 3. SORT CORNERS
    _reorderCandidatesCorners(candidates);

    /// 4.
    /// FILTER OUT NEAR CANDIDATE PAIRS
    _filterTooCloseCandidates(candidates, candidatesOut, contours, contoursOut,
                              _params->minMarkerDistanceRate);
}


/**
 * @brief Given an input image and candidate corners, extract the bits of the candidate, including
 * the border bits
 */
static Mat _extractBits(InputArray _image, InputArray _corners, int markerSize,
                        int markerBorderBits, int cellSize, double cellMarginRate,
                        double minStdDevOtsu) {

    CV_Assert(_image.getMat().channels() == 1);
    CV_Assert(_corners.total() == 4);
    CV_Assert(markerBorderBits > 0 && cellSize > 0 && cellMarginRate >= 0 && cellMarginRate <= 1);
    CV_Assert(minStdDevOtsu >= 0);

    // number of bits in the marker
    int markerSizeWithBorders = markerSize + 2 * markerBorderBits;
    int cellMarginPixels = int(cellMarginRate * cellSize);

    Mat resultImg; // marker image after removing perspective
    int resultImgSize = markerSizeWithBorders * cellSize;
    Mat resultImgCorners(4, 1, CV_32FC2);
    resultImgCorners.ptr< Point2f >(0)[0] = Point2f(0, 0);
    resultImgCorners.ptr< Point2f >(0)[1] = Point2f((float)resultImgSize - 1, 0);
    resultImgCorners.ptr< Point2f >(0)[2] =
        Point2f((float)resultImgSize - 1, (float)resultImgSize - 1);
    resultImgCorners.ptr< Point2f >(0)[3] = Point2f(0, (float)resultImgSize - 1);

    // remove perspective
    Mat transformation = getPerspectiveTransform(_corners, resultImgCorners);
    warpPerspective(_image, resultImg, transformation, Size(resultImgSize, resultImgSize),
                    INTER_NEAREST);

    // output image containing the bits
    Mat bits(markerSizeWithBorders, markerSizeWithBorders, CV_8UC1, Scalar::all(0));

    // check if standard deviation is enough to apply Otsu
    // if not enough, it probably means all bits are the same color (black or white)
    Mat mean, stddev;
    // Remove some border just to avoid border noise from perspective transformation
    Mat innerRegion = resultImg.colRange(cellSize / 2, resultImg.cols - cellSize / 2)
                          .rowRange(cellSize / 2, resultImg.rows - cellSize / 2);
    meanStdDev(innerRegion, mean, stddev);
    if(stddev.ptr< double
       >(0)[0] < minStdDevOtsu) {
        // all black or all white, depending on mean value
        if(mean.ptr< double >(0)[0] > 127)
            bits.setTo(1);
        else
            bits.setTo(0);
        return bits;
    }

    // now extract code, first threshold using Otsu
    threshold(resultImg, resultImg, 125, 255, THRESH_BINARY | THRESH_OTSU);

    // for each cell
    for(int y = 0; y < markerSizeWithBorders; y++) {
        for(int x = 0; x < markerSizeWithBorders; x++) {
            int Xstart = x * (cellSize) + cellMarginPixels;
            int Ystart = y * (cellSize) + cellMarginPixels;
            Mat square = resultImg(Rect(Xstart, Ystart, cellSize - 2 * cellMarginPixels,
                                        cellSize - 2 * cellMarginPixels));
            // count white pixels on each cell to assign its value
            size_t nZ = (size_t) countNonZero(square);
            if(nZ > square.total() / 2) bits.at< unsigned char >(y, x) = 1;
        }
    }

    return bits;
}


/**
 * @brief Return number of erroneous bits in border, i.e. number of white bits in border.
 */
static int _getBorderErrors(const Mat &bits, int markerSize, int borderSize) {

    int sizeWithBorders = markerSize + 2 * borderSize;

    CV_Assert(markerSize > 0 && bits.cols == sizeWithBorders && bits.rows == sizeWithBorders);

    int totalErrors = 0;
    // left and right border columns (full height)
    for(int y = 0; y < sizeWithBorders; y++) {
        for(int k = 0; k < borderSize; k++) {
            if(bits.ptr< unsigned char >(y)[k] != 0) totalErrors++;
            if(bits.ptr< unsigned char >(y)[sizeWithBorders - 1 - k] != 0) totalErrors++;
        }
    }
    // top and bottom border rows (excluding the corners already counted above)
    for(int x = borderSize; x < sizeWithBorders - borderSize; x++) {
        for(int k = 0; k < borderSize; k++) {
            if(bits.ptr< unsigned char >(k)[x] != 0) totalErrors++;
            if(bits.ptr< unsigned char >(sizeWithBorders - 1 - k)[x] != 0) totalErrors++;
        }
    }
    return totalErrors;
}


/**
 * @brief Tries to identify one candidate given the dictionary
 */
static bool _identifyOneCandidate(const Ptr<Dictionary> &dictionary, InputArray _image,
                                  InputOutputArray _corners, int &idx,
                                  const Ptr<DetectorParameters> &params) {

    CV_Assert(_corners.total() == 4);
    CV_Assert(_image.getMat().total() != 0);
    CV_Assert(params->markerBorderBits > 0);

    // get bits
    Mat candidateBits =
_extractBits(_image, _corners, dictionary->markerSize, params->markerBorderBits, params->perspectiveRemovePixelPerCell, params->perspectiveRemoveIgnoredMarginPerCell, params->minOtsuStdDev); // analyze border bits int maximumErrorsInBorder = int(dictionary->markerSize * dictionary->markerSize * params->maxErroneousBitsInBorderRate); int borderErrors = _getBorderErrors(candidateBits, dictionary->markerSize, params->markerBorderBits); if(borderErrors > maximumErrorsInBorder) return false; // border is wrong // take only inner bits Mat onlyBits = candidateBits.rowRange(params->markerBorderBits, candidateBits.rows - params->markerBorderBits) .colRange(params->markerBorderBits, candidateBits.rows - params->markerBorderBits); // try to indentify the marker int rotation; if(!dictionary->identify(onlyBits, idx, rotation, params->errorCorrectionRate)) return false; else { // shift corner positions to the correct rotation if(rotation != 0) { Mat copyPoints = _corners.getMat().clone(); for(int j = 0; j < 4; j++) _corners.getMat().ptr< Point2f >(0)[j] = copyPoints.ptr< Point2f >(0)[(j + 4 - rotation) % 4]; } return true; } } /** * ParallelLoopBody class for the parallelization of the marker identification step * Called from function _identifyCandidates() */ class IdentifyCandidatesParallel : public ParallelLoopBody { public: IdentifyCandidatesParallel(const Mat& _grey, InputArrayOfArrays _candidates, InputArrayOfArrays _contours, const Ptr<Dictionary> &_dictionary, vector< int >& _idsTmp, vector< char >& _validCandidates, const Ptr<DetectorParameters> &_params) : grey(_grey), candidates(_candidates), contours(_contours), dictionary(_dictionary), idsTmp(_idsTmp), validCandidates(_validCandidates), params(_params) {} void operator()(const Range &range) const { const int begin = range.start; const int end = range.end; for(int i = begin; i < end; i++) { int currId; Mat currentCandidate = candidates.getMat(i); if(_identifyOneCandidate(dictionary, grey, currentCandidate, currId, 
                                     params)) {
                validCandidates[i] = 1;
                idsTmp[i] = currId;
            }
        }
    }

    private:
    IdentifyCandidatesParallel &operator=(const IdentifyCandidatesParallel &); // to quiet MSVC

    const Mat &grey;
    InputArrayOfArrays candidates, contours;
    const Ptr<Dictionary> &dictionary;
    vector< int > &idsTmp;
    vector< char > &validCandidates;
    const Ptr<DetectorParameters> &params;
};


/**
 * @brief Copy the contents of a corners vector to an OutputArray, setting its size.
 */
static void _copyVector2Output(vector< vector< Point2f > > &vec, OutputArrayOfArrays out) {
    out.create((int)vec.size(), 1, CV_32FC2);

    if(out.isMatVector()) {
        for (unsigned int i = 0; i < vec.size(); i++) {
            out.create(4, 1, CV_32FC2, i);
            Mat &m = out.getMatRef(i);
            Mat(Mat(vec[i]).t()).copyTo(m);
        }
    }
    else if(out.isUMatVector()) {
        for (unsigned int i = 0; i < vec.size(); i++) {
            out.create(4, 1, CV_32FC2, i);
            UMat &m = out.getUMatRef(i);
            Mat(Mat(vec[i]).t()).copyTo(m);
        }
    }
    else if(out.kind() == _OutputArray::STD_VECTOR_VECTOR){
        for (unsigned int i = 0; i < vec.size(); i++) {
            out.create(4, 1, CV_32FC2, i);
            Mat m = out.getMat(i);
            Mat(Mat(vec[i]).t()).copyTo(m);
        }
    }
    else {
        CV_Error(cv::Error::StsNotImplemented,
                 "Only Mat vector, UMat vector, and vector<vector> OutputArrays are currently supported.");
    }
}


/**
 * @brief Identify square candidates according to a marker dictionary
 */
static void _identifyCandidates(InputArray _image, vector< vector< Point2f > >& _candidates,
                                InputArrayOfArrays _contours, const Ptr<Dictionary> &_dictionary,
                                vector< vector< Point2f > >& _accepted, vector< int >& ids,
                                const Ptr<DetectorParameters> &params,
                                OutputArrayOfArrays _rejected = noArray()) {

    int ncandidates = (int)_candidates.size();

    vector< vector< Point2f > > accepted;
    vector< vector< Point2f > > rejected;

    CV_Assert(_image.getMat().total() != 0);

    Mat grey;
    _convertToGrey(_image.getMat(), grey);

    vector< int > idsTmp(ncandidates, -1);
    vector< char > validCandidates(ncandidates, 0);

    //// Analyze each of the candidates
    // for (int i = 0; i <
    //      ncandidates; i++) {
    //    int currId = i;
    //    Mat currentCandidate = _candidates.getMat(i);
    //    if (_identifyOneCandidate(dictionary, grey, currentCandidate, currId, params)) {
    //        validCandidates[i] = 1;
    //        idsTmp[i] = currId;
    //    }
    //}

    // this is the parallel call for the previous commented loop (result is equivalent)
    parallel_for_(Range(0, ncandidates),
                  IdentifyCandidatesParallel(grey, _candidates, _contours, _dictionary, idsTmp,
                                             validCandidates, params));

    for(int i = 0; i < ncandidates; i++) {
        if(validCandidates[i] == 1) {
            accepted.push_back(_candidates[i]);
            ids.push_back(idsTmp[i]);
        } else {
            rejected.push_back(_candidates[i]);
        }
    }

    // parse output
    _accepted = accepted;

    if(_rejected.needed()) {
        _copyVector2Output(rejected, _rejected);
    }
}


/**
 * @brief Final filter of markers after its identification
 */
static void _filterDetectedMarkers(vector< vector< Point2f > >& _corners, vector< int >& _ids) {

    CV_Assert(_corners.size() == _ids.size());
    if(_corners.empty()) return;

    // mark markers that will be removed
    vector< bool > toRemove(_corners.size(), false);
    bool atLeastOneRemove = false;

    // remove repeated markers with same id, if one contains the other (double border bug)
    for(unsigned int i = 0; i < _corners.size() - 1; i++) {
        for(unsigned int j = i + 1; j < _corners.size(); j++) {
            if(_ids[i] != _ids[j]) continue;

            // check if first marker is inside second
            bool inside = true;
            for(unsigned int p = 0; p < 4; p++) {
                Point2f point = _corners[j][p];
                if(pointPolygonTest(_corners[i], point, false) < 0) {
                    inside = false;
                    break;
                }
            }
            if(inside) {
                toRemove[j] = true;
                atLeastOneRemove = true;
                continue;
            }

            // check the second marker
            inside = true;
            for(unsigned int p = 0; p < 4; p++) {
                Point2f point = _corners[i][p];
                if(pointPolygonTest(_corners[j], point, false) < 0) {
                    inside = false;
                    break;
                }
            }
            if(inside) {
                toRemove[i] = true;
                atLeastOneRemove = true;
                continue;
            }
        }
    }

    // parse output (compact the kept markers in place, then erase the tail)
    if(atLeastOneRemove) {
        vector< vector< Point2f > >::iterator filteredCorners = _corners.begin();
        vector<
            int >::iterator filteredIds = _ids.begin();

        for(unsigned int i = 0; i < toRemove.size(); i++) {
            if(!toRemove[i]) {
                *filteredCorners++ = _corners[i];
                *filteredIds++ = _ids[i];
            }
        }

        _ids.erase(filteredIds, _ids.end());
        _corners.erase(filteredCorners, _corners.end());
    }
}


/**
 * @brief Return object points for the system centered in a single marker, given the marker length
 */
static void _getSingleMarkerObjectPoints(float markerLength, OutputArray _objPoints) {

    CV_Assert(markerLength > 0);

    _objPoints.create(4, 1, CV_32FC3);
    Mat objPoints = _objPoints.getMat();
    // set coordinate system in the middle of the marker, with Z pointing out
    objPoints.ptr< Vec3f >(0)[0] = Vec3f(-markerLength / 2.f, markerLength / 2.f, 0);
    objPoints.ptr< Vec3f >(0)[1] = Vec3f(markerLength / 2.f, markerLength / 2.f, 0);
    objPoints.ptr< Vec3f >(0)[2] = Vec3f(markerLength / 2.f, -markerLength / 2.f, 0);
    objPoints.ptr< Vec3f >(0)[3] = Vec3f(-markerLength / 2.f, -markerLength / 2.f, 0);
}


/**
 * ParallelLoopBody class for the parallelization of the marker corner subpixel refinement
 * Called from function detectMarkers()
 */
class MarkerSubpixelParallel : public ParallelLoopBody {
    public:
    MarkerSubpixelParallel(const Mat *_grey, OutputArrayOfArrays _corners,
                           const Ptr<DetectorParameters> &_params)
        : grey(_grey), corners(_corners), params(_params) {}

    void operator()(const Range &range) const {
        const int begin = range.start;
        const int end = range.end;

        for(int i = begin; i < end; i++) {
            cornerSubPix(*grey, corners.getMat(i),
                         Size(params->cornerRefinementWinSize, params->cornerRefinementWinSize),
                         Size(-1, -1), TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS,
                                                    params->cornerRefinementMaxIterations,
                                                    params->cornerRefinementMinAccuracy));
        }
    }

    private:
    MarkerSubpixelParallel &operator=(const MarkerSubpixelParallel &); // to quiet MSVC

    const Mat *grey;
    OutputArrayOfArrays corners;
    const Ptr<DetectorParameters> &params;
};


/**
 * @brief Detect markers in the input image (public API entry point)
 */
void detectMarkers(InputArray _image, const Ptr<Dictionary> &_dictionary,
                   OutputArrayOfArrays _corners, OutputArray _ids,
                   const Ptr<DetectorParameters> &_params,
                   OutputArrayOfArrays _rejectedImgPoints) {

    CV_Assert(!_image.empty());

    Mat grey;
    _convertToGrey(_image.getMat(), grey);

    /// STEP 1: Detect marker candidates
    vector< vector< Point2f > > candidates;
    vector< vector< Point > > contours;
    vector< int > ids;
    _detectCandidates(grey, candidates, contours, _params);

    /// STEP 2: Check candidate codification (identify markers)
    _identifyCandidates(grey, candidates, contours, _dictionary, candidates, ids, _params,
                        _rejectedImgPoints);

    /// STEP 3: Filter detected markers
    _filterDetectedMarkers(candidates, ids);

    // copy to output arrays
    _copyVector2Output(candidates, _corners);
    Mat(ids).copyTo(_ids);

    /// STEP 4: Corner refinement
    if(_params->doCornerRefinement) {
        CV_Assert(_params->cornerRefinementWinSize > 0 &&
                  _params->cornerRefinementMaxIterations > 0 &&
                  _params->cornerRefinementMinAccuracy > 0);

        //// do corner refinement for each of the detected markers
        // for (unsigned int i = 0; i < _corners.cols(); i++) {
        //    cornerSubPix(grey, _corners.getMat(i),
        //                 Size(params.cornerRefinementWinSize, params.cornerRefinementWinSize),
        //                 Size(-1, -1), TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS,
        //                                            params.cornerRefinementMaxIterations,
        //                                            params.cornerRefinementMinAccuracy));
        //}

        // this is the parallel call for the previous commented loop (result is equivalent)
        parallel_for_(Range(0, _corners.cols()),
                      MarkerSubpixelParallel(&grey, _corners, _params));
    }
}


/**
 * ParallelLoopBody class for the parallelization of the single markers pose estimation
 * Called from function estimatePoseSingleMarkers()
 */
class SinglePoseEstimationParallel : public ParallelLoopBody {
    public:
    SinglePoseEstimationParallel(Mat& _markerObjPoints, InputArrayOfArrays _corners,
                                 InputArray _cameraMatrix, InputArray _distCoeffs,
                                 Mat& _rvecs, Mat& _tvecs)
        : markerObjPoints(_markerObjPoints), corners(_corners), cameraMatrix(_cameraMatrix),
          distCoeffs(_distCoeffs),
rvecs(_rvecs), tvecs(_tvecs) {} void operator()(const Range &range) const { const int begin = range.start; const int end = range.end; for(int i = begin; i < end; i++) { solvePnP(markerObjPoints, corners.getMat(i), cameraMatrix, distCoeffs, rvecs.at<Vec3d>(i), tvecs.at<Vec3d>(i)); } } private: SinglePoseEstimationParallel &operator=(const SinglePoseEstimationParallel &); // to quiet MSVC Mat& markerObjPoints; InputArrayOfArrays corners; InputArray cameraMatrix, distCoeffs; Mat& rvecs, tvecs; }; /** */ void estimatePoseSingleMarkers(InputArrayOfArrays _corners, float markerLength, InputArray _cameraMatrix, InputArray _distCoeffs, OutputArray _rvecs, OutputArray _tvecs) { CV_Assert(markerLength > 0); Mat markerObjPoints; _getSingleMarkerObjectPoints(markerLength, markerObjPoints); int nMarkers = (int)_corners.total(); _rvecs.create(nMarkers, 1, CV_64FC3); _tvecs.create(nMarkers, 1, CV_64FC3); Mat rvecs = _rvecs.getMat(), tvecs = _tvecs.getMat(); //// for each marker, calculate its pose // for (int i = 0; i < nMarkers; i++) { // solvePnP(markerObjPoints, _corners.getMat(i), _cameraMatrix, _distCoeffs, // _rvecs.getMat(i), _tvecs.getMat(i)); //} // this is the parallel call for the previous commented loop (result is equivalent) parallel_for_(Range(0, nMarkers), SinglePoseEstimationParallel(markerObjPoints, _corners, _cameraMatrix, _distCoeffs, rvecs, tvecs)); } /** * @brief Given a board configuration and a set of detected markers, returns the corresponding * image points and object points to call solvePnP */ static void _getBoardObjectAndImagePoints(const Ptr<Board> &_board, InputArray _detectedIds, InputArrayOfArrays _detectedCorners, OutputArray _imgPoints, OutputArray _objPoints) { CV_Assert(_board->ids.size() == _board->objPoints.size()); CV_Assert(_detectedIds.total() == _detectedCorners.total()); size_t nDetectedMarkers = _detectedIds.total(); vector< Point3f > objPnts; objPnts.reserve(nDetectedMarkers); vector< Point2f > imgPnts; 
    imgPnts.reserve(nDetectedMarkers);

    // look for detected markers that belong to the board and get their information
    for(unsigned int i = 0; i < nDetectedMarkers; i++) {
        int currentId = _detectedIds.getMat().ptr< int >(0)[i];
        for(unsigned int j = 0; j < _board->ids.size(); j++) {
            if(currentId == _board->ids[j]) {
                for(int p = 0; p < 4; p++) {
                    objPnts.push_back(_board->objPoints[j][p]);
                    imgPnts.push_back(_detectedCorners.getMat(i).ptr< Point2f >(0)[p]);
                }
            }
        }
    }

    // create output
    Mat(objPnts).copyTo(_objPoints);
    Mat(imgPnts).copyTo(_imgPoints);
}


/**
 * Project board markers that are not included in the list of detected markers
 */
static void _projectUndetectedMarkers(const Ptr<Board> &_board,
                                      InputOutputArrayOfArrays _detectedCorners,
                                      InputOutputArray _detectedIds, InputArray _cameraMatrix,
                                      InputArray _distCoeffs,
                                      vector< vector< Point2f > >& _undetectedMarkersProjectedCorners,
                                      OutputArray _undetectedMarkersIds) {

    // first estimate board pose with the currently available markers
    Mat rvec, tvec;
    int boardDetectedMarkers;
    boardDetectedMarkers = aruco::estimatePoseBoard(_detectedCorners, _detectedIds, _board,
                                                    _cameraMatrix, _distCoeffs, rvec, tvec);

    // rvec and tvec are only valid if at least one board marker was detected
    if(boardDetectedMarkers == 0) return;

    // search undetected markers and project them using the previous pose
    vector< vector< Point2f > > undetectedCorners;
    vector< int > undetectedIds;
    for(unsigned int i = 0; i < _board->ids.size(); i++) {
        int foundIdx = -1;
        for(unsigned int j = 0; j < _detectedIds.total(); j++) {
            if(_board->ids[i] == _detectedIds.getMat().ptr< int >()[j]) {
                foundIdx = j;
                break;
            }
        }

        // not detected
        if(foundIdx == -1) {
            undetectedCorners.push_back(vector< Point2f >());
            undetectedIds.push_back(_board->ids[i]);
            projectPoints(_board->objPoints[i], rvec, tvec, _cameraMatrix, _distCoeffs,
                          undetectedCorners.back());
        }
    }

    // parse output
    Mat(undetectedIds).copyTo(_undetectedMarkersIds);
    _undetectedMarkersProjectedCorners = undetectedCorners;
}


/**
 * Interpolate
 * board markers that are not included in the list of detected markers using
 * global homography
 */
static void _projectUndetectedMarkers(const Ptr<Board> &_board,
                                      InputOutputArrayOfArrays _detectedCorners,
                                      InputOutputArray _detectedIds,
                                      vector< vector< Point2f > >& _undetectedMarkersProjectedCorners,
                                      OutputArray _undetectedMarkersIds) {

    // check board points are in the same plane, if not, global homography cannot be applied
    CV_Assert(_board->objPoints.size() > 0);
    CV_Assert(_board->objPoints[0].size() > 0);
    float boardZ = _board->objPoints[0][0].z;
    for(unsigned int i = 0; i < _board->objPoints.size(); i++) {
        for(unsigned int j = 0; j < _board->objPoints[i].size(); j++) {
            CV_Assert(boardZ == _board->objPoints[i][j].z);
        }
    }

    vector< Point2f > detectedMarkersObj2DAll; // Object coordinates (without Z) of all the detected
                                               // marker corners in a single vector
    vector< Point2f > imageCornersAll; // Image corners of all detected markers in a single vector
    vector< vector< Point2f > > undetectedMarkersObj2D; // Object coordinates (without Z) of all
                                                        // missing markers in different vectors
    vector< int > undetectedMarkersIds; // ids of missing markers

    // find markers included in board, and missing markers from board. Fill the previous vectors
    for(unsigned int j = 0; j < _board->ids.size(); j++) {
        bool found = false;
        for(unsigned int i = 0; i < _detectedIds.total(); i++) {
            if(_detectedIds.getMat().ptr< int >()[i] == _board->ids[j]) {
                for(int c = 0; c < 4; c++) {
                    imageCornersAll.push_back(_detectedCorners.getMat(i).ptr< Point2f >()[c]);
                    detectedMarkersObj2DAll.push_back(
                        Point2f(_board->objPoints[j][c].x, _board->objPoints[j][c].y));
                }
                found = true;
                break;
            }
        }
        if(!found) {
            undetectedMarkersObj2D.push_back(vector< Point2f >());
            for(int c = 0; c < 4; c++) {
                undetectedMarkersObj2D.back().push_back(
                    Point2f(_board->objPoints[j][c].x, _board->objPoints[j][c].y));
            }
            undetectedMarkersIds.push_back(_board->ids[j]);
        }
    }
    if(imageCornersAll.size() == 0) return;

    // get homography from detected markers
    Mat transformation = findHomography(detectedMarkersObj2DAll, imageCornersAll);
    _undetectedMarkersProjectedCorners.resize(undetectedMarkersIds.size());
    // for each undetected marker, apply transformation
    for(unsigned int i = 0; i < undetectedMarkersObj2D.size(); i++) {
        perspectiveTransform(undetectedMarkersObj2D[i],
                             _undetectedMarkersProjectedCorners[i], transformation);
    }

    Mat(undetectedMarkersIds).copyTo(_undetectedMarkersIds);
}


/**
 * @brief Try to recover rejected candidates as markers that the board says should exist
 */
void refineDetectedMarkers(InputArray _image, const Ptr<Board> &_board,
                           InputOutputArrayOfArrays _detectedCorners,
                           InputOutputArray _detectedIds,
                           InputOutputArrayOfArrays _rejectedCorners, InputArray _cameraMatrix,
                           InputArray _distCoeffs, float minRepDistance,
                           float errorCorrectionRate, bool checkAllOrders,
                           OutputArray _recoveredIdxs, const Ptr<DetectorParameters> &_params) {

    CV_Assert(minRepDistance > 0);

    if(_detectedIds.total() == 0 || _rejectedCorners.total() == 0) return;

    DetectorParameters &params = *_params;

    // get projections of missing markers in the board
    vector< vector< Point2f > > undetectedMarkersCorners;
    vector< int > undetectedMarkersIds;
    if(_cameraMatrix.total() != 0) {
        // reproject based on camera projection model
// refineDetectedMarkers (continued): reproject the missing markers, then try
// to assign each one to the closest compatible rejected candidate.
        _projectUndetectedMarkers(_board, _detectedCorners, _detectedIds, _cameraMatrix,
                                  _distCoeffs, undetectedMarkersCorners, undetectedMarkersIds);
    } else {
        // reproject based on global homography
        _projectUndetectedMarkers(_board, _detectedCorners, _detectedIds, undetectedMarkersCorners,
                                  undetectedMarkersIds);
    }

    // list of missing markers indicating if they have been assigned to a candidate
    vector< bool > alreadyIdentified(_rejectedCorners.total(), false);

    // maximum bits that can be corrected
    Dictionary &dictionary = *(_board->dictionary);
    int maxCorrectionRecalculated =
        int(double(dictionary.maxCorrectionBits) * errorCorrectionRate);

    Mat grey;
    _convertToGrey(_image, grey);

    // vector of final detected marker corners and ids
    vector< Mat > finalAcceptedCorners;
    vector< int > finalAcceptedIds;
    // fill with the current markers
    finalAcceptedCorners.resize(_detectedCorners.total());
    finalAcceptedIds.resize(_detectedIds.total());
    for(unsigned int i = 0; i < _detectedIds.total(); i++) {
        finalAcceptedCorners[i] = _detectedCorners.getMat(i).clone();
        finalAcceptedIds[i] = _detectedIds.getMat().ptr< int >()[i];
    }
    vector< int > recoveredIdxs; // original indexes of accepted markers in _rejectedCorners

    // for each missing marker, try to find a correspondence
    for(unsigned int i = 0; i < undetectedMarkersIds.size(); i++) {

        // best match at the moment
        int closestCandidateIdx = -1;
        double closestCandidateDistance = minRepDistance * minRepDistance + 1;
        Mat closestRotatedMarker;

        for(unsigned int j = 0; j < _rejectedCorners.total(); j++) {
            if(alreadyIdentified[j]) continue;

            // check distance (squared) for the four possible corner rotations
            double minDistance = closestCandidateDistance + 1;
            bool valid = false;
            int validRot = 0;
            for(int c = 0; c < 4; c++) { // first corner in rejected candidate
                double currentMaxDistance = 0;
                for(int k = 0; k < 4; k++) {
                    Point2f rejCorner = _rejectedCorners.getMat(j).ptr< Point2f >()[(c + k) % 4];
                    Point2f distVector = undetectedMarkersCorners[i][k] - rejCorner;
                    double cornerDist = distVector.x * distVector.x + distVector.y * distVector.y;
                    currentMaxDistance = max(currentMaxDistance, cornerDist);
                }
                // if distance is better than current best distance
                if(currentMaxDistance < closestCandidateDistance) {
                    valid = true;
                    validRot = c;
                    minDistance = currentMaxDistance;
                }
                if(!checkAllOrders) break;
            }

            if(!valid) continue;

            // apply rotation
            Mat rotatedMarker;
            if(checkAllOrders) {
                rotatedMarker = Mat(4, 1, CV_32FC2);
                for(int c = 0; c < 4; c++)
                    rotatedMarker.ptr< Point2f >()[c] =
                        _rejectedCorners.getMat(j).ptr< Point2f >()[(c + 4 + validRot) % 4];
            } else rotatedMarker = _rejectedCorners.getMat(j);

            // last filter, check if inner code is close enough to the assigned marker code
            int codeDistance = 0;
            // if errorCorrectionRate is negative, don't check the code
            if(errorCorrectionRate >= 0) {
                // extract bits
                Mat bits = _extractBits(
                    grey, rotatedMarker, dictionary.markerSize, params.markerBorderBits,
                    params.perspectiveRemovePixelPerCell,
                    params.perspectiveRemoveIgnoredMarginPerCell, params.minOtsuStdDev);

                // strip the border cells; bits is square, hence bits.rows in both ranges
                Mat onlyBits =
                    bits.rowRange(params.markerBorderBits, bits.rows - params.markerBorderBits)
                        .colRange(params.markerBorderBits, bits.rows - params.markerBorderBits);

                codeDistance = dictionary.getDistanceToId(onlyBits, undetectedMarkersIds[i], false);
            }

            // if everything is ok, assign values to current best match
            if(errorCorrectionRate < 0 || codeDistance < maxCorrectionRecalculated) {
                closestCandidateIdx = j;
                closestCandidateDistance = minDistance;
                closestRotatedMarker = rotatedMarker;
            }
        }

        // if at least one good match, we have rescued the missing marker
        if(closestCandidateIdx >= 0) {

            // subpixel refinement
            if(params.doCornerRefinement) {
                CV_Assert(params.cornerRefinementWinSize > 0 &&
                          params.cornerRefinementMaxIterations > 0 &&
                          params.cornerRefinementMinAccuracy > 0);
                cornerSubPix(grey, closestRotatedMarker,
                             Size(params.cornerRefinementWinSize, params.cornerRefinementWinSize),
                             Size(-1, -1),
                             TermCriteria(TermCriteria::MAX_ITER | TermCriteria::EPS,
                                          params.cornerRefinementMaxIterations,
                                          params.cornerRefinementMinAccuracy));
            }

            // remove from rejected
            alreadyIdentified[closestCandidateIdx] = true;

            // add to detected
            finalAcceptedCorners.push_back(closestRotatedMarker);
            finalAcceptedIds.push_back(undetectedMarkersIds[i]);

            // add the original index of the candidate
            recoveredIdxs.push_back(closestCandidateIdx);
        }
    }

    // parse output only if some marker was actually recovered
    if(finalAcceptedIds.size() != _detectedIds.total()) {
        _detectedCorners.clear();
        _detectedIds.clear();

        // parse output
        Mat(finalAcceptedIds).copyTo(_detectedIds);

        _detectedCorners.create((int)finalAcceptedCorners.size(), 1, CV_32FC2);
        for(unsigned int i = 0; i < finalAcceptedCorners.size(); i++) {
            _detectedCorners.create(4, 1, CV_32FC2, i, true);
            for(int j = 0; j < 4; j++) {
                _detectedCorners.getMat(i).ptr< Point2f >()[j] =
                    finalAcceptedCorners[i].ptr< Point2f >()[j];
            }
        }

        // recalculate _rejectedCorners based on alreadyIdentified
        vector< Mat > finalRejected;
        for(unsigned int i = 0; i < alreadyIdentified.size(); i++) {
            if(!alreadyIdentified[i]) {
                finalRejected.push_back(_rejectedCorners.getMat(i).clone());
            }
        }
        _rejectedCorners.clear();
        _rejectedCorners.create((int)finalRejected.size(), 1, CV_32FC2);
        for(unsigned int i = 0; i < finalRejected.size(); i++) {
            _rejectedCorners.create(4, 1, CV_32FC2, i, true);
            for(int j = 0; j < 4; j++) {
                _rejectedCorners.getMat(i).ptr< Point2f >()[j] =
                    finalRejected[i].ptr< Point2f >()[j];
            }
        }

        if(_recoveredIdxs.needed()) {
            Mat(recoveredIdxs).copyTo(_recoveredIdxs);
        }
    }
}


/**
 * Estimate the board pose from the detected markers; returns the number of
 * board markers used by solvePnP (0 means no pose could be estimated).
 */
int estimatePoseBoard(InputArrayOfArrays _corners, InputArray _ids, const Ptr<Board> &board,
                      InputArray _cameraMatrix, InputArray _distCoeffs, OutputArray _rvec,
                      OutputArray _tvec) {

    CV_Assert(_corners.total() == _ids.total());

    // get object and image points for the solvePnP function
    Mat objPoints, imgPoints;
    _getBoardObjectAndImagePoints(board, _ids, _corners, imgPoints, objPoints);

    CV_Assert(imgPoints.total() == objPoints.total());

    if(objPoints.total() == 0) // 0 of the detected markers in board
        return 0;

    bool
// estimatePoseBoard (continued): reuse a caller-provided rvec/tvec as the
// initial guess for solvePnP when both are non-empty.
    useExtrinsicGuess = true;
    if (_rvec.empty() || _tvec.empty()) {
        _rvec.create(3, 1, CV_64FC1);
        _tvec.create(3, 1, CV_64FC1);
        useExtrinsicGuess = false;
    }
    solvePnP(objPoints, imgPoints, _cameraMatrix, _distCoeffs, _rvec, _tvec, useExtrinsicGuess);

    // divide by four since all the four corners are concatenated in the array for each marker
    return (int)objPoints.total() / 4;
}


/**
 * Draw this grid board; thin wrapper over _drawPlanarBoardImpl.
 */
void GridBoard::draw(Size outSize, OutputArray _img, int marginSize, int borderBits) {
    _drawPlanarBoardImpl(this, outSize, _img, marginSize, borderBits);
}


/**
 * Build a generic Board from per-marker 3D corner arrays (CV_32FC3, four
 * corners each), a marker dictionary and the corresponding marker ids.
 */
Ptr<Board> Board::create(InputArrayOfArrays objPoints, const Ptr<Dictionary> &dictionary, InputArray ids) {

    CV_Assert(objPoints.total() == ids.total());
    CV_Assert(objPoints.type() == CV_32FC3);
    std::vector< std::vector< Point3f > > obj_points_vector;
    for (unsigned int i = 0; i < objPoints.total(); i++) {
        std::vector<Point3f> corners;
        Mat corners_mat = objPoints.getMat(i);
        for (int j = 0; j < 4; j++) {
            corners.push_back(corners_mat.at<Point3f>(j));
        }
        obj_points_vector.push_back(corners);
    }

    Ptr<Board> res = makePtr<Board>();
    ids.copyTo(res->ids);
    res->objPoints = obj_points_vector;
    res->dictionary = cv::makePtr<Dictionary>(dictionary);
    return res;
}


/**
 * Build a planar grid board of markersX x markersY markers with the given
 * marker side length and separation (same length unit for both); ids are
 * assigned consecutively starting at firstMarker.
 */
Ptr<GridBoard> GridBoard::create(int markersX, int markersY, float markerLength,
                                 float markerSeparation, const Ptr<Dictionary> &dictionary,
                                 int firstMarker) {

    CV_Assert(markersX > 0 && markersY > 0 && markerLength > 0 && markerSeparation > 0);

    Ptr<GridBoard> res = makePtr<GridBoard>();

    res->_markersX = markersX;
    res->_markersY = markersY;
    res->_markerLength = markerLength;
    res->_markerSeparation = markerSeparation;
    res->dictionary = dictionary;

    size_t totalMarkers = (size_t) markersX * markersY;
    res->ids.resize(totalMarkers);
    res->objPoints.reserve(totalMarkers);

    // fill ids with first identifiers
    for(unsigned int i = 0; i < totalMarkers; i++) {
        res->ids[i] = i + firstMarker;
    }

    // calculate Board objPoints; maxY is the board's total height, so corner 0
    // of each marker is laid out top-to-bottom in a Y-up coordinate system
    float maxY = (float)markersY * markerLength + (markersY - 1) * markerSeparation;
    for(int y = 0; y < markersY; y++) {
        for(int x = 0; x < markersX; x++) {
            vector< Point3f > corners;
            corners.resize(4);
            corners[0] = Point3f(x * (markerLength + markerSeparation),
                                 maxY - y * (markerLength + markerSeparation), 0);
            corners[1] = corners[0] + Point3f(markerLength, 0, 0);
            corners[2] = corners[0] + Point3f(markerLength, -markerLength, 0);
            corners[3] = corners[0] + Point3f(0, -markerLength, 0);
            res->objPoints.push_back(corners);
        }
    }

    return res;
}


/**
 * Draw detected markers on the image: sides in borderColor, the first corner
 * highlighted, and (when ids are provided) the marker id at the center.
 */
void drawDetectedMarkers(InputOutputArray _image, InputArrayOfArrays _corners, InputArray _ids,
                         Scalar borderColor) {

    CV_Assert(_image.getMat().total() != 0 &&
              (_image.getMat().channels() == 1 || _image.getMat().channels() == 3));
    CV_Assert((_corners.total() == _ids.total()) || _ids.total() == 0);

    // calculate colors
    Scalar textColor, cornerColor;
    textColor = cornerColor = borderColor;
    swap(textColor.val[0], textColor.val[1]);     // text color just swaps B and G
    swap(cornerColor.val[1], cornerColor.val[2]); // corner color just swaps G and R

    int nMarkers = (int)_corners.total();
    for(int i = 0; i < nMarkers; i++) {
        Mat currentMarker = _corners.getMat(i);
        CV_Assert(currentMarker.total() == 4 && currentMarker.type() == CV_32FC2);

        // draw marker sides
        for(int j = 0; j < 4; j++) {
            Point2f p0, p1;
            p0 = currentMarker.ptr< Point2f >(0)[j];
            p1 = currentMarker.ptr< Point2f >(0)[(j + 1) % 4];
            line(_image, p0, p1, borderColor, 1);
        }
        // draw first corner mark
        rectangle(_image, currentMarker.ptr< Point2f >(0)[0] - Point2f(3, 3),
                  currentMarker.ptr< Point2f >(0)[0] + Point2f(3, 3), cornerColor, 1, LINE_AA);

        // draw ID
        if(_ids.total() != 0) {
            Point2f cent(0, 0);
            for(int p = 0; p < 4; p++)
                cent += currentMarker.ptr< Point2f >(0)[p];
            cent = cent / 4.;
            stringstream s;
            s << "id=" << _ids.getMat().ptr< int >(0)[i];
            putText(_image, s.str(), cent, FONT_HERSHEY_SIMPLEX, 0.5, textColor, 2);
        }
    }
}


/**
 * Draw the three coordinate axes of a pose (rvec, tvec) into the image.
 */
void drawAxis(InputOutputArray _image, InputArray _cameraMatrix, InputArray _distCoeffs,
              InputArray _rvec, InputArray
_tvec, float length) {

    CV_Assert(_image.getMat().total() != 0 &&
              (_image.getMat().channels() == 1 || _image.getMat().channels() == 3));
    CV_Assert(length > 0);

    // project axis points
    vector< Point3f > axisPoints;
    axisPoints.push_back(Point3f(0, 0, 0));
    axisPoints.push_back(Point3f(length, 0, 0));
    axisPoints.push_back(Point3f(0, length, 0));
    axisPoints.push_back(Point3f(0, 0, length));
    vector< Point2f > imagePoints;
    projectPoints(axisPoints, _rvec, _tvec, _cameraMatrix, _distCoeffs, imagePoints);

    // draw axis lines: X in red, Y in green, Z in blue (BGR order)
    line(_image, imagePoints[0], imagePoints[1], Scalar(0, 0, 255), 3);
    line(_image, imagePoints[0], imagePoints[2], Scalar(0, 255, 0), 3);
    line(_image, imagePoints[0], imagePoints[3], Scalar(255, 0, 0), 3);
}


/**
 * Draw a single marker image; thin wrapper over Dictionary::drawMarker.
 */
void drawMarker(const Ptr<Dictionary> &dictionary, int id, int sidePixels, OutputArray _img,
                int borderBits) {
    dictionary->drawMarker(id, sidePixels, _img, borderBits);
}


// Render a planar board into a CV_8UC1 image of size outSize: the board's XY
// bounding box is scaled to fit (preserving aspect ratio, centered) inside the
// margins, then each marker is drawn, warping it when its corners are not
// aligned with the image axes.
void _drawPlanarBoardImpl(Board *_board, Size outSize, OutputArray _img, int marginSize,
                          int borderBits) {

    CV_Assert(outSize.area() > 0);
    CV_Assert(marginSize >= 0);

    _img.create(outSize, CV_8UC1);
    Mat out = _img.getMat();
    out.setTo(Scalar::all(255));
    out.adjustROI(-marginSize, -marginSize, -marginSize, -marginSize);

    // calculate max and min values in XY plane
    CV_Assert(_board->objPoints.size() > 0);
    float minX, maxX, minY, maxY;
    minX = maxX = _board->objPoints[0][0].x;
    minY = maxY = _board->objPoints[0][0].y;

    for(unsigned int i = 0; i < _board->objPoints.size(); i++) {
        for(int j = 0; j < 4; j++) {
            minX = min(minX, _board->objPoints[i][j].x);
            maxX = max(maxX, _board->objPoints[i][j].x);
            minY = min(minY, _board->objPoints[i][j].y);
            maxY = max(maxY, _board->objPoints[i][j].y);
        }
    }

    float sizeX = maxX - minX;
    float sizeY = maxY - minY;

    // proportion transformations
    float xReduction = sizeX / float(out.cols);
    float yReduction = sizeY / float(out.rows);

    // determine the zone where the markers are placed
    if(xReduction > yReduction) {
        int nRows = int(sizeY / xReduction);
        int rowsMargins = (out.rows - nRows) / 2;
        out.adjustROI(-rowsMargins, -rowsMargins, 0, 0);
    } else {
        int nCols = int(sizeX / yReduction);
        int colsMargins = (out.cols - nCols) / 2;
        out.adjustROI(0, 0, -colsMargins, -colsMargins);
    }

    // now paint each marker
    Dictionary &dictionary = *(_board->dictionary);
    Mat marker;
    Point2f outCorners[3];
    Point2f inCorners[3];
    for(unsigned int m = 0; m < _board->objPoints.size(); m++) {
        // transform corners to markerZone coordinates
        for(int j = 0; j < 3; j++) {
            Point2f pf = Point2f(_board->objPoints[m][j].x, _board->objPoints[m][j].y);
            // move top left to 0, 0
            pf -= Point2f(minX, minY);
            pf.x = pf.x / sizeX * float(out.cols);
            pf.y = (1.0f - pf.y / sizeY) * float(out.rows);
            outCorners[j] = pf;
        }

        // get marker
        Size dst_sz(outCorners[2] - outCorners[0]); // assuming CCW order
        dictionary.drawMarker(_board->ids[m], dst_sz.width, marker, borderBits);

        if((outCorners[0].y == outCorners[1].y) && (outCorners[1].x == outCorners[2].x)) {
            // marker is aligned to image axes
            marker.copyTo(out(Rect(outCorners[0], dst_sz)));
            continue;
        }

        // interpolate tiny marker to marker position in markerZone
        inCorners[0] = Point2f(-0.5f, -0.5f);
        inCorners[1] = Point2f(marker.cols - 0.5f, -0.5f);
        inCorners[2] = Point2f(marker.cols - 0.5f, marker.rows - 0.5f);

        // remove perspective
        Mat transformation = getAffineTransform(inCorners, outCorners);
        warpAffine(marker, out, transformation, out.size(), INTER_LINEAR, BORDER_TRANSPARENT);
    }
}


/**
 * Public wrapper over _drawPlanarBoardImpl.
 */
void drawPlanarBoard(const Ptr<Board> &_board, Size outSize, OutputArray _img, int marginSize,
                     int borderBits) {
    _drawPlanarBoardImpl(_board, outSize, _img, marginSize, borderBits);
}


/**
 * Calibrate a camera from aruco detections of a board over several frames.
 * _counter holds, per frame, how many of the concatenated _corners/_ids
 * entries belong to that frame. Returns the reprojection error reported by
 * cv::calibrateCamera.
 */
double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputArray _counter,
                            const Ptr<Board> &board, Size imageSize,
                            InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
                            OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs,
                            OutputArray _stdDeviationsIntrinsics,
                            OutputArray _stdDeviationsExtrinsics, OutputArray _perViewErrors,
                            int flags, TermCriteria criteria) {

    // for each frame, get properly processed imagePoints and objectPoints for the calibrateCamera
    // function
    vector< Mat > processedObjectPoints, processedImagePoints;
    size_t nFrames = _counter.total();
    int markerCounter = 0;
    for(size_t frame = 0; frame < nFrames; frame++) {
        int nMarkersInThisFrame = _counter.getMat().ptr< int >()[frame];
        vector< Mat > thisFrameCorners;
        vector< int > thisFrameIds;
        CV_Assert(nMarkersInThisFrame > 0);
        thisFrameCorners.reserve((size_t) nMarkersInThisFrame);
        thisFrameIds.reserve((size_t) nMarkersInThisFrame);
        for(int j = markerCounter; j < markerCounter + nMarkersInThisFrame; j++) {
            thisFrameCorners.push_back(_corners.getMat(j));
            thisFrameIds.push_back(_ids.getMat().ptr< int >()[j]);
        }
        markerCounter += nMarkersInThisFrame;
        Mat currentImgPoints, currentObjPoints;
        _getBoardObjectAndImagePoints(board, thisFrameIds, thisFrameCorners, currentImgPoints,
                                      currentObjPoints);
        if(currentImgPoints.total() > 0 && currentObjPoints.total() > 0) {
            processedImagePoints.push_back(currentImgPoints);
            processedObjectPoints.push_back(currentObjPoints);
        }
    }
    return calibrateCamera(processedObjectPoints, processedImagePoints, imageSize, _cameraMatrix,
                           _distCoeffs, _rvecs, _tvecs, _stdDeviationsIntrinsics,
                           _stdDeviationsExtrinsics, _perViewErrors, flags, criteria);
}


/**
 * Convenience overload without the standard-deviation / per-view outputs.
 */
double calibrateCameraAruco(InputArrayOfArrays _corners, InputArray _ids, InputArray _counter,
                            const Ptr<Board> &board, Size imageSize,
                            InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
                            OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags,
                            TermCriteria criteria) {
    return calibrateCameraAruco(_corners, _ids, _counter, board, imageSize, _cameraMatrix,
                                _distCoeffs, _rvecs, _tvecs, noArray(), noArray(), noArray(),
                                flags, criteria);
}

}
}
#include <bits/stdc++.h>
using namespace std;

// *****

// The trajectories considered are the parabolas y = a * x * (P - x) over the
// segment [0, P]. For each test case we binary-search the largest clearance
// radius R such that some coefficient a keeps the whole parabola at distance
// >= R from every obstacle centre while its apex stays at least R below H.
// NOTE(review): the original problem statement is not visible here -- this
// interpretation is inferred from the code; confirm against the task.

// Obstacle centre in integer coordinates.
struct Obstacle { int X, Y; };

// Interval [aunder, aover] of coefficients a that are BLOCKED by one obstacle.
struct range_t { double aunder = 0.0, aover = INFINITY; };

// Lexicographic order so blocked intervals can be swept left to right.
bool operator<(range_t lhs, range_t rhs) {
  return tie(lhs.aunder, lhs.aover) < tie(rhs.aunder, rhs.aover);
}

int N, P, H;         // obstacle count, field width, ceiling height
vector<Obstacle> O;  // obstacles, X mirrored into the right half [P/2, P]
double CEILING;      // largest admissible a for the current radius (set by check)

// Height of the parabola with coefficient a at abscissa x.
inline double f(double a, double x) { return a * (x * (P - x)); }

// Coefficient of the parabola passing exactly through (x, y).
inline double a_through(double x, double y) { return y / (x * (P - x)); }

// Squared Euclidean distance between (x1, y1) and (x2, y2).
inline double dist(double x1, double y1, double x2, double y2) {
  double dx = x1 - x2, dy = y1 - y2;
  return dx * dx + dy * dy;
}

// Squared distance from point (x, y) to the parabola with coefficient a,
// minimised by ternary search over x in [P/2, P] (obstacles are pre-mirrored
// into that half; the distance is unimodal there).
double dist_parabola(double x, double y, double a) {
  double l = P / 2.0, r = P;
  constexpr double w13 = 1.0 / 3.0, w23 = 2.0 / 3.0;
  do {
    double X1 = w23 * l + w13 * r;
    double X2 = w13 * l + w23 * r;
    double D1 = dist(x, y, X1, f(a, X1));
    double D2 = dist(x, y, X2, f(a, X2));
    (D1 < D2) ? r = X2 : l = X1;
  } while (r - l > 1e-12);
  return dist(x, y, l, f(a, l));
}

// Computes the open interval (aunder, aover) of coefficients blocked by the
// obstacle centred at (x, y) for clearance radius R, by bisection on each
// side of the parabola that passes through the centre.
void a_around(int x, int y, double R, double &aunder, double &aover) {
  // Obstacle circle reaches the ground (resp. the field borders): no
  // parabola can pass under (resp. over) it.
  bool cannot_go_under = y < R;
  bool cannot_go_over = x < R || x + R > P;
  aunder = cannot_go_under ? -1.0 : 0.0;
  aover = CEILING + 1.0;
  double aO = a_through(x, y);  // parabola through the obstacle centre
  double aunder_max = aO;
  double aover_min = aO;
  cannot_go_over = cannot_go_over || aover_min >= CEILING;
  if (!cannot_go_under) {
    // Largest a that still passes below the obstacle with clearance R.
    do {
      double a = 0.5 * (aunder + aunder_max);
      double PQ = dist_parabola(x, y, a);
      (PQ <= R * R) ? aunder_max = a : aunder = a;
    } while (aunder_max - aunder > 1e-12);
  }
  if (!cannot_go_over) {
    // Smallest a that clears the obstacle from above with clearance R.
    do {
      double a = 0.5 * (aover + aover_min);
      double PQ = dist_parabola(x, y, a);
      (PQ <= R * R) ? aover_min = a : aover = a;
    } while (aover - aover_min > 1e-12);
  }
}

// Feasibility test for radius R: merge the blocked a-intervals of all
// obstacles and report whether an admissible coefficient remains in
// [0, CEILING].
bool check(double R) {
  vector<range_t> ranges(N);
  CEILING = a_through(P / 2.0, H - R);  // apex must stay R below the ceiling
  for (int i = 0; i < N; i++) {
    double &aunder = ranges[i].aunder;
    double &aover = ranges[i].aover;
    a_around(O[i].X, O[i].Y, R, aunder, aover);
  }
  sort(begin(ranges), end(ranges));
  double top = 0.0;  // upper end of the merged blocked region so far
  for (int i = 0; i < N; i++) {
    double aunder = ranges[i].aunder;
    double aover = ranges[i].aover;
    if (aunder >= top) {
      return true;  // gap below this blocked interval -> a feasible a exists
    }
    top = max(top, aover);
  }
  return top <= CEILING;  // feasible iff the blocked region ends below CEILING
}

// Reads one test case and binary-searches the maximal clearance radius.
auto solve() {
  cin >> N >> P >> H >> ws;
  O.resize(N);
  for (int i = 0; i < N; i++) {
    cin >> O[i].X >> O[i].Y >> ws;
    // Mirror into the right half so dist_parabola's search domain suffices.
    O[i].X = max(O[i].X, P - O[i].X);
  }
  double l = 0.0, r = H;
  do {
    double R = 0.5 * (l + r);
    check(R) ? l = R : r = R;
  } while (r - l > 1e-11);
  return 0.5 * (l + r);
}

// *****

int main() {
  unsigned T;
  cout << fixed << showpoint << setprecision(7);
  cin >> T >> ws;
  for (unsigned t = 1; t <= T; ++t) {
    auto solution = solve();
    cout << "Case #" << t << ": " << solution << '\n';
  }
  return 0;
}
/*
   For more information, please see: http://software.sci.utah.edu

   The MIT License

   Copyright (c) 2020 Scientific Computing and Imaging Institute,
   University of Utah.

   Permission is hereby granted, free of charge, to any person obtaining a
   copy of this software and associated documentation files (the "Software"),
   to deal in the Software without restriction, including without limitation
   the rights to use, copy, modify, merge, publish, distribute, sublicense,
   and/or sell copies of the Software, and to permit persons to whom the
   Software is furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included
   in all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
   THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
   DEALINGS IN THE SOFTWARE.
*/

//Uncomment line below to check for memory leaks (run in debug mode VS)
//#define LOOK_FOR_MEMORY_LEAKS

#include <Core/Application/Application.h>
#include <Interface/Application/GuiApplication.h>
#include <Core/ConsoleApplication/ConsoleApplication.h>
#include <Core/Utils/Legacy/Environment.h>
#include <iostream>
#ifdef BUILD_WITH_PYTHON
#include <Core/Python/PythonInterpreter.h>
#endif

using namespace SCIRun;
using namespace SCIRun::Core;
using namespace SCIRun::Gui;
using namespace SCIRun::Core::Console;

// Shared entry point for all platforms: sets up the SCIRun environment,
// parses the command line, optionally boots the embedded Python interpreter,
// then hands control to the GUI or console application loop.
int mainImpl(int argc, const char* argv[], char **environment)
{
#ifdef LOOK_FOR_MEMORY_LEAKS
  _CrtSetDbgFlag(_CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF);
  //_CrtSetBreakAlloc(34006);
#endif

  //char** env = nullptr; //TODO: passed as third argument from main, needs testing.
  create_sci_environment(environment, argv[0]);
  Application::Instance().readCommandLine(argc, argv);

#ifdef BUILD_WITH_PYTHON
  SCIRun::Core::PythonInterpreter::Instance().initialize(true,
      Application::Instance().parameters()->entireCommandLine(),
      Application::Instance().executablePath());
#endif

  //TODO: must read --headless flag here, or try pushing command queue building all the way up here
  //TODO: https://doc.qt.io/qt-5/qapplication.html#details
#ifndef BUILD_HEADLESS
  return GuiApplication::run(argc, argv);
#else
  return ConsoleApplication::run(argc, argv);
#endif
}

// If building on WIN32, use this entry point.
#ifdef WIN32
#include <windows.h>

// Converts a wide (UTF-16) string to a heap-allocated UTF-8 C string.
// NOTE(review): the returned buffer is never freed by callers (one allocation
// per argv entry) -- tolerable for process-lifetime data, but worth confirming.
const char* utf8_encode(const std::wstring &wstr)
{
  if (wstr.empty())
    return "";
  int size_needed = WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), NULL, 0, NULL, NULL);
  char* strTo = new char[size_needed + 1];
  strTo[size_needed] = 0;
  WideCharToMultiByte(CP_UTF8, 0, &wstr[0], (int)wstr.size(), strTo, size_needed, NULL, NULL);
  return strTo;
}

static std::vector<std::string> env_strings;  // parsed copy of the Win32 environment block
static char** winEnvironmentArray;            // null-terminated argv-style view of env_strings

// Copies a std::string into a freshly allocated, null-terminated char buffer.
static char* toCString(const std::string& str)
{
  char* cstring = new char[str.size() + 1];
  std::copy(str.begin(), str.end(), cstring);
  cstring[str.size()] = 0;
  return cstring;
}

int CALLBACK WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow)
{
#ifdef SCIRUN_SHOW_CONSOLE
  AllocConsole();
  freopen("CONIN$", "r", stdin);
  freopen("CONOUT$", "w", stdout);
  freopen("CONOUT$", "w", stderr);
#endif

  // NOTE(review): fixed capacity of 100 arguments; entries beyond that would
  // overflow -- confirm an upper bound is enforced elsewhere or guard argc.
  const char *argv[100] = { 0 };
  int argc;

  // Convert the UTF-16 command line into UTF-8 argv entries.
  {
    LPWSTR *szArglist;
    szArglist = CommandLineToArgvW(GetCommandLineW(), &argc);
    if (!szArglist)
    {
      std::cout << "CommandLineToArgvW failed" << std::endl;
      return 7;
    }
    else
    {
      for (int i = 0; i < argc; i++)
      {
        argv[i] = utf8_encode(szArglist[i]);
      }
    }
    // Free memory allocated for CommandLineToArgvW arguments.
    LocalFree(szArglist);
  }

  // Split the double-null-terminated environment block into "NAME=value"
  // strings.
  // NOTE(review): the block from GetEnvironmentStrings() is never released
  // with FreeEnvironmentStrings -- process-lifetime leak; confirm acceptable.
  {
    const char* a = GetEnvironmentStrings();
    int prev = 0;
    for (int i = 0;; i++)
    {
      if (a[i] == '\0')
      {
        env_strings.push_back(std::string(a + prev, a + i));
        prev = i + 1;
        if (a[i + 1] == '\0')
        {
          break;
        }
      }
    }
  }

  // Build the char** environment array expected by mainImpl.
  winEnvironmentArray = new char*[env_strings.size() + 1];
  auto winEnvironmentArrayPtr = winEnvironmentArray;
  for (const auto& env : env_strings)
  {
    *winEnvironmentArrayPtr++ = toCString(env);
  }
  winEnvironmentArray[env_strings.size()] = nullptr;

  return mainImpl(argc, argv, winEnvironmentArray);
}

#else // If not WIN32 use this main()/entry point.

// Standard entry point; the non-portable third parameter receives the
// environment block on platforms that pass it.
int main(int argc, const char* argv[], char **environment)
{
  return mainImpl(argc, argv, environment);
}

#endif // End of main for non-Windows.
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/drive/chromeos/file_system/set_property_operation.h"

#include "base/files/file_path.h"
#include "components/drive/drive.pb.h"
#include "components/drive/file_errors.h"
#include "components/drive/file_system/operation_test_base.h"
#include "content/public/test/test_utils.h"
#include "google_apis/drive/drive_api_requests.h"
#include "google_apis/drive/test_util.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace drive {
namespace file_system {
namespace {

const base::FilePath::CharType kTestPath[] =
    FILE_PATH_LITERAL("drive/root/File 1.txt");
const char kTestKey[] = "key";
const char kTestValue[] = "value";
const char kTestAnotherValue[] = "another-value";

}  // namespace

typedef OperationTestBase SetPropertyOperationTest;

// Setting a property on an existing entry succeeds, marks the entry DIRTY,
// stores exactly one property, and reports the local id as updated without
// emitting a file-change notification.
TEST_F(SetPropertyOperationTest, SetProperty) {
  SetPropertyOperation operation(blocking_task_runner(), delegate(),
                                 metadata());

  const base::FilePath test_path(kTestPath);
  FileError result = FILE_ERROR_FAILED;
  operation.SetProperty(
      test_path, google_apis::drive::Property::Visibility::VISIBILITY_PRIVATE,
      kTestKey, kTestValue,
      google_apis::test_util::CreateCopyResultCallback(&result));
  content::RunAllBlockingPoolTasksUntilIdle();
  EXPECT_EQ(FILE_ERROR_OK, result);

  ResourceEntry entry;
  EXPECT_EQ(FILE_ERROR_OK, GetLocalResourceEntry(test_path, &entry));
  EXPECT_EQ(ResourceEntry::DIRTY, entry.metadata_edit_state());
  ASSERT_EQ(1, entry.new_properties().size());
  const drive::Property property = entry.new_properties().Get(0);
  EXPECT_EQ(Property_Visibility_PRIVATE, property.visibility());
  EXPECT_EQ(kTestKey, property.key());
  EXPECT_EQ(kTestValue, property.value());

  // Property changes must not generate a file-change notification, only an
  // updated-local-id record.
  EXPECT_EQ(0u, delegate()->get_changed_files().size());
  EXPECT_FALSE(delegate()->get_changed_files().count(test_path));
  EXPECT_EQ(1u, delegate()->updated_local_ids().size());
  EXPECT_TRUE(delegate()->updated_local_ids().count(entry.local_id()));
}

// Setting the exact same property (key, value and visibility) twice must not
// duplicate it.
TEST_F(SetPropertyOperationTest, SetProperty_Duplicate) {
  SetPropertyOperation operation(blocking_task_runner(), delegate(),
                                 metadata());

  const base::FilePath test_path(kTestPath);
  FileError result = FILE_ERROR_FAILED;
  operation.SetProperty(
      test_path, google_apis::drive::Property::Visibility::VISIBILITY_PRIVATE,
      kTestKey, kTestValue,
      google_apis::test_util::CreateCopyResultCallback(&result));
  operation.SetProperty(
      test_path, google_apis::drive::Property::Visibility::VISIBILITY_PRIVATE,
      kTestKey, kTestValue,
      google_apis::test_util::CreateCopyResultCallback(&result));
  content::RunAllBlockingPoolTasksUntilIdle();
  EXPECT_EQ(FILE_ERROR_OK, result);

  ResourceEntry entry;
  EXPECT_EQ(FILE_ERROR_OK, GetLocalResourceEntry(test_path, &entry));
  EXPECT_EQ(1, entry.new_properties().size());
}

// Setting a property with an existing key and visibility overwrites the
// stored value instead of adding a second property.
TEST_F(SetPropertyOperationTest, SetProperty_Overwrite) {
  SetPropertyOperation operation(blocking_task_runner(), delegate(),
                                 metadata());

  const base::FilePath test_path(kTestPath);
  FileError result = FILE_ERROR_FAILED;
  operation.SetProperty(
      test_path, google_apis::drive::Property::Visibility::VISIBILITY_PUBLIC,
      kTestKey, kTestValue,
      google_apis::test_util::CreateCopyResultCallback(&result));
  operation.SetProperty(
      test_path, google_apis::drive::Property::Visibility::VISIBILITY_PUBLIC,
      kTestKey, kTestAnotherValue,
      google_apis::test_util::CreateCopyResultCallback(&result));
  content::RunAllBlockingPoolTasksUntilIdle();
  EXPECT_EQ(FILE_ERROR_OK, result);

  ResourceEntry entry;
  EXPECT_EQ(FILE_ERROR_OK, GetLocalResourceEntry(test_path, &entry));
  ASSERT_EQ(1, entry.new_properties().size());
  const drive::Property property = entry.new_properties().Get(0);
  EXPECT_EQ(Property_Visibility_PUBLIC, property.visibility());
  EXPECT_EQ(kTestKey, property.key());
  EXPECT_EQ(kTestAnotherValue, property.value());
}

// Properties with the same key but different visibilities are stored as two
// distinct properties.
TEST_F(SetPropertyOperationTest, SetProperty_DifferentVisibilities) {
  SetPropertyOperation operation(blocking_task_runner(), delegate(),
                                 metadata());

  {
    const base::FilePath test_path(kTestPath);
    FileError result = FILE_ERROR_FAILED;
    operation.SetProperty(
        test_path, google_apis::drive::Property::Visibility::VISIBILITY_PRIVATE,
        kTestKey, kTestValue,
        google_apis::test_util::CreateCopyResultCallback(&result));
    content::RunAllBlockingPoolTasksUntilIdle();
    EXPECT_EQ(FILE_ERROR_OK, result);

    ResourceEntry entry;
    EXPECT_EQ(FILE_ERROR_OK, GetLocalResourceEntry(test_path, &entry));
    ASSERT_EQ(1, entry.new_properties().size());
    const drive::Property property = entry.new_properties().Get(0);
    EXPECT_EQ(Property_Visibility_PRIVATE, property.visibility());
    EXPECT_EQ(kTestKey, property.key());
    EXPECT_EQ(kTestValue, property.value());
  }

  // Insert another property with the same key but a different value and a
  // different visibility.
  {
    const base::FilePath test_path(kTestPath);
    FileError result = FILE_ERROR_FAILED;
    operation.SetProperty(
        test_path, google_apis::drive::Property::Visibility::VISIBILITY_PUBLIC,
        kTestKey, kTestAnotherValue,
        google_apis::test_util::CreateCopyResultCallback(&result));
    content::RunAllBlockingPoolTasksUntilIdle();
    EXPECT_EQ(FILE_ERROR_OK, result);

    ResourceEntry entry;
    EXPECT_EQ(FILE_ERROR_OK, GetLocalResourceEntry(test_path, &entry));
    ASSERT_EQ(2, entry.new_properties().size());
    const drive::Property property = entry.new_properties().Get(1);
    EXPECT_EQ(Property_Visibility_PUBLIC, property.visibility());
    EXPECT_EQ(kTestKey, property.key());
    EXPECT_EQ(kTestAnotherValue, property.value());
  }
}

}  // namespace file_system
}  // namespace drive
#include "cpgf/scriptbind/gscriptservice.h"
#include "cpgf/scriptbind/gscriptbind.h"
#include "cpgf/scriptbind/gscriptbindutil.h"
#include "cpgf/metadata/metautility/gmetadata_metabytearray.h"
#include "cpgf/metadata/metautility/gmetadata_metaobjectarray.h"
#include "cpgf/metautility/gmetabytearray.h"
#include "cpgf/metautility/gmetaobjectarray.h"
#include "cpgf/scriptbind/gscriptlibraryapi.h"
#include "cpgf/metadata/private/gmetadata_header.h"
#include "cpgf/gscopedinterface.h"

namespace cpgf {

// Defined elsewhere; constructs the loader for the libraries shipped with
// cpgf, used as a fallback when no loader was supplied to the service.
IScriptLibraryLoader * createBuiltinLibraries(GScriptObject * scriptObject);

// Registers the script-visible methods of GScriptCoreService on the meta
// class definition D. "import" and "_import" are two script-side aliases
// bound to the same loadLibrary implementation.
template <typename D>
void buildMetaClass_GScriptCoreService(D _d)
{
	_d.CPGF_MD_TEMPLATE _method("import", &D::ClassType::loadLibrary);
	_d.CPGF_MD_TEMPLATE _method("_import", &D::ClassType::loadLibrary);
	_d.CPGF_MD_TEMPLATE _method("setAllowGC", &D::ClassType::setAllowGC);
}

// Creates a GScriptCoreService, injects it into scriptObject under bindName,
// and transfers ownership of the service to the caller (coreService.take()).
// The generated meta class is handed to the script object via holdObject()
// so it stays alive as long as the script object does.
GScriptCoreService * doBindScriptCoreService(GScriptObject * scriptObject, const char * bindName, IScriptLibraryLoader * libraryLoader)
{
	GScopedPointer<GScriptCoreService> coreService(new GScriptCoreService(scriptObject, bindName, libraryLoader));

	GDefineMetaClass<GScriptCoreService> define = GDefineMetaClass<GScriptCoreService>::Policy<GMetaPolicyNoDefaultAndCopyConstructor>::declare("GScriptCoreService");
	buildMetaClass_GScriptCoreService(define);
	injectObjectToScript(scriptObject, define.getMetaClass(), coreService.get(), bindName);
	GScopedInterface<IMetaItem> metaItem(metaItemToInterface(define.takeMetaClass(), true));
	scriptObject->holdObject(metaItem.get());

	return coreService.take();
}

GScriptCoreService::GScriptCoreService(GScriptObject * scriptObject, const char * bindName, IScriptLibraryLoader * libraryLoader)
	: scriptObject(scriptObject), bindName(bindName), libraryLoader(libraryLoader)
{
}

GScriptCoreService::~GScriptCoreService()
{
}

// Loads every script library named in libraryNames into the namespace
// `namespaces`; a null `namespaces` defaults to this service's bind name.
// Returns false as soon as any single library fails to load.
bool GScriptCoreService::loadLibrary(const char * namespaces, const GMetaVariadicParam * libraryNames)
{
	if(! this->libraryLoader) {
		// No loader was supplied: fall back to the built-in libraries.
		// NOTE(review): `loader` releases its reference when it leaves this
		// scope, while reset() stores the raw pointer — this assumes
		// libraryLoader.reset() takes its own reference; confirm against the
		// GScopedInterface / libraryLoader member semantics.
		GScopedInterface<IScriptLibraryLoader> loader(createBuiltinLibraries(this->scriptObject));
		this->libraryLoader.reset(loader.get());
	}

	if(namespaces == nullptr) {
		namespaces = this->bindName.c_str();
	}

	// Non-owning view of the bound script object, passed to the loader.
	GScopedInterface<IScriptObject> owner(scriptObjectToInterface(this->scriptObject, false));
	for(size_t i = 0; i < libraryNames->paramCount; ++i) {
		char * name = fromVariant<char *>(*(libraryNames->params[i]));
		if(! this->libraryLoader->loadScriptLibrary(owner.get(), namespaces, name)) {
			return false;
		}
	}

	return true;
}

// Enables or disables garbage collection for the script-bound `instance`
// by forwarding to the script context.
void GScriptCoreService::setAllowGC(const GVariant & instance, bool allowGC)
{
	this->scriptObject->getContext()->setAllowGC(&instance.refData(), allowGC);
}

} // namespace cpgf

#include "cpgf/metadata/private/gmetadata_footer.h"
#include "storm/storage/prism/StateReward.h"

#include "storm/storage/expressions/Variable.h"

namespace storm {
    namespace prism {
        // A state reward attaches a reward value to every state satisfying a
        // predicate; both are stored as expressions together with the source
        // location they were parsed from.
        StateReward::StateReward(storm::expressions::Expression const& statePredicateExpression,
                                 storm::expressions::Expression const& rewardValueExpression,
                                 std::string const& filename, uint_fast64_t lineNumber)
            : LocatedInformation(filename, lineNumber),
              statePredicateExpression(statePredicateExpression),
              rewardValueExpression(rewardValueExpression) {
            // Intentionally empty: all members are set in the initializer list.
        }

        // Returns the guard expression selecting the rewarded states.
        storm::expressions::Expression const& StateReward::getStatePredicateExpression() const {
            return this->statePredicateExpression;
        }

        // Returns the expression giving the reward earned in a matching state.
        storm::expressions::Expression const& StateReward::getRewardValueExpression() const {
            return this->rewardValueExpression;
        }

        // Produces a copy of this reward with the given variable substitution
        // applied to both the predicate and the value expression; the source
        // location is carried over unchanged.
        StateReward StateReward::substitute(std::map<storm::expressions::Variable, storm::expressions::Expression> const& substitution) const {
            storm::expressions::Expression substitutedPredicate = this->getStatePredicateExpression().substitute(substitution);
            storm::expressions::Expression substitutedValue = this->getRewardValueExpression().substitute(substitution);
            return StateReward(substitutedPredicate, substitutedValue, this->getFilename(), this->getLineNumber());
        }

        // Prints the reward in PRISM syntax: "<predicate>: <value>;".
        std::ostream& operator<<(std::ostream& stream, StateReward const& stateReward) {
            stream << "\t" << stateReward.getStatePredicateExpression() << ": " << stateReward.getRewardValueExpression() << ";";
            return stream;
        }
    } // namespace prism
} // namespace storm
// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/base/platform/time.h"

#if V8_OS_POSIX
#include <fcntl.h>  // for O_RDONLY
#include <sys/time.h>
#include <unistd.h>
#endif
#if V8_OS_MACOSX
#include <mach/mach.h>
#include <mach/mach_time.h>
#include <pthread.h>
#endif

#include <cstring>
#include <ostream>

#if V8_OS_WIN
#include "src/base/atomicops.h"
#include "src/base/lazy-instance.h"
#include "src/base/win32-headers.h"
#endif
#include "src/base/cpu.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"

namespace {

#if V8_OS_MACOSX
// Returns the combined user + system CPU time consumed by the calling thread,
// in microseconds, via the Mach thread_info() API. Overflow in the unit
// conversion aborts through CheckedNumeric::ValueOrDie().
int64_t ComputeThreadTicks() {
  mach_msg_type_number_t thread_info_count = THREAD_BASIC_INFO_COUNT;
  thread_basic_info_data_t thread_info_data;
  kern_return_t kr = thread_info(
      pthread_mach_thread_np(pthread_self()),
      THREAD_BASIC_INFO,
      reinterpret_cast<thread_info_t>(&thread_info_data),
      &thread_info_count);
  CHECK(kr == KERN_SUCCESS);
  v8::base::CheckedNumeric<int64_t> absolute_micros(
      thread_info_data.user_time.seconds +
      thread_info_data.system_time.seconds);
  absolute_micros *= v8::base::Time::kMicrosecondsPerSecond;
  absolute_micros += (thread_info_data.user_time.microseconds +
                      thread_info_data.system_time.microseconds);
  return absolute_micros.ValueOrDie();
}
#elif V8_OS_POSIX
// Helper function to get results from clock_gettime() and convert to a
// microsecond timebase. Minimum requirement is MONOTONIC_CLOCK to be supported
// on the system. FreeBSD 6 has CLOCK_MONOTONIC but defines
// _POSIX_MONOTONIC_CLOCK to -1.
V8_INLINE int64_t ClockNow(clockid_t clk_id) {
#if (defined(_POSIX_MONOTONIC_CLOCK) && _POSIX_MONOTONIC_CLOCK >= 0) || \
  defined(V8_OS_BSD) || defined(V8_OS_ANDROID)
  struct timespec ts;
  if (clock_gettime(clk_id, &ts) != 0) {
    UNREACHABLE();
    return 0;
  }
  // Checked conversion: seconds * 1e6 + nanoseconds / 1e3, aborting on
  // overflow rather than silently wrapping.
  v8::base::internal::CheckedNumeric<int64_t> result(ts.tv_sec);
  result *= v8::base::Time::kMicrosecondsPerSecond;
  result += (ts.tv_nsec / v8::base::Time::kNanosecondsPerMicrosecond);
  return result.ValueOrDie();
#else
  // Monotonic clock not supported.
  return 0;
#endif
}
#elif V8_OS_WIN
// Returns true on hardware where QueryPerformanceCounter is known to
// misbehave, so callers can fall back to a lower-resolution clock.
V8_INLINE bool IsQPCReliable() {
  v8::base::CPU cpu;
  // On Athlon X2 CPUs (e.g. model 15) QueryPerformanceCounter is unreliable.
  return strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15;
}

// Returns the current value of the performance counter.
V8_INLINE uint64_t QPCNowRaw() {
  LARGE_INTEGER perf_counter_now = {};
  // According to the MSDN documentation for QueryPerformanceCounter(), this
  // will never fail on systems that run XP or later.
  // https://msdn.microsoft.com/library/windows/desktop/ms644904.aspx
  BOOL result = ::QueryPerformanceCounter(&perf_counter_now);
  DCHECK(result);
  USE(result);
  return perf_counter_now.QuadPart;
}
#endif  // V8_OS_MACOSX

}  // namespace

namespace v8 {
namespace base {

// TimeDelta factory functions: convert a count of the given unit into the
// internal microsecond representation (delta_).
TimeDelta TimeDelta::FromDays(int days) {
  return TimeDelta(days * Time::kMicrosecondsPerDay);
}

TimeDelta TimeDelta::FromHours(int hours) {
  return TimeDelta(hours * Time::kMicrosecondsPerHour);
}

TimeDelta TimeDelta::FromMinutes(int minutes) {
  return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
}

TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
  return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
}

TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
  return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
}

TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
  return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
}

// Accessors: convert the stored microsecond count into the requested unit.
// The integer variants truncate (integer division).
int TimeDelta::InDays() const {
  return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
}

int TimeDelta::InHours() const {
  return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
}

int TimeDelta::InMinutes() const {
  return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
}

double TimeDelta::InSecondsF() const {
  return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
}

int64_t TimeDelta::InSeconds() const {
  return delta_ / Time::kMicrosecondsPerSecond;
}

double TimeDelta::InMillisecondsF() const {
  return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
}

int64_t TimeDelta::InMilliseconds() const {
  return delta_ / Time::kMicrosecondsPerMillisecond;
}

int64_t TimeDelta::InNanoseconds() const {
  return delta_ * Time::kNanosecondsPerMicrosecond;
}

#if V8_OS_MACOSX

// Conversions to/from Mach's timespec representation (seconds + nanoseconds).
TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}

struct mach_timespec TimeDelta::ToMachTimespec() const {
  struct mach_timespec ts;
  // mach_timespec seconds are unsigned, so negative deltas cannot be encoded.
  DCHECK(delta_ >= 0);
  ts.tv_sec = static_cast<unsigned>(delta_ / Time::kMicrosecondsPerSecond);
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_MACOSX

#if V8_OS_POSIX

// Conversions to/from the POSIX struct timespec representation.
TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
  DCHECK_GE(ts.tv_nsec, 0);
  DCHECK_LT(ts.tv_nsec,
            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
}

struct timespec TimeDelta::ToTimespec() const {
  struct timespec ts;
  ts.tv_sec = static_cast<time_t>(delta_ / Time::kMicrosecondsPerSecond);
  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
      Time::kNanosecondsPerMicrosecond;
  return ts;
}

#endif  // V8_OS_POSIX

#if V8_OS_WIN

// We implement time using the high-resolution timers so that we can get
// timeouts which are smaller than 10-15ms. To avoid any drift, we
// periodically resync the internal clock to the system clock.
class Clock final {
 public:
  Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}

  Time Now() {
    // Time between resampling the un-granular clock for this API (1 minute).
    const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);

    LockGuard<Mutex> lock_guard(&mutex_);

    // Determine current time and ticks.
    TimeTicks ticks = GetSystemTicks();
    Time time = GetSystemTime();

    // Check if we need to synchronize with the system clock due to a backwards
    // time change or the amount of time elapsed.
    TimeDelta elapsed = ticks - initial_ticks_;
    if (time < initial_time_ || elapsed > kMaxElapsedTime) {
      initial_ticks_ = ticks;
      initial_time_ = time;
      return time;
    }

    return initial_time_ + elapsed;
  }

  Time NowFromSystemTime() {
    // Force a resync to the system clock.
    LockGuard<Mutex> lock_guard(&mutex_);
    initial_ticks_ = GetSystemTicks();
    initial_time_ = GetSystemTime();
    return initial_time_;
  }

 private:
  static TimeTicks GetSystemTicks() {
    return TimeTicks::Now();
  }

  static Time GetSystemTime() {
    FILETIME ft;
    ::GetSystemTimeAsFileTime(&ft);
    return Time::FromFiletime(ft);
  }

  TimeTicks initial_ticks_;
  Time initial_time_;
  Mutex mutex_;
};

static LazyStaticInstance<Clock, DefaultConstructTrait<Clock>,
                          ThreadSafeInitOnceTrait>::type clock =
    LAZY_STATIC_INSTANCE_INITIALIZER;

Time Time::Now() {
  return clock.Pointer()->Now();
}

Time Time::NowFromSystemTime() {
  return clock.Pointer()->NowFromSystemTime();
}

// Time between windows epoch and standard epoch.
// Microseconds between the Windows FILETIME epoch (1601) and the Unix epoch
// (1970); used to shift between the two representations below.
static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);

Time Time::FromFiletime(FILETIME ft) {
  // All-zero and all-ones FILETIMEs map to the null and max sentinels.
  if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
    return Time();
  }
  if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
      ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
    return Max();
  }
  // FILETIME counts 100ns units; divide by 10 for microseconds.
  int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
                (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
  return Time(us - kTimeToEpochInMicroseconds);
}

FILETIME Time::ToFiletime() const {
  DCHECK(us_ >= 0);
  FILETIME ft;
  if (IsNull()) {
    ft.dwLowDateTime = 0;
    ft.dwHighDateTime = 0;
    return ft;
  }
  if (IsMax()) {
    ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
    ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
    return ft;
  }
  // Microseconds -> 100ns units, shifted back to the FILETIME epoch.
  uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
  ft.dwLowDateTime = static_cast<DWORD>(us);
  ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
  return ft;
}

#elif V8_OS_POSIX

Time Time::Now() {
  struct timeval tv;
  int result = gettimeofday(&tv, NULL);
  DCHECK_EQ(0, result);
  USE(result);
  return FromTimeval(tv);
}

Time Time::NowFromSystemTime() {
  // gettimeofday() already reads the system clock; no resync needed.
  return Now();
}

Time Time::FromTimespec(struct timespec ts) {
  DCHECK(ts.tv_nsec >= 0);
  DCHECK(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond));  // NOLINT
  // All-zero and saturated timespecs map to the null and max sentinels.
  if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
    return Time();
  }
  if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) &&  // NOLINT
      ts.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(ts.tv_sec * kMicrosecondsPerSecond +
              ts.tv_nsec / kNanosecondsPerMicrosecond);
}

struct timespec Time::ToTimespec() const {
  struct timespec ts;
  if (IsNull()) {
    ts.tv_sec = 0;
    ts.tv_nsec = 0;
    return ts;
  }
  if (IsMax()) {
    ts.tv_sec = std::numeric_limits<time_t>::max();
    ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1);  // NOLINT
    return ts;
  }
  ts.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
  return ts;
}

Time Time::FromTimeval(struct timeval tv) {
  DCHECK(tv.tv_usec >= 0);
  DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
  // All-zero and saturated timevals map to the null and max sentinels.
  if (tv.tv_usec == 0 && tv.tv_sec == 0) {
    return Time();
  }
  if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
      tv.tv_sec == std::numeric_limits<time_t>::max()) {
    return Max();
  }
  return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
}

struct timeval Time::ToTimeval() const {
  struct timeval tv;
  if (IsNull()) {
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    return tv;
  }
  if (IsMax()) {
    tv.tv_sec = std::numeric_limits<time_t>::max();
    tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
    return tv;
  }
  tv.tv_sec = static_cast<time_t>(us_ / kMicrosecondsPerSecond);
  tv.tv_usec = us_ % kMicrosecondsPerSecond;
  return tv;
}

#endif  // V8_OS_WIN

Time Time::FromJsTime(double ms_since_epoch) {
  // The epoch is a valid time, so this constructor doesn't interpret
  // 0 as the null time.
  if (ms_since_epoch == std::numeric_limits<double>::max()) {
    return Max();
  }
  return Time(
      static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
}

double Time::ToJsTime() const {
  if (IsNull()) {
    // Preserve 0 so the invalid result doesn't depend on the platform.
    return 0;
  }
  if (IsMax()) {
    // Preserve max without offset to prevent overflow.
    return std::numeric_limits<double>::max();
  }
  return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
}

std::ostream& operator<<(std::ostream& os, const Time& time) {
  return os << time.ToJsTime();
}


#if V8_OS_WIN

// Abstract interface over the two Windows tick sources defined below.
class TickClock {
 public:
  virtual ~TickClock() {}
  virtual int64_t Now() = 0;
  virtual bool IsHighResolution() = 0;
};

// Overview of time counters:
// (1) CPU cycle counter. (Retrieved via RDTSC)
// The CPU counter provides the highest resolution time stamp and is the least
// expensive to retrieve. However, the CPU counter is unreliable and should not
// be used in production. Its biggest issue is that it is per processor and it
// is not synchronized between processors. Also, on some computers, the counters
// will change frequency due to thermal and power changes, and stop in some
// states.
//
// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
// resolution (100 nanoseconds) time stamp but is comparatively more expensive
// to retrieve. What QueryPerformanceCounter actually does is up to the HAL.
// (with some help from ACPI).
// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
// in the worst case, it gets the counter from the rollover interrupt on the
// programmable interrupt timer. In best cases, the HAL may conclude that the
// RDTSC counter runs at a constant frequency, then it uses that instead. On
// multiprocessor machines, it will try to verify the values returned from
// RDTSC on each processor are consistent with each other, and apply a handful
// of workarounds for known buggy hardware. In other words, QPC is supposed to
// give consistent result on a multiprocessor computer, but it is unreliable in
// reality due to bugs in BIOS or HAL on some, especially old computers.
// With recent updates on HAL and newer BIOS, QPC is getting more reliable but
// it should be used with caution.
//
// (3) System time. The system time provides a low-resolution (typically 10ms
// to 55 milliseconds) time stamp but is comparatively less expensive to
// retrieve and more reliable.
class HighResolutionTickClock final : public TickClock {
 public:
  explicit HighResolutionTickClock(int64_t ticks_per_second)
      : ticks_per_second_(ticks_per_second) {
    DCHECK_LT(0, ticks_per_second);
  }
  virtual ~HighResolutionTickClock() {}

  int64_t Now() override {
    uint64_t now = QPCNowRaw();

    // Intentionally calculate microseconds in a round about manner to avoid
    // overflow and precision issues. Think twice before simplifying!
    int64_t whole_seconds = now / ticks_per_second_;
    int64_t leftover_ticks = now % ticks_per_second_;
    int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
        ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);

    // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
    // will never return 0.
    return ticks + 1;
  }

  bool IsHighResolution() override { return true; }

 private:
  int64_t ticks_per_second_;
};


class RolloverProtectedTickClock final : public TickClock {
 public:
  RolloverProtectedTickClock() : rollover_(0) {}
  virtual ~RolloverProtectedTickClock() {}

  int64_t Now() override {
    // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
    // every ~49.7 days. We try to track rollover ourselves, which works if
    // TimeTicks::Now() is called at least every 24 days.
    // Note that we do not use GetTickCount() here, since timeGetTime() gives
    // more predictable delta values, as described here:
    // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
    // timeGetTime() provides 1ms granularity when combined with
    // timeBeginPeriod(). If the host application for V8 wants fast timers, it
    // can use timeBeginPeriod() to increase the resolution.
    // We use a lock-free version because the sampler thread calls it
    // while having the rest of the world stopped, that could cause a deadlock.
    base::Atomic32 rollover = base::Acquire_Load(&rollover_);
    uint32_t now = static_cast<uint32_t>(timeGetTime());
    // The shared rollover_ counter's low bit tracks the expected high bit of
    // `now`; a mismatch means the 32-bit timer wrapped since the last call.
    if ((now >> 31) != static_cast<uint32_t>(rollover & 1)) {
      base::Release_CompareAndSwap(&rollover_, rollover, rollover + 1);
      ++rollover;
    }
    uint64_t ms = (static_cast<uint64_t>(rollover) << 31) | now;
    return static_cast<int64_t>(ms * Time::kMicrosecondsPerMillisecond);
  }

  bool IsHighResolution() override { return false; }

 private:
  base::Atomic32 rollover_;
};


static LazyStaticInstance<RolloverProtectedTickClock,
                          DefaultConstructTrait<RolloverProtectedTickClock>,
                          ThreadSafeInitOnceTrait>::type tick_clock =
    LAZY_STATIC_INSTANCE_INITIALIZER;


struct CreateHighResTickClockTrait {
  static TickClock* Create() {
    // Check if the installed hardware supports a high-resolution performance
    // counter, and if not fallback to the low-resolution tick clock.
    LARGE_INTEGER ticks_per_second;
    if (!QueryPerformanceFrequency(&ticks_per_second)) {
      return tick_clock.Pointer();
    }

    // If QPC not reliable, fallback to low-resolution tick clock.
    if (IsQPCReliable()) {
      return tick_clock.Pointer();
    }

    return new HighResolutionTickClock(ticks_per_second.QuadPart);
  }
};


static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait,
                           ThreadSafeInitOnceTrait>::type high_res_tick_clock =
    LAZY_DYNAMIC_INSTANCE_INITIALIZER;


TimeTicks TimeTicks::Now() {
  // Make sure we never return 0 here.
  TimeTicks ticks(tick_clock.Pointer()->Now());
  DCHECK(!ticks.IsNull());
  return ticks;
}


TimeTicks TimeTicks::HighResolutionNow() {
  // Make sure we never return 0 here.
  TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
  DCHECK(!ticks.IsNull());
  return ticks;
}


// static
bool TimeTicks::IsHighResolutionClockWorking() {
  return high_res_tick_clock.Pointer()->IsHighResolution();
}

#else  // V8_OS_WIN

TimeTicks TimeTicks::Now() {
  return HighResolutionNow();
}


TimeTicks TimeTicks::HighResolutionNow() {
  int64_t ticks;
#if V8_OS_MACOSX
  // mach_absolute_time() reports in an opaque unit; mach_timebase_info gives
  // the numer/denom ratio converting it to nanoseconds.
  static struct mach_timebase_info info;
  if (info.denom == 0) {
    kern_return_t result = mach_timebase_info(&info);
    DCHECK_EQ(KERN_SUCCESS, result);
    USE(result);
  }
  ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
           info.numer / info.denom);
#elif V8_OS_SOLARIS
  ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
#elif V8_OS_POSIX
  ticks = ClockNow(CLOCK_MONOTONIC);
#endif  // V8_OS_MACOSX
  // Make sure we never return 0 here.
  return TimeTicks(ticks + 1);
}


// static
bool TimeTicks::IsHighResolutionClockWorking() {
  return true;
}

#endif  // V8_OS_WIN


// TODO(lpy): For windows ThreadTicks implementation,
// see http://crbug.com/v8/5000
bool ThreadTicks::IsSupported() {
#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
  defined(V8_OS_MACOSX) || defined(V8_OS_ANDROID)
  return true;
#else
  return false;
#endif
}


ThreadTicks ThreadTicks::Now() {
#if V8_OS_MACOSX
  return ThreadTicks(ComputeThreadTicks());
#elif(defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
  defined(V8_OS_ANDROID)
  return ThreadTicks(ClockNow(CLOCK_THREAD_CPUTIME_ID));
#elif V8_OS_WIN
  return ThreadTicks::GetForThread(::GetCurrentThread());
#else
  UNREACHABLE();
  return ThreadTicks();
#endif
}


#if V8_OS_WIN
// Converts a thread's raw TSC cycle count into CPU time, using the measured
// TSC frequency. Returns a null ThreadTicks while the frequency is unknown.
ThreadTicks ThreadTicks::GetForThread(const HANDLE& thread_handle) {
  DCHECK(IsSupported());

  // Get the number of TSC ticks used by the current thread.
  ULONG64 thread_cycle_time = 0;
  ::QueryThreadCycleTime(thread_handle, &thread_cycle_time);

  // Get the frequency of the TSC.
  double tsc_ticks_per_second = TSCTicksPerSecond();
  if (tsc_ticks_per_second == 0)
    return ThreadTicks();

  // Return the CPU time of the current thread.
  double thread_time_seconds = thread_cycle_time / tsc_ticks_per_second;
  return ThreadTicks(
      static_cast<int64_t>(thread_time_seconds * Time::kMicrosecondsPerSecond));
}

// static
bool ThreadTicks::IsSupportedWin() {
  static bool is_supported = base::CPU().has_non_stop_time_stamp_counter() &&
                             !IsQPCReliable();
  return is_supported;
}

// static
void ThreadTicks::WaitUntilInitializedWin() {
  // TSCTicksPerSecond() needs >= 50ms between its two readings; poll until
  // a valid frequency has been computed.
  while (TSCTicksPerSecond() == 0)
    ::Sleep(10);
}

double ThreadTicks::TSCTicksPerSecond() {
  DCHECK(IsSupported());

  // The value returned by QueryPerformanceFrequency() cannot be used as the TSC
  // frequency, because there is no guarantee that the TSC frequency is equal to
  // the performance counter frequency.

  // The TSC frequency is cached in a static variable because it takes some time
  // to compute it.
  static double tsc_ticks_per_second = 0;
  if (tsc_ticks_per_second != 0)
    return tsc_ticks_per_second;

  // Increase the thread priority to reduce the chances of having a context
  // switch during a reading of the TSC and the performance counter.
  int previous_priority = ::GetThreadPriority(::GetCurrentThread());
  ::SetThreadPriority(::GetCurrentThread(), THREAD_PRIORITY_HIGHEST);

  // The first time that this function is called, make an initial reading of the
  // TSC and the performance counter.
  static const uint64_t tsc_initial = __rdtsc();
  static const uint64_t perf_counter_initial = QPCNowRaw();

  // Make another reading of the TSC and the performance counter every time
  // that this function is called.
  uint64_t tsc_now = __rdtsc();
  uint64_t perf_counter_now = QPCNowRaw();

  // Reset the thread priority.
  ::SetThreadPriority(::GetCurrentThread(), previous_priority);

  // Make sure that at least 50 ms elapsed between the 2 readings. The first
  // time that this function is called, we don't expect this to be the case.
  // Note: The longer the elapsed time between the 2 readings is, the more
  //       accurate the computed TSC frequency will be. The 50 ms value was
  //       chosen because local benchmarks show that it allows us to get a
  //       stddev of less than 1 tick/us between multiple runs.
  // Note: According to the MSDN documentation for QueryPerformanceFrequency(),
  //       this will never fail on systems that run XP or later.
  //       https://msdn.microsoft.com/library/windows/desktop/ms644905.aspx
  LARGE_INTEGER perf_counter_frequency = {};
  ::QueryPerformanceFrequency(&perf_counter_frequency);
  DCHECK_GE(perf_counter_now, perf_counter_initial);
  uint64_t perf_counter_ticks = perf_counter_now - perf_counter_initial;
  double elapsed_time_seconds =
      perf_counter_ticks / static_cast<double>(perf_counter_frequency.QuadPart);

  const double kMinimumEvaluationPeriodSeconds = 0.05;
  if (elapsed_time_seconds < kMinimumEvaluationPeriodSeconds)
    return 0;

  // Compute the frequency of the TSC.
  DCHECK_GE(tsc_now, tsc_initial);
  uint64_t tsc_ticks = tsc_now - tsc_initial;
  tsc_ticks_per_second = tsc_ticks / elapsed_time_seconds;

  return tsc_ticks_per_second;
}
#endif  // V8_OS_WIN

}  // namespace base
}  // namespace v8
/// @file
/// @author David Pilger <dpilger26@gmail.com>
/// [GitHub Repository](https://github.com/dpilger26/NumCpp)
/// @version 1.2
///
/// @section License
/// Copyright 2019 David Pilger
///
/// Permission is hereby granted, free of charge, to any person obtaining a copy of this
/// software and associated documentation files(the "Software"), to deal in the Software
/// without restriction, including without limitation the rights to use, copy, modify,
/// merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
/// permit persons to whom the Software is furnished to do so, subject to the following
/// conditions :
///
/// The above copyright notice and this permission notice shall be included in all copies
/// or substantial portions of the Software.
///
/// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
/// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
/// PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
/// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
/// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
/// DEALINGS IN THE SOFTWARE.
///
/// @section Description
/// Functions for working with NdArrays
///
#pragma once

#include "NumCpp/Core/Error.hpp"
#include "NumCpp/Core/Shape.hpp"
#include "NumCpp/Core/StlAlgorithms.hpp"
#include "NumCpp/Core/Types.hpp"
#include "NumCpp/NdArray.hpp"

#include <string>

namespace nc
{
    //============================================================================
    // Method Description:
    /// Return the gradient of the array.
    ///
    /// Uses one-sided first differences at the boundaries and central
    /// differences (divided by 2) for interior points, i.e. unit spacing —
    /// matching numpy.gradient's default behavior.
    ///
    /// NumPy Reference: https://www.numpy.org/devdocs/reference/generated/numpy.gradient.html
    ///
    /// @param inArray: the array to differentiate
    /// @param inAxis: axis to differentiate along (default ROW). Any value
    ///                other than ROW/COL differentiates the flattened array.
    /// @return NdArray<double> with the same shape as inArray (or 1 x size
    ///         for the flattened case)
    ///
    /// @throws invalid_argument when the differentiated axis has fewer than
    ///         2 elements
    ///
    template<typename dtype>
    NdArray<double> gradient(const NdArray<dtype>& inArray, Axis inAxis = Axis::ROW)
    {
        switch (inAxis)
        {
            case Axis::ROW:
            {
                const auto inShape = inArray.shape();
                if (inShape.rows < 2)
                {
                    THROW_INVALID_ARGUMENT_ERROR("input array must have more than 1 row.");
                }

                // first do the first and last rows with one-sided differences
                // (relies on NdArray's negative indices addressing rows from
                // the end of the array)
                auto returnArray = NdArray<double>(inShape);
                for (uint32 col = 0; col < inShape.cols; ++col)
                {
                    returnArray(0, col) = static_cast<double>(inArray(1, col)) - static_cast<double>(inArray(0, col));
                    returnArray(-1, col) = static_cast<double>(inArray(-1, col)) - static_cast<double>(inArray(-2, col));
                }

                // then rip through the rest of the array: central differences
                for (uint32 col = 0; col < inShape.cols; ++col)
                {
                    for (uint32 row = 1; row < inShape.rows - 1; ++row)
                    {
                        returnArray(row, col) = (static_cast<double>(inArray(row + 1, col)) - static_cast<double>(inArray(row - 1, col))) / 2.0;
                    }
                }

                return returnArray;
            }
            case Axis::COL:
            {
                const auto inShape = inArray.shape();
                if (inShape.cols < 2)
                {
                    THROW_INVALID_ARGUMENT_ERROR("input array must have more than 1 columns.");
                }

                // first do the first and last columns with one-sided
                // differences
                auto returnArray = NdArray<double>(inShape);
                for (uint32 row = 0; row < inShape.rows; ++row)
                {
                    returnArray(row, 0) = static_cast<double>(inArray(row, 1)) - static_cast<double>(inArray(row, 0));
                    returnArray(row, -1) = static_cast<double>(inArray(row, -1)) - static_cast<double>(inArray(row, -2));
                }

                // then rip through the rest of the array: central differences
                for (uint32 row = 0; row < inShape.rows; ++row)
                {
                    for (uint32 col = 1; col < inShape.cols - 1; ++col)
                    {
                        returnArray(row, col) = (static_cast<double>(inArray(row, col + 1)) - static_cast<double>(inArray(row, col - 1))) / 2.0;
                    }
                }

                return returnArray;
            }
            default:
            {
                // will return the gradient of the flattened array
                if (inArray.size() < 2)
                {
                    THROW_INVALID_ARGUMENT_ERROR("input array must have more than 1 element.");
                }

                auto returnArray = NdArray<double>(1, inArray.size());
                returnArray[0] = static_cast<double>(inArray[1]) - static_cast<double>(inArray[0]);
                returnArray[-1] = static_cast<double>(inArray[-1]) - static_cast<double>(inArray[-2]);

                // interior points: central difference of the elements two
                // apart, computed as one transform over two shifted ranges
                stl_algorithms::transform(inArray.cbegin() + 2, inArray.cend(), inArray.cbegin(), returnArray.begin() + 1,
                    [](dtype value1, dtype value2) noexcept -> double
                    {
                        return (static_cast<double>(value1) - static_cast<double>(value2)) / 2.0;
                    });

                return returnArray;
            }
        }
    }
}
/* This file is a part of libcds - Concurrent Data Structures library (C) Copyright Maxim Khizhinsky (libcds.dev@gmail.com) 2006-2016 Source code repo: http://github.com/khizmax/libcds/ Download: http://sourceforge.net/projects/libcds/files/ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "cppunit/cppunit_proxy.h" #include <cds/algo/split_bitstring.h> class Split_bitstrig : public CppUnitMini::TestCase { private: bool is_big_endian() { union { uint32_t ui; uint8_t ch; } byte_order; byte_order.ui = 0xFF000001; return byte_order.ch != 0x01; } protected: void cut_uint() { if ( is_big_endian() ) cut_uint_be(); else cut_uint_le(); } void cut_uint16() { if ( is_big_endian() ) cut_small_be<uint16_t>(); else cut_small_le<uint16_t>(); } void cut_uint_le() { CPPUNIT_MSG("little-endian byte order"); typedef cds::algo::split_bitstring< size_t > split_bitstring; size_t src = sizeof(src) == 8 ? 0xFEDCBA9876543210 : 0x76543210; split_bitstring splitter(src); size_t res; // Trivial case CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); res = splitter.cut(sizeof(src) * 8); CPPUNIT_ASSERT_EX( res == src, "src=" << src << ", result=" << res ); CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); CPPUNIT_ASSERT(splitter.safe_cut(sizeof(src) * 8) == 0 ); CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); splitter.reset(); CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); res = splitter.cut(sizeof(src) * 8); CPPUNIT_ASSERT_EX( res == src, "src=" << src << ", result=" << res ); CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); CPPUNIT_ASSERT(splitter.safe_cut(sizeof(src) * 8) == 0 ); CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); // Cut each hex digit splitter.reset(); for ( size_t i = 0; i < sizeof(size_t) * 2; ++i ) { CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); CPPUNIT_ASSERT( splitter.cut( 4 ) == i ); } CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); // by one bit { splitter.reset(); res = 0; for ( size_t i = 0; i < sizeof(size_t) * 8; ++i ) { CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); res = res + (splitter.cut( 1 ) << i); } CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); CPPUNIT_ASSERT( res == src ); } // random 
cut { for ( size_t k = 0; k < 100; ++k ) { splitter.reset(); res = 0; size_t shift = 0; while ( splitter ) { CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); int bits = rand() % 16; res = res + ( splitter.safe_cut( bits ) << shift ); shift += bits; } CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); CPPUNIT_ASSERT( res == src ); } } } void cut_uint_be() { CPPUNIT_MSG("big-endian byte order"); typedef cds::algo::split_bitstring< size_t > split_bitstring; size_t src = sizeof(src) == 8 ? 0xFEDCBA9876543210 : 0x76543210; split_bitstring splitter(src); size_t res; // Trivial case CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); res = splitter.cut(sizeof(src) * 8); CPPUNIT_ASSERT_EX( res == src, "src=" << src << ", result=" << res ); CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); CPPUNIT_ASSERT(splitter.safe_cut(sizeof(src) * 8) == 0 ); CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); splitter.reset(); CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); res = splitter.cut(sizeof(src) * 8); CPPUNIT_ASSERT_EX( res == src, "src=" << src << ", result=" << res ); CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); CPPUNIT_ASSERT(splitter.safe_cut(sizeof(src) * 8) == 0 ); CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); // Cut each hex digit splitter.reset(); for ( size_t i = 0; i < sizeof(size_t) * 2; ++i ) { CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); CPPUNIT_ASSERT( splitter.cut( 4 ) == 0x0F - i ); } CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); // by one bit { splitter.reset(); res = 0; for ( size_t i = 0; i < sizeof(size_t) * 8; ++i ) { CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); res = (res << 1) + splitter.cut( 1 ); } CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); CPPUNIT_ASSERT( res == src ); } // random cut { for ( size_t k = 0; k < 100; ++k ) { splitter.reset(); res = 0; while ( splitter ) { 
CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); int bits = rand() % 16; res = (res << bits) + splitter.safe_cut( bits ); } CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); CPPUNIT_ASSERT( res == src ); } } } private: template <typename PartUInt> void cut_small_le() { CPPUNIT_MSG("little-endian byte order"); typedef PartUInt part_uint; typedef cds::algo::split_bitstring< uint64_t, part_uint > split_bitstring; uint64_t src = 0xFEDCBA9876543210; split_bitstring splitter(src); uint64_t res; // Cut each hex digit splitter.reset(); for ( size_t i = 0; i < sizeof(size_t) * 2; ++i ) { CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); CPPUNIT_ASSERT( static_cast<size_t>(splitter.cut( 4 )) == i ); } CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); // by one bit { splitter.reset(); res = 0; for ( size_t i = 0; i < sizeof(size_t) * 8; ++i ) { CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); res = res + ( static_cast<uint64_t>(splitter.cut( 1 )) << i); } CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); CPPUNIT_ASSERT( res == src ); } // random cut { for ( size_t k = 0; k < 100; ++k ) { splitter.reset(); res = 0; size_t shift = 0; while ( splitter ) { CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); int bits = rand() % 16; res = res + ( static_cast<uint64_t>(splitter.safe_cut( bits )) << shift ); shift += bits; } CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); CPPUNIT_ASSERT( res == src ); } } } template <typename PartUInt> void cut_small_be() { CPPUNIT_MSG("big-endian byte order"); typedef PartUInt part_uint; typedef cds::algo::split_bitstring< uint64_t, part_uint > split_bitstring; uint64_t src = 0xFEDCBA9876543210; split_bitstring splitter(src); uint64_t res; // Cut each hex digit splitter.reset(); for ( size_t i = 0; i < sizeof(size_t) * 2; ++i ) { CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); CPPUNIT_ASSERT( splitter.cut( 4 ) == 0x0F - i ); } 
CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); // by one bit { splitter.reset(); res = 0; for ( size_t i = 0; i < sizeof(size_t) * 8; ++i ) { CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); res = (res << 1) + splitter.cut( 1 ); } CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); CPPUNIT_ASSERT( res == src ); } // random cut { for ( size_t k = 0; k < 100; ++k ) { splitter.reset(); res = 0; while ( splitter ) { CPPUNIT_ASSERT( !splitter.eos() ); CPPUNIT_ASSERT( splitter ); int bits = rand() % 16; res = (res << bits) + splitter.safe_cut( bits ); } CPPUNIT_ASSERT( splitter.eos() ); CPPUNIT_ASSERT( !splitter ); CPPUNIT_ASSERT( res == src ); } } } CPPUNIT_TEST_SUITE(Split_bitstrig); CPPUNIT_TEST(cut_uint) CPPUNIT_TEST(cut_uint16) CPPUNIT_TEST_SUITE_END(); }; CPPUNIT_TEST_SUITE_REGISTRATION(Split_bitstrig);
#include <hacdMicroAllocator.h>

/*!
**
** Copyright (c) 2009 by John W. Ratcliff mailto:jratcliffscarab@gmail.com
**
** If you find this code useful or you are feeling particularily generous I would
** ask that you please go to http://www.amillionpixels.us and make a donation
** to Troy DeMolay.
**
**
** If you wish to contact me you can use the following methods:
**
** Skype ID: jratcliff63367
** email: jratcliffscarab@gmail.com
**
**
** The MIT license:
**
** Permission is hereby granted, free of charge, to any person obtaining a copy
** of this software and associated documentation files (the "Software"), to deal
** in the Software without restriction, including without limitation the rights
** to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
** copies of the Software, and to permit persons to whom the Software is furnished
** to do so, subject to the following conditions:
**
** The above copyright notice and this permission notice shall be included in all
** copies or substantial portions of the Software.

** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
** WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
** CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/

#include <new>
#include <assert.h>
#include <string.h>
#include <stdlib.h>

#ifdef _WIN32
#include <windows.h>
#endif

#if defined(__APPLE__) || defined(__linux__)
#include <pthread.h>
#endif

// NOTE(review): MSVC-specific pragma is not guarded by _MSC_VER; other
// compilers may emit an unknown-pragma warning.  4100 = "unreferenced
// formal parameter".
#pragma warning(disable:4100)

namespace HACD
{

//==================================================================================
// Minimal platform-abstraction wrapper around a recursive mutex:
// CRITICAL_SECTION on Windows/Xbox, pthread recursive mutex on Apple/Linux.
// Used by the allocator classes below to serialize heap access.
//==================================================================================
class MemMutex
{
public:
    MemMutex(void);
    ~MemMutex(void);

public:
    // Blocking Lock.
    void Lock(void);

    // Unlock.
    void Unlock(void);

private:
#if defined(_WIN32) || defined(_XBOX)
    CRITICAL_SECTION m_Mutex;
#elif defined(__APPLE__) || defined(__linux__)
    pthread_mutex_t  m_Mutex;
#endif
};

//==================================================================================
MemMutex::MemMutex(void)
{
#if defined(_WIN32) || defined(_XBOX)
    InitializeCriticalSection(&m_Mutex);
#elif defined(__APPLE__) || defined(__linux__)
    // Explicitly request a recursive mutex to match CRITICAL_SECTION
    // semantics (same thread may lock more than once).
    pthread_mutexattr_t mta;
    pthread_mutexattr_init(&mta);
    pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&m_Mutex, &mta);
    pthread_mutexattr_destroy(&mta);
#endif
}

//==================================================================================
MemMutex::~MemMutex(void)
{
#if defined(_WIN32) || defined(_XBOX)
    DeleteCriticalSection(&m_Mutex);
#elif defined(__APPLE__) || defined(__linux__)
    pthread_mutex_destroy(&m_Mutex);
#endif
}

//==================================================================================
// Blocking Lock.
//==================================================================================
void MemMutex::Lock(void)
{
#if defined(_WIN32) || defined(_XBOX)
    EnterCriticalSection(&m_Mutex);
#elif defined(__APPLE__) || defined(__linux__)
    pthread_mutex_lock(&m_Mutex);
#endif
}

//==================================================================================
// Unlock.
//==================================================================================
void MemMutex::Unlock(void)
{
#if defined(_WIN32) || defined(_XBOX)
    LeaveCriticalSection(&m_Mutex);
#elif defined(__APPLE__) || defined(__linux__)
    pthread_mutex_unlock(&m_Mutex);
#endif
}

// Header prepended to a raw chunk in a linked list of chunks.
struct ChunkHeader
{
    ChunkHeader *mNextChunk;
};

// interface to add and remove new chunks to the master list.
class MicroChunkUpdate
{
public:
    virtual void addMicroChunk(NxU8 *memStart,NxU8 *memEnd,MemoryChunk *chunk) = 0;
    virtual void removeMicroChunk(MemoryChunk *chunk) = 0;
};

// Intrusive singly-linked free-list node; overlaid on each free cell of a
// MemoryChunk, so every cell must be at least sizeof(MemoryHeader) bytes.
class MemoryHeader
{
public:
    MemoryHeader *mNext;
};

// a single fixed size chunk for micro-allocations.
class MemoryChunk { public: MemoryChunk(void) { mData = 0; mDataEnd = 0; mUsedCount = 0; mFreeList = 0; mMyHeap = false; mChunkSize = 0; } NxU8 * init(NxU8 *chunkBase,NxU32 chunkSize,NxU32 maxChunks) { mChunkSize = chunkSize; mData = chunkBase; mDataEnd = mData+(chunkSize*maxChunks); mFreeList = (MemoryHeader *) mData; MemoryHeader *scan = mFreeList; NxU8 *data = mData; data+=chunkSize; for (NxU32 i=0; i<(maxChunks-1); i++) { MemoryHeader *next = (MemoryHeader *)data; scan->mNext = next; data+=chunkSize; scan = next; } scan->mNext = 0; return mDataEnd; } inline void * allocate(MicroHeap *heap,NxU32 chunkSize,NxU32 maxChunks,MicroChunkUpdate *update) { void *ret = 0; if ( mData == 0 ) { mMyHeap = true; mData = (NxU8 *)heap->micro_malloc( chunkSize * maxChunks ); init(mData,chunkSize,maxChunks); update->addMicroChunk(mData,mDataEnd,this); } if ( mFreeList ) { mUsedCount++; ret = mFreeList; mFreeList = mFreeList->mNext; } return ret; } inline void deallocate(void *p,MicroHeap *heap,MicroChunkUpdate *update) { #ifdef _DEBUG assert(mUsedCount); NxU8 *s = (NxU8 *)p; assert( s >= mData && s < mDataEnd ); #endif MemoryHeader *mh = mFreeList; mFreeList = (MemoryHeader *)p; mFreeList->mNext = mh; mUsedCount--; if ( mUsedCount == 0 && mMyHeap ) // free the heap back to the application if we are done with this. 
{ update->removeMicroChunk(this); heap->micro_free(mData); mMyHeap = false; mData = 0; mDataEnd = 0; mFreeList = 0; } } NxU32 getChunkSize(void) const { return mChunkSize; }; bool isInside(const NxU8 *p) const { return p>=mData && p < mDataEnd; } private: bool mMyHeap; NxU8 *mData; NxU8 *mDataEnd; NxU32 mUsedCount; MemoryHeader *mFreeList; NxU32 mChunkSize; }; #define DEFAULT_CHUNKS 32 class MemoryChunkChunk { public: MemoryChunkChunk(void) { mNext = 0; mChunkSize = 0; mMaxChunks = 0; } ~MemoryChunkChunk(void) { } inline void * allocate(MemoryChunk *&current,MicroChunkUpdate *update) { void *ret = 0; MemoryChunkChunk *scan = this; while ( scan && ret == 0 ) { for (NxU32 i=0; i<DEFAULT_CHUNKS; i++) { ret = scan->mChunks[i].allocate(mHeap,mChunkSize,mMaxChunks,update); if ( ret ) { current = &scan->mChunks[i]; scan = 0; break; } } if ( scan ) scan = scan->mNext; } if ( !ret ) { MemoryChunkChunk *mcc = (MemoryChunkChunk *)mHeap->micro_malloc( sizeof(MemoryChunkChunk) ); new ( mcc ) MemoryChunkChunk; MemoryChunkChunk *onext = mNext; mNext = mcc; mcc->mNext = onext; ret = mcc->mChunks[0].allocate(mHeap,mChunkSize,mMaxChunks,update); current = &mcc->mChunks[0]; } return ret; } NxU8 * init(NxU8 *chunkBase,NxU32 fixedSize,NxU32 chunkSize,MemoryChunk *&current,MicroHeap *heap) { mHeap = heap; mChunkSize = chunkSize; mMaxChunks = fixedSize/chunkSize; current = &mChunks[0]; chunkBase = mChunks[0].init(chunkBase,chunkSize,mMaxChunks); return chunkBase; } MicroHeap *mHeap; NxU32 mChunkSize; NxU32 mMaxChunks; MemoryChunkChunk *mNext; MemoryChunk mChunks[DEFAULT_CHUNKS]; }; class FixedMemory { public: FixedMemory(void) { mCurrent = 0; } void * allocate(MicroChunkUpdate *update) { void *ret = mCurrent->allocate(mChunks.mHeap,mChunks.mChunkSize,mChunks.mMaxChunks,update); if ( ret == 0 ) { ret = mChunks.allocate(mCurrent,update); } return ret; } NxU8 * init(NxU8 *chunkBase,NxU32 chunkSize,NxU32 fixedSize,MicroHeap *heap) { mMemBegin = chunkBase; mMemEnd = chunkBase+fixedSize; 
mChunks.init(chunkBase,fixedSize,chunkSize,mCurrent,heap); return mMemEnd; } NxU8 *mMemBegin; NxU8 *mMemEnd; MemoryChunk *mCurrent; // the current memory chunk we are operating in. MemoryChunkChunk mChunks; // the collection of all memory chunks used. }; class MicroChunk { public: void set(NxU8 *memStart,NxU8 *memEnd,MemoryChunk *mc) { mMemStart = memStart; mMemEnd = memEnd; mChunk = mc; mPad = 0; } inline bool inside(const NxU8 *p) const { return p >= mMemStart && p < mMemEnd; } NxU8 *mMemStart; NxU8 *mMemEnd; MemoryChunk *mChunk; NxU8 *mPad; // padding to make it 16 byte aligned. }; class MyMicroAllocator : public MicroAllocator, public MicroChunkUpdate, public MemMutex { public: MyMicroAllocator(MicroHeap *heap,void *baseMem,NxU32 initialSize,NxU32 chunkSize) { mLastMicroChunk = 0; mMicroChunks = 0; mMicroChunkCount = 0; mMaxMicroChunks = 0; mHeap = heap; mChunkSize = chunkSize; // 0 through 8 bytes for (NxU32 i=0; i<=8; i++) { mFixedAllocators[i] = &mAlloc[0]; } // 9 through 16 bytes for (NxU32 i=9; i<=16; i++) { mFixedAllocators[i] = &mAlloc[1]; } // 17 through 32 bytes for (NxU32 i=17; i<=32; i++) { mFixedAllocators[i] = &mAlloc[2]; } // 33 through 64 for (NxU32 i=33; i<=64; i++) { mFixedAllocators[i] = &mAlloc[3]; } // 65 through 128 for (NxU32 i=65; i<=128; i++) { mFixedAllocators[i] = &mAlloc[4]; } // 129 through 255 for (NxU32 i=129; i<257; i++) { mFixedAllocators[i] = &mAlloc[5]; } mBaseMem = (NxU8 *)baseMem; mBaseMemEnd = mBaseMem+initialSize; NxU8 *chunkBase = (NxU8 *)baseMem+sizeof(MyMicroAllocator); chunkBase+=32; NxU64 ptr = (NxU64)chunkBase; ptr = ptr>>4; ptr = ptr<<4; // make sure it is 16 byte aligned. 
chunkBase = (NxU8 *)ptr; mChunkStart = chunkBase; chunkBase = mAlloc[0].init(chunkBase,8,chunkSize,heap); chunkBase = mAlloc[1].init(chunkBase,16,chunkSize,heap); chunkBase = mAlloc[2].init(chunkBase,32,chunkSize,heap); chunkBase = mAlloc[3].init(chunkBase,64,chunkSize,heap); chunkBase = mAlloc[4].init(chunkBase,128,chunkSize,heap); chunkBase = mAlloc[5].init(chunkBase,256,chunkSize,heap); mChunkEnd = chunkBase; assert(chunkBase <= mBaseMemEnd ); } ~MyMicroAllocator(void) { if ( mMicroChunks ) { mHeap->micro_free(mMicroChunks); } } virtual NxU32 getChunkSize(MemoryChunk *chunk) { return chunk ? chunk->getChunkSize() : 0; } // we have to steal one byte out of every allocation to record the size, so we can efficiently de-allocate it later. virtual void * malloc(size_t size) { void *ret = 0; Lock(); assert( size <= 256 ); if ( size <= 256 ) { ret = mFixedAllocators[size]->allocate(this); } Unlock(); return ret; } virtual void free(void *p,MemoryChunk *chunk) { Lock(); chunk->deallocate(p,mHeap,this); Unlock(); } // perform a binary search on the sorted list of chunks. MemoryChunk * binarySearchMicroChunks(const NxU8 *p) { MemoryChunk *ret = 0; NxU32 low = 0; NxU32 high = mMicroChunkCount; while ( low != high ) { NxU32 mid = (high-low)/2+low; MicroChunk &chunk = mMicroChunks[mid]; if ( chunk.inside(p)) { mLastMicroChunk = &chunk; ret = chunk.mChunk; break; } else { if ( p > chunk.mMemEnd ) { low = mid+1; } else { high = mid; } } } return ret; } virtual MemoryChunk * isMicroAlloc(const void *p) // returns true if this pointer is handled by the micro-allocator. 
{ MemoryChunk *ret = 0; Lock(); const NxU8 *s = (const NxU8 *)p; if ( s >= mChunkStart && s < mChunkEnd ) { NxU32 index = (NxU32)(s-mChunkStart)/mChunkSize; assert(index>=0 && index < 6 ); ret = &mAlloc[index].mChunks.mChunks[0]; assert( ret->isInside(s) ); } else if ( mMicroChunkCount ) { if ( mLastMicroChunk && mLastMicroChunk->inside(s) ) { ret = mLastMicroChunk->mChunk; } else { if ( mMicroChunkCount >= 4 ) { ret = binarySearchMicroChunks(s); #ifdef _DEBUG if (ret ) { assert( ret->isInside(s) ); } else { for (NxU32 i=0; i<mMicroChunkCount; i++) { assert( !mMicroChunks[i].inside(s) ); } } #endif } else { for (NxU32 i=0; i<mMicroChunkCount; i++) { if ( mMicroChunks[i].inside(s) ) { ret = mMicroChunks[i].mChunk; assert( ret->isInside(s) ); mLastMicroChunk = &mMicroChunks[i]; break; } } } } } #ifdef _DEBUG if ( ret ) assert( ret->isInside(s) ); #endif Unlock(); return ret; } MicroHeap * getMicroHeap(void) const { return mHeap; }; void allocateMicroChunks(void) { if ( mMaxMicroChunks == 0 ) { mMaxMicroChunks = 64; // initial reserve. mMicroChunks = (MicroChunk *)mHeap->micro_malloc( sizeof(MicroChunk)*mMaxMicroChunks ); } else { mMaxMicroChunks*=2; mMicroChunks = (MicroChunk *)mHeap->micro_realloc( mMicroChunks, sizeof(MicroChunk)*mMaxMicroChunks); } } // perform an insertion sort of the new chunk. 
virtual void addMicroChunk(NxU8 *memStart,NxU8 *memEnd,MemoryChunk *chunk) { if ( mMicroChunkCount >= mMaxMicroChunks ) { allocateMicroChunks(); } bool inserted = false; for (NxU32 i=0; i<mMicroChunkCount; i++) { if ( memEnd < mMicroChunks[i].mMemStart ) { for (NxU32 j=mMicroChunkCount; j>i; j--) { mMicroChunks[j] = mMicroChunks[j-1]; } mMicroChunks[i].set( memStart, memEnd, chunk ); mLastMicroChunk = &mMicroChunks[i]; mMicroChunkCount++; inserted = true; break; } } if ( !inserted ) { mMicroChunks[mMicroChunkCount].set(memStart,memEnd,chunk); mLastMicroChunk = &mMicroChunks[mMicroChunkCount]; mMicroChunkCount++; } } virtual void removeMicroChunk(MemoryChunk *chunk) { mLastMicroChunk = 0; #ifdef _DEBUG bool removed = false; #endif for (NxU32 i=0; i<mMicroChunkCount; i++) { if ( mMicroChunks[i].mChunk == chunk ) { mMicroChunkCount--; for (NxU32 j=i; j<mMicroChunkCount; j++) { mMicroChunks[j] = mMicroChunks[j+1]; } #ifdef _DEBUG removed = true; #endif break; } } #ifdef _DEBUG assert(removed); #endif } inline void * inline_malloc(size_t size) { Lock(); void *ret = mFixedAllocators[size]->allocate(this); Unlock(); return ret; } inline void inline_free(void *p,MemoryChunk *chunk) // free relative to previously located MemoryChunk { Lock(); chunk->deallocate(p,mHeap,this); Unlock(); } inline MemoryChunk * inline_isMicroAlloc(const void *p) // returns pointer to the chunk this memory belongs to, or null if not a micro-allocated block. 
{ MemoryChunk *ret = 0; Lock(); const NxU8 *s = (const NxU8 *)p; if ( s >= mChunkStart && s < mChunkEnd ) { NxU32 index = (NxU32)(s-mChunkStart)/mChunkSize; assert(index>=0 && index < 6 ); ret = &mAlloc[index].mChunks.mChunks[0]; } else if ( mMicroChunkCount ) { if ( mLastMicroChunk && mLastMicroChunk->inside(s) ) { ret = mLastMicroChunk->mChunk; } else { if ( mMicroChunkCount >= 4 ) { ret = binarySearchMicroChunks(s); } else { for (NxU32 i=0; i<mMicroChunkCount; i++) { if ( mMicroChunks[i].inside(s) ) { ret = mMicroChunks[i].mChunk; mLastMicroChunk = &mMicroChunks[i]; break; } } } } } Unlock(); return ret; } private: MicroHeap *mHeap; NxU8 *mBaseMem; NxU8 *mBaseMemEnd; FixedMemory *mFixedAllocators[257]; NxU32 mChunkSize; NxU8 *mChunkStart; NxU8 *mChunkEnd; NxU32 mMaxMicroChunks; NxU32 mMicroChunkCount; MicroChunk *mLastMicroChunk; MicroChunk *mMicroChunks; FixedMemory mAlloc[6]; }; MicroAllocator *createMicroAllocator(MicroHeap *heap,NxU32 chunkSize) { NxU32 initialSize = chunkSize*6+sizeof(MyMicroAllocator)+32; void *baseMem = heap->micro_malloc(initialSize); MyMicroAllocator *mc = (MyMicroAllocator *)baseMem; new ( mc ) MyMicroAllocator(heap,baseMem,initialSize,chunkSize); return static_cast< MicroAllocator *>(mc); } void releaseMicroAllocator(MicroAllocator *m) { MyMicroAllocator *mc = static_cast< MyMicroAllocator *>(m); MicroHeap *mh = mc->getMicroHeap(); mc->~MyMicroAllocator(); mh->micro_free(mc); } class MyHeapManager : public MicroHeap, public HeapManager { public: MyHeapManager(NxU32 defaultChunkSize) { mMicro = createMicroAllocator(this,defaultChunkSize); } ~MyHeapManager(void) { releaseMicroAllocator(mMicro); } // heap allocations used by the micro allocator. 
virtual void * micro_malloc(size_t size) { return ::malloc(size); } virtual void micro_free(void *p) { return ::free(p); } virtual void * micro_realloc(void *oldMem,size_t newSize) { return ::realloc(oldMem,newSize); } virtual void * heap_malloc(size_t size) { void *ret; if ( size <= 256 ) // micro allocator only handles allocations between 0 and 256 bytes in length. { ret = mMicro->malloc(size); } else { ret = ::malloc(size); } return ret; } virtual void heap_free(void *p) { MemoryChunk *chunk = mMicro->isMicroAlloc(p); if ( chunk ) { mMicro->free(p,chunk); } else { ::free(p); } } virtual void * heap_realloc(void *oldMem,size_t newSize) { void *ret = 0; MemoryChunk *chunk = mMicro->isMicroAlloc(oldMem); if ( chunk ) { ret = heap_malloc(newSize); NxU32 oldSize = chunk->getChunkSize(); if ( oldSize < newSize ) { memcpy(ret,oldMem,oldSize); } else { memcpy(ret,oldMem,newSize); } mMicro->free(oldMem,chunk); } else { ret = ::realloc(oldMem,newSize); } return ret; } inline void * inline_heap_malloc(size_t size) { return size<=256 ? 
((MyMicroAllocator *)mMicro)->inline_malloc(size) : ::malloc(size); } inline void inline_heap_free(void *p) { MemoryChunk *chunk = ((MyMicroAllocator *)mMicro)->inline_isMicroAlloc(p); if ( chunk ) { ((MyMicroAllocator *)mMicro)->inline_free(p,chunk); } else { ::free(p); } } private: MicroAllocator *mMicro; }; HeapManager * createHeapManager(NxU32 defaultChunkSize) { MyHeapManager *m = (MyHeapManager *)::malloc(sizeof(MyHeapManager)); new ( m ) MyHeapManager(defaultChunkSize); return static_cast< HeapManager *>(m); } void releaseHeapManager(HeapManager *heap) { MyHeapManager *m = static_cast< MyHeapManager *>(heap); m->~MyHeapManager(); free(m); } #define TEST_SIZE 63 #define TEST_ALLOC_COUNT 8192 #define TEST_RUN 40000000 #define TEST_INLINE 1 #ifdef _WIN32 #include <windows.h> #pragma comment(lib,"winmm.lib") #else static NxU32 timeGetTime(void) { return 0; } #endif #include <stdio.h> void performUnitTests(void) { void *allocs[TEST_ALLOC_COUNT]; for (NxU32 i=0; i<TEST_ALLOC_COUNT; i++) { allocs[i] = 0; } HeapManager *hm = createHeapManager(65536*32); { NxU32 stime = timeGetTime(); srand(0); for (NxU32 i=0; i<TEST_RUN; i++) { NxU32 index = rand()&(TEST_ALLOC_COUNT-1); if ( allocs[index] ) { #if TEST_INLINE heap_free(hm, allocs[index] ); #else hm->heap_free( allocs[index] ); #endif allocs[index] = 0; } else { NxU32 asize = (rand()&TEST_SIZE); if ( (rand()&127)==0) asize+=256; // one out of every 15 allocs is larger than 256 bytes. 
#if TEST_INLINE allocs[index] = heap_malloc(hm,asize); #else allocs[index] = hm->heap_malloc(asize); #endif } } for (NxU32 i=0; i<TEST_ALLOC_COUNT; i++) { if ( allocs[i] ) { #if TEST_INLINE heap_free(hm,allocs[i] ); #else hm->heap_free(allocs[i] ); #endif allocs[i] = 0; } } NxU32 etime = timeGetTime(); printf("Micro allocation test took %d milliseconds.\r\n", etime - stime ); } { NxU32 stime = timeGetTime(); srand(0); for (NxU32 i=0; i<TEST_RUN; i++) { NxU32 index = rand()&(TEST_ALLOC_COUNT-1); if ( allocs[index] ) { ::free( allocs[index] ); allocs[index] = 0; } else { NxU32 asize = (rand()&TEST_SIZE); if ( (rand()&127)==0) asize+=256; // one out of every 15 allocs is larger than 256 bytes. allocs[index] = ::malloc(asize); } } for (NxU32 i=0; i<TEST_ALLOC_COUNT; i++) { if ( allocs[i] ) { ::free(allocs[i] ); allocs[i] = 0; } } NxU32 etime = timeGetTime(); printf("Standard malloc/free test took %d milliseconds.\r\n", etime - stime ); } releaseHeapManager(hm); } void * heap_malloc(HeapManager *hm,size_t size) { return ((MyHeapManager *)hm)->inline_heap_malloc(size); } void heap_free(HeapManager *hm,void *p) { ((MyHeapManager *)hm)->inline_heap_free(p); } void * heap_realloc(HeapManager *hm,void *oldMem,size_t newSize) { return hm->heap_realloc(oldMem,newSize); } } // end of namespace
//================================================================================================= /*! // \file src/mathtest/smatdmatkron/DCbHDb.cpp // \brief Source file for the DCbHDb sparse matrix/dense matrix Kronecker product math test // // Copyright (C) 2012-2019 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/
//=================================================================================================


//*************************************************************************************************
// Includes
//*************************************************************************************************

#include <cstdlib>
#include <iostream>
#include <blaze/math/CompressedMatrix.h>
#include <blaze/math/DiagonalMatrix.h>
#include <blaze/math/DynamicMatrix.h>
#include <blaze/math/HermitianMatrix.h>
#include <blazetest/mathtest/Creator.h>
#include <blazetest/mathtest/smatdmatkron/OperationTest.h>
#include <blazetest/system/MathTest.h>

#ifdef BLAZE_USE_HPX_THREADS
#  include <hpx/hpx_main.hpp>
#endif


//=================================================================================================
//
//  MAIN FUNCTION
//
//=================================================================================================

//*************************************************************************************************
// Driver for the DCbHDb test: Kronecker product of a diagonal compressed
// (sparse) matrix with a Hermitian dynamic (dense) matrix, element type TypeB.
int main()
{
   std::cout << "   Running 'DCbHDb'..." << std::endl;

   using blazetest::mathtest::TypeB;

   try
   {
      // Matrix type definitions
      using DCb = blaze::DiagonalMatrix< blaze::CompressedMatrix<TypeB> >;
      using HDb = blaze::HermitianMatrix< blaze::DynamicMatrix<TypeB> >;

      // Creator type definitions
      using CDCb = blazetest::Creator<DCb>;
      using CHDb = blazetest::Creator<HDb>;

      // Running tests with small matrices
      // (i = matrix size, j = number of non-zeros, k = size of the dense operand)
      for( size_t i=0UL; i<=4UL; ++i ) {
         for( size_t j=0UL; j<=i; ++j ) {
            for( size_t k=0UL; k<=4UL; ++k ) {
               RUN_SMATDMATKRON_OPERATION_TEST( CDCb( i, j ), CHDb( k ) );
            }
         }
      }

      // Running tests with large matrices
      RUN_SMATDMATKRON_OPERATION_TEST( CDCb(  9UL, 7UL ), CHDb(  8UL ) );
      RUN_SMATDMATKRON_OPERATION_TEST( CDCb( 16UL, 7UL ), CHDb( 15UL ) );
   }
   catch( std::exception& ex ) {
      std::cerr << "\n\n ERROR DETECTED during sparse matrix/dense matrix Kronecker product:\n"
                << ex.what() << "\n";
      return EXIT_FAILURE;
   }

   return EXIT_SUCCESS;
}
//*************************************************************************************************
/*
  Copyright (c) 2019 Sogou, Inc.

  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.

  Author: Xie Han (xiehan@sogou-inc.com)
*/

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "HttpMessage.h"

namespace protocol
{

// One node of the message's output-body list.  For the copying variant the
// payload bytes live in the same malloc() allocation, immediately after this
// header (ptr == block + 1); for the nocopy variant ptr aliases caller memory.
struct HttpMessageBlock
{
    struct list_head list;   // link into HttpMessage::output_body
    const void *ptr;         // start of the payload bytes
    size_t size;             // payload length in bytes
};

// Append a body chunk, copying the caller's bytes into a single allocation
// that holds both the list node and the data.  Returns false only when
// malloc() fails (errno left as set by malloc).
bool HttpMessage::append_output_body(const void *buf, size_t size)
{
    // Node header and payload share one allocation.
    size_t n = sizeof (struct HttpMessageBlock) + size;
    struct HttpMessageBlock *block = (struct HttpMessageBlock *)malloc(n);

    if (block)
    {
        // Payload is placed right after the node header.
        memcpy(block + 1, buf, size);
        block->ptr = block + 1;
        block->size = size;
        list_add_tail(&block->list, &this->output_body);
        this->output_body_size += size;
        return true;
    }

    return false;
}

// Append a body chunk WITHOUT copying: the node only records the caller's
// pointer, so the caller must keep `buf` alive until the message is encoded
// and sent.  Returns false only when malloc() fails.
bool HttpMessage::append_output_body_nocopy(const void *buf, size_t size)
{
    size_t n = sizeof (struct HttpMessageBlock);
    struct HttpMessageBlock *block = (struct HttpMessageBlock *)malloc(n);

    if (block)
    {
        // No copy: we borrow the caller's buffer.
        block->ptr = buf;
        block->size = size;
        list_add_tail(&block->list, &this->output_body);
        this->output_body_size += size;
        return true;
    }

    return false;
}

// Free every body block and reset the accumulated body size to zero.
// Safe for nocopy blocks too: only the node itself is freed, never ptr.
void HttpMessage::clear_output_body()
{
    struct HttpMessageBlock *block;
    struct list_head *pos, *tmp;

    // _safe variant: the current node is unlinked and freed inside the loop.
    list_for_each_safe(pos, tmp, &this->output_body)
    {
        block = list_entry(pos, struct HttpMessageBlock, list);
        list_del(pos);
        free(block);
    }

    this->output_body_size = 0;
}

// Merge every block from `pos` to the end of the list into one newly
// allocated block of `size` bytes (the caller passes the remaining byte
// count).  The merged nodes are unlinked and freed; the combined block is
// appended to the list and its node returned.  Returns NULL if malloc fails.
// Used by encode() when the iovec array is nearly full.
struct list_head *HttpMessage::combine_from(struct list_head *pos, size_t size)
{
    size_t n = sizeof (struct HttpMessageBlock) + size;
    struct HttpMessageBlock *block = (struct HttpMessageBlock *)malloc(n);
    struct HttpMessageBlock *entry;
    char *ptr;

    if (block)
    {
        block->ptr = block + 1;
        block->size = size;
        ptr = (char *)block->ptr;
        // Drain the tail of the list into the combined buffer.
        do
        {
            entry = list_entry(pos, struct HttpMessageBlock, list);
            pos = pos->next;            // advance before unlinking
            list_del(&entry->list);
            memcpy(ptr, entry->ptr, entry->size);
            ptr += entry->size;
            free(entry);
        } while (pos != &this->output_body);

        list_add_tail(&block->list, &this->output_body);
        return &block->list;
    }

    return NULL;
}

// Serialize the message into `vectors` (at most `max` entries) for writev().
// Layout: start line (6 fixed iovecs) + one iovec per header + blank line +
// one iovec per body block.  Returns the number of iovecs used, or -1 with
// errno set (EBADMSG: incomplete start line, EOVERFLOW: too many headers).
int HttpMessage::encode(struct iovec vectors[], int max)
{
    const char *start_line[3];
    http_header_cursor_t cursor;
    struct HttpMessageHeader header;
    struct HttpMessageBlock *block;
    struct list_head *pos;
    size_t size;
    int i;

    // A request has a method; otherwise treat it as a response
    // (version / code / phrase).
    start_line[0] = http_parser_get_method(this->parser);
    if (start_line[0])
    {
        start_line[1] = http_parser_get_uri(this->parser);
        start_line[2] = http_parser_get_version(this->parser);
    }
    else
    {
        start_line[0] = http_parser_get_version(this->parser);
        start_line[1] = http_parser_get_code(this->parser);
        start_line[2] = http_parser_get_phrase(this->parser);
    }

    if (!start_line[0] || !start_line[1] || !start_line[2])
    {
        errno = EBADMSG;
        return -1;
    }

    // "<a> <b> <c>\r\n" as six iovecs; separators are string literals.
    vectors[0].iov_base = (void *)start_line[0];
    vectors[0].iov_len = strlen(start_line[0]);
    vectors[1].iov_base = (void *)" ";
    vectors[1].iov_len = 1;
    vectors[2].iov_base = (void *)start_line[1];
    vectors[2].iov_len = strlen(start_line[1]);
    vectors[3].iov_base = (void *)" ";
    vectors[3].iov_len = 1;
    vectors[4].iov_base = (void *)start_line[2];
    vectors[4].iov_len = strlen(start_line[2]);
    vectors[5].iov_base = (void *)"\r\n";
    vectors[5].iov_len = 2;

    i = 6;
    http_header_cursor_init(&cursor, this->parser);
    while (http_header_cursor_next(&header.name, &header.name_len,
                                   &header.value, &header.value_len,
                                   &cursor) == 0)
    {
        if (i == max)
            break;

        // NOTE(review): one iovec covers name + ": " + value + "\r\n"
        // starting at header.name -- this assumes the parser keeps each
        // whole header line contiguous in its buffer; verify against the
        // http_parser implementation.
        vectors[i].iov_base = (void *)header.name;
        vectors[i].iov_len = header.name_len + 2 + header.value_len + 2;
        i++;
    }

    http_header_cursor_deinit(&cursor);
    // Need room for the blank line plus at least one body entry.
    if (i + 1 >= max)
    {
        errno = EOVERFLOW;
        return -1;
    }

    // Blank line terminating the header section.
    vectors[i].iov_base = (void *)"\r\n";
    vectors[i].iov_len = 2;
    i++;

    size = this->output_body_size;
    list_for_each(pos, &this->output_body)
    {
        // Only one iovec slot left but more than one block remains:
        // collapse the rest of the list into a single block.
        if (i + 1 == max && pos != this->output_body.prev)
        {
            pos = this->combine_from(pos, size);
            if (!pos)
                return -1;      // malloc failure inside combine_from
        }

        block = list_entry(pos, struct HttpMessageBlock, list);
        vectors[i].iov_base = (void *)block->ptr;
        vectors[i].iov_len = block->size;
        size -= block->size;    // bytes still to emit after this block
        i++;
    }

    return i;
}

// Feed received bytes to the parser, enforcing size_limit on the running
// total.  Returns the parser's result; maps parser error -2 to -1/EBADMSG
// and an over-limit message to -1/EMSGSIZE.
inline int HttpMessage::append(const void *buf, size_t *size)
{
    int ret = http_parser_append_message(buf, size, this->parser);

    if (ret >= 0)
    {
        // *size is the number of bytes actually consumed by the parser.
        this->cur_size += *size;
        if (this->cur_size > this->size_limit)
        {
            errno = EMSGSIZE;
            ret = -1;
        }
    }
    else if (ret == -2)
    {
        errno = EBADMSG;
        ret = -1;
    }

    return ret;
}

// Move constructor: steals the parser and the body list, leaving `msg` with
// a fresh parser of the same direction (is_resp) and an empty body.
HttpMessage::HttpMessage(HttpMessage&& msg)
{
    this->size_limit = msg.size_limit;
    msg.size_limit = (size_t)-1;        // "unlimited" sentinel
    this->parser = msg.parser;
    msg.parser = new http_parser_t;
    // Re-init the donor's replacement parser with the same direction.
    http_parser_init(this->parser->is_resp, msg.parser);
    INIT_LIST_HEAD(&this->output_body);
    list_splice_init(&msg.output_body, &this->output_body);
    this->output_body_size = msg.output_body_size;
    msg.output_body_size = 0;
    this->cur_size = msg.cur_size;
    msg.cur_size = 0;
}

// Move assignment: releases this message's parser and body first, then
// steals msg's state exactly as the move constructor does.
HttpMessage& HttpMessage::operator = (HttpMessage&& msg)
{
    if (&msg != this)
    {
        this->size_limit = msg.size_limit;
        msg.size_limit = (size_t)-1;
        // Dispose of our current parser before taking ownership of msg's.
        http_parser_deinit(this->parser);
        delete this->parser;
        this->parser = msg.parser;
        msg.parser = new http_parser_t;
        http_parser_init(this->parser->is_resp, msg.parser);
        this->clear_output_body();
        list_splice_init(&msg.output_body, &this->output_body);
        this->output_body_size = msg.output_body_size;
        msg.output_body_size = 0;
        this->cur_size = msg.cur_size;
        msg.cur_size = 0;
    }

    return *this;
}

// Canned responses sent directly on the transport (via feedback()) before
// normal response handling: 100 for Expect: 100-continue, 400/413 for
// malformed / oversized requests, 417 when the announced body cannot fit.
#define HTTP_100_STATUS_LINE "HTTP/1.1 100 Continue"
#define HTTP_400_STATUS_LINE "HTTP/1.1 400 Bad Request"
#define HTTP_413_STATUS_LINE "HTTP/1.1 413 Request Entity Too Large"
#define HTTP_417_STATUS_LINE "HTTP/1.1 417 Expectation Failed"
#define CONTENT_LENGTH_ZERO "Content-Length: 0"
#define CONNECTION_CLOSE "Connection: close"
#define CRLF "\r\n"

#define HTTP_100_RESP HTTP_100_STATUS_LINE CRLF \
                      CRLF
#define HTTP_400_RESP HTTP_400_STATUS_LINE CRLF \
                      CONTENT_LENGTH_ZERO CRLF \
                      CONNECTION_CLOSE CRLF \
                      CRLF
#define HTTP_413_RESP HTTP_413_STATUS_LINE CRLF \
                      CONTENT_LENGTH_ZERO CRLF \
                      CONNECTION_CLOSE CRLF \
                      CRLF
#define HTTP_417_RESP HTTP_417_STATUS_LINE CRLF \
                      CONTENT_LENGTH_ZERO CRLF \
                      CONNECTION_CLOSE CRLF \
                      CRLF

// Handle a request that carried "Expect: 100-continue": reply 417 if the
// announced body would exceed size_limit, otherwise send "100 Continue".
// Returns 0 on success, -1 with errno set (EMSGSIZE, EAGAIN, or the
// feedback() error) on failure.
int HttpRequest::handle_expect_continue()
{
    size_t trans_len = this->parser->transfer_length;
    int ret;

    // (size_t)-1 means the transfer length is unknown -- skip the check.
    if (trans_len != (size_t)-1)
    {
        if (this->parser->header_offset + trans_len > this->size_limit)
        {
            this->feedback(HTTP_417_RESP, strlen(HTTP_417_RESP));
            errno = EMSGSIZE;
            return -1;
        }
    }

    ret = this->feedback(HTTP_100_RESP, strlen(HTTP_100_RESP));
    if (ret != strlen(HTTP_100_RESP))
    {
        // Partial write: not a transport error, but the 100 was not sent.
        if (ret >= 0)
            errno = EAGAIN;
        return -1;
    }

    return 0;
}

// Request-side append: on completed headers with Expect: 100-continue, send
// the interim 100 reply; on parse/size errors, send a canned 400/413 before
// propagating the failure.
int HttpRequest::append(const void *buf, size_t *size)
{
    int ret = HttpMessage::append(buf, size);

    if (ret == 0)
    {
        // Headers just became complete and the client is waiting for 100.
        if (this->parser->expect_continue && http_parser_header_complete(this->parser))
        {
            this->parser->expect_continue = 0;  // only answer once
            ret = this->handle_expect_continue();
        }
    }
    else if (ret < 0)
    {
        if (errno == EBADMSG)
            this->feedback(HTTP_400_RESP, strlen(HTTP_400_RESP));
        else if (errno == EMSGSIZE)
            this->feedback(HTTP_413_RESP, strlen(HTTP_413_RESP));
    }

    return ret;
}

// Response-side append: a completed 1xx informational response is discarded
// and the parser reset so the real response can follow on the same
// connection (ret forced back to 0 = "keep reading").
int HttpResponse::append(const void *buf, size_t *size)
{
    int ret = HttpMessage::append(buf, size);

    // Status code starting with '1' => informational (1xx) response.
    if (ret > 0 && *http_parser_get_code(this->parser) == '1')
    {
        http_parser_deinit(this->parser);
        http_parser_init(1, this->parser);  // re-init as a response parser
        ret = 0;
    }

    return ret;
}

}

/********************************************************************** * LeechCraft - modular cross-platform feature rich internet client. * Copyright (C) 2012 Georg Rudoy * * Boost Software License - Version 1.0 - August 17th, 2003 * * Permission is hereby granted, free of charge, to any person or organization * obtaining a copy of the software and accompanying documentation covered by * this license (the "Software") to use, reproduce, display, distribute, * execute, and transmit the Software, and to prepare derivative works of the * Software, and to permit third-parties to whom the Software is furnished to * do so, all subject to the following: * * The copyright notices in the Software and this entire statement, including * the above license grant, this restriction and the following disclaimer, * must be included in all copies of the Software, in whole or in part, and * all derivative works of the Software, unless such copies or derivative * works are solely in the form of machine-executable object code generated by * a source language processor. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
 **********************************************************************/

#include "gacts.h"
#include <QIcon>
#include <util/util.h>
#include <qxtglobalshortcut.h>
#include <interfaces/entitytesthandleresult.h>

namespace LeechCraft
{
namespace GActs
{
	// Plugin entry point: only needs its translations installed.
	void Plugin::Init (ICoreProxy_ptr)
	{
		Util::InstallTranslator ("gacts");
	}

	// Nothing to do after all plugins are initialized.
	void Plugin::SecondInit ()
	{
	}

	QByteArray Plugin::GetUniqueID () const
	{
		return "org.LeechCraft.GActs";
	}

	// Tear down every shortcut we still own; the map only holds the
	// top-level shortcut per action (children are parented to it and are
	// deleted along with it by Qt's parent/child ownership).
	void Plugin::Release ()
	{
		qDeleteAll (RegisteredShortcuts_.values ());
		RegisteredShortcuts_.clear ();
	}

	QString Plugin::GetName () const
	{
		return "GActs";
	}

	QString Plugin::GetInfo () const
	{
		return tr ("Provides support for Global Actions registration for other LeechCraft plugins.");
	}

	QIcon Plugin::GetIcon () const
	{
		return QIcon ();
	}

	// We handle register/unregister entities that carry an ActionID.
	EntityTestHandleResult Plugin::CouldHandle (const Entity& e) const
	{
		const bool good = (e.Mime_ == "x-leechcraft/global-action-register" ||
					e.Mime_ == "x-leechcraft/global-action-unregister") &&
				e.Additional_.contains ("ActionID");
		return EntityTestHandleResult (good ?
					EntityTestHandleResult::PIdeal :
					EntityTestHandleResult::PNone);
	}

	// Register or unregister a global shortcut described by the entity.
	//
	// Expected Additional_ keys: ActionID (QByteArray), Shortcut
	// (QKeySequence), Receiver (QObject*), Method (signature passed to
	// connect()), and optionally AltShortcuts (list of QKeySequence).
	void Plugin::Handle (Entity e)
	{
		const QByteArray& id = e.Additional_ ["ActionID"].toByteArray ();
		if (e.Mime_ == "x-leechcraft/global-action-unregister")
		{
			// take() removes the map entry; deleting the shortcut also
			// deletes its QxtGlobalShortcut children (alt shortcuts).
			delete RegisteredShortcuts_.take (id);
			return;
		}

		const QKeySequence& seq = e.Additional_ ["Shortcut"].value<QKeySequence> ();
		if (seq.isEmpty ())
			return;

		// Already registered: just update the key sequence and re-add the
		// alternative shortcuts.
		if (const auto sh = RegisteredShortcuts_.value (id))
		{
			sh->setShortcut (seq);
			RegisterChildren (sh, e);
			return;
		}

		QObject *receiver = e.Additional_ ["Receiver"].value<QObject*> ();
		if (!receiver)
			return;

		const QByteArray& method = e.Additional_ ["Method"].toByteArray ();
		if (method.isEmpty ())
			return;

		// Clean our map up when the receiving object dies; UniqueConnection
		// avoids duplicate slots when one receiver registers many actions.
		connect (receiver,
				SIGNAL (destroyed (QObject*)),
				this,
				SLOT (handleReceiverDeleted ()),
				Qt::UniqueConnection);

		// Parent the shortcut to the receiver so Qt deletes it together
		// with the receiver.
		const auto sh = new QxtGlobalShortcut (seq, receiver);
		connect (sh,
				SIGNAL (activated ()),
				receiver,
				method);
		RegisteredShortcuts_ [id] = sh;

		RegisterChildren (sh, e);
	}

	// Create the alternative shortcuts for an action.  Each child is
	// parented to the main shortcut and simply re-emits its activated()
	// signal, so triggering any alternative triggers the action.
	void Plugin::RegisterChildren (QxtGlobalShortcut *sh, const Entity& e)
	{
		for (const auto& seqVar : e.Additional_ ["AltShortcuts"].toList ())
		{
			const auto& subseq = seqVar.value<QKeySequence> ();
			if (subseq.isEmpty ())
				continue;

			const auto subsh = new QxtGlobalShortcut (subseq, sh);
			connect (subsh,
					SIGNAL (activated ()),
					sh,
					SIGNAL (activated ()));
		}
	}

	// A receiver object is being destroyed: drop its map entries.  The
	// shortcut objects themselves are children of the dying receiver, so
	// Qt deletes them -- only the dangling map entries need removing here.
	void Plugin::handleReceiverDeleted ()
	{
		for (auto i = RegisteredShortcuts_.begin (); i != RegisteredShortcuts_.end (); )
		{
			if ((*i)->parent () != sender ())
				++i;
			else
				i = RegisteredShortcuts_.erase (i);
		}
	}
}
}

LC_EXPORT_PLUGIN (leechcraft_gacts, LeechCraft::GActs::Plugin);

#include<iostream>
#include<vector>
#include <algorithm>
#include <math.h>

// Best total obtainable by picking a window of k consecutive positions and
// summing, for each position i in the window, profit[i] plus its partner
// profit[i + n/2] from the second half of the vector (the vector is treated
// as two parallel halves of length n/2).
//
// Returns 0 when k is non-positive or larger than the number of pairs
// (matching the original guard, where a negative k wrapped to a huge
// unsigned value and hit the same early return).
//
// @param k       window width (number of consecutive pairs summed)
// @param profit  values; first half paired element-wise with second half
// @return        maximum window sum, or 0 when no window is possible
long maxProfit(int k, const std::vector<int>& profit)
{
    const std::size_t half = profit.size() / 2;

    // No valid window: k pairs cannot fit into half positions.
    if (k <= 0 || static_cast<std::size_t>(k) > half)
        return 0;

    long best = 0;
    bool haveWindow = false;    // guards against max-of-nothing

    // Every window start such that the k-wide window stays inside the
    // first half (same bound as the original `i + k - 1 < half`).
    for (std::size_t start = 0; start + k <= half; ++start) {
        long sum = 0;
        for (std::size_t j = start; j < start + static_cast<std::size_t>(k); ++j) {
            sum += profit[j];           // first-half element
            sum += profit[j + half];    // its second-half partner
        }
        if (!haveWindow || sum > best) {
            best = sum;
            haveWindow = true;
        }
    }

    // haveWindow is always true here (the guard ensures start = 0 is valid),
    // so unlike the original there is no max_element-on-empty hazard, and
    // the leftover debug print of every window sum has been removed.
    return best;
}

int main(){
    std::vector<int> prof = {2, 5, 8, 9, 0, 1, -3, -7, -4, 6};
    std::cout << maxProfit(5, prof) << '\n';
    return 0;
}

#include <stdio.h>
#include <string.h>
#include <string>
#include <set>
#include <algorithm>
#include <iostream>
using namespace std;

const int N = 20;           // row-string buffer length
const int M = 50;           // index space for 5-bit row patterns (0..31)

int ans, g[N];              // ans: raw solution count; g: chosen rows 0..9
set<string> rec;            // canonical forms already counted (dedup)
int d[M], c[M];             // d[i]: i with its 5 bits reversed; c: available count per pattern
char sta[M][N];             // sta[i]: 5-char '0'/'1' string for pattern i

// Precompute, for every 5-bit value i, its textual form sta[i] and its
// bit-reversed value d[i] (a row read right-to-left).
void handle()
{
    for (int i = 0; i < 32; i++) {
        int k = i, t = 0;
        for (int j = 0; j < 5; j++) {
            int m = k % 2;           // current lowest bit
            k /= 2;
            t = t * 2 + m;           // accumulate bits in reversed order
            sta[i][4-j] = '0' + m;   // write text most-significant first
        }
        d[i] = t;
    }
}

// Account one input row: bump the availability counter for both the pattern
// itself and its mirror image (a row may be used in either orientation).
void rechange(int id, char* str)
{
    int t = 0, k = 0;
    for (int i = 0; i < 5; i++) {
        t = t * 2 + str[i] - '0';     // pattern as written
        k = k * 2 + str[4-i] - '0';   // pattern reversed
    }
    c[t]++;
    c[k]++;
}

// Read one test case of 10 row strings.  Returns false on the "END"
// sentinel, true when a full case was read into the counters.
bool init()
{
    rec.clear();
    ans = 0;
    memset(c, 0, sizeof(c));
    char str[N];
    for (int i = 0; i < 10; i++) {
        scanf("%s", str);
        if (strcmp(str, "END") == 0)
            return false;
        rechange(i, str);
    }
    return true;
}

// Record the canonical key of a 5x5 + 5x5 configuration (x rows then y rows
// concatenated) so symmetric duplicates are not counted twice.
void add(char (*x)[N], char (*y)[N])
{
    string str;
    for (int i = 0; i < 5; i++)
        str = str + x[i];
    for (int i = 0; i < 5; i++)
        str = str + y[i];
    rec.insert(str);
}

// Rotate a 5x5 character grid by 90 degrees in place.
void ret(char(*x)[N])
{
    char y[N][N];
    for (int i = 0; i < 5; i++)
        strcpy(y[i], x[i]);
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 5; j++)
            x[j][4-i] = y[i][j];
    }
}

// A valid solution was found: count it once and insert all of its symmetric
// variants (4 rotations, and 4 rotations of the mirrored pair) into `rec`
// so they are recognized as duplicates later.
// NOTE(review): the symmetry set appears to be rotation + reflection of the
// paired grids -- inferred from the two rotation loops; verify against the
// original problem statement.
void del()
{
    ans++;
    char x[N][N], y[N][N];
    for (int i = 0; i < 5; i++) {
        strcpy(x[i], sta[g[i]]);
        strcpy(y[i], sta[g[i+5]]);
    }
    for (int i = 0; i < 4; i++) {
        add(x, y);
        ret(x);
        ret(y);
    }
    // Mirrored variant: x rows in reverse order, y rows bit-reversed.
    for (int i = 0; i < 5; i++) {
        strcpy(x[i], sta[g[4-i]]);
        strcpy(y[i], sta[d[g[i+5]]]);
    }
    for (int i = 0; i < 4; i++) {
        add(x, y);
        ret(x);
        ret(y);
    }
}

// Given the five rows g[0..4] of the first grid, derive the five rows
// g[5..9] of the second grid (column i of the first grid, complemented),
// check the derived rows are available in sufficient quantity, and reject
// configurations already recorded in `rec`.
bool judge()
{
    int r[M];
    memset(r, 0, sizeof(r));     // r: usage count per pattern in this attempt
    for (int i = 0; i < 5; i++) {
        int k = 0;
        // Row i of the second grid = complement of bit i across g[0..4].
        for (int j = 0; j < 5; j++) {
            k = k * 2 + ( g[j] & (1<<i) ? 0 : 1 );
        }
        if (r[k] >= c[k])
            return false;        // not enough copies of this row available
        g[i+5] = k;
        r[k]++;
        r[d[k]]++;               // its mirror is consumed as well
    }
    // Reject configurations already counted via a symmetry of a prior one.
    string str;
    for (int i = 0; i < 10; i++) {
        str = str + sta[g[i]];
    }
    if (rec.find(str) != rec.end())
        return false;
    return true;
}

// Depth-first enumeration of the first grid's five rows, consuming row
// availability as it descends and restoring it on backtrack.
void dfs(int deep)
{
    if (deep >= 5) {
        if (judge())
            del();
        return;
    }
    for (int i = 0; i < 32; i++) {
        if (c[i] == 0)
            continue;
        c[i]--;
        c[d[i]]--;               // using a row also consumes its mirror
        g[deep] = i;
        dfs(deep + 1);
        c[i]++;
        c[d[i]]++;
    }
}

int main()
{
    handle();
    while (init()) {
        dfs(0);
        // Each solution is discovered twice (grid pair in either order --
        // presumably; confirm against the enumeration), hence the halving.
        printf("%d\n", ans / 2);
    }
    return 0;
}

/// @file /// @author David Pilger <dpilger26@gmail.com> /// [GitHub Repository](https://github.com/dpilger26/NumCpp) /// @version 2.0.0 /// /// @section License /// Copyright 2020 David Pilger /// /// Permission is hereby granted, free of charge, to any person obtaining a copy of this /// software and associated documentation files(the "Software"), to deal in the Software /// without restriction, including without limitation the rights to use, copy, modify, /// merge, publish, distribute, sublicense, and/or sell copies of the Software, and to /// permit persons to whom the Software is furnished to do so, subject to the following /// conditions : /// /// The above copyright notice and this permission notice shall be included in all copies /// or substantial portions of the Software. /// /// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, /// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR /// PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE /// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR /// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER /// DEALINGS IN THE SOFTWARE. /// /// @section Description /// Functions for working with NdArrays /// #pragma once #include "NumCpp/NdArray.hpp" #include "NumCpp/Core/Internal/StaticAsserts.hpp" #include "NumCpp/Core/Internal/StlAlgorithms.hpp" #include <cmath> #include <complex> namespace nc { //============================================================================ // Method Description: /// Trigonometric sine. 
/// /// NumPy Reference: https://www.numpy.org/devdocs/reference/generated/numpy.sin.html /// /// @param /// inValue /// @return /// value /// template<typename dtype> auto sin(dtype inValue) noexcept { STATIC_ASSERT_ARITHMETIC_OR_COMPLEX(dtype); return std::sin(inValue); } //============================================================================ // Method Description: /// Trigonometric sine, element-wise. /// /// NumPy Reference: https://www.numpy.org/devdocs/reference/generated/numpy.sin.html /// /// @param /// inArray /// @return /// NdArray /// template<typename dtype> auto sin(const NdArray<dtype>& inArray) { NdArray<decltype(sin(dtype{0}))> returnArray(inArray.shape()); stl_algorithms::transform(inArray.cbegin(), inArray.cend(), returnArray.begin(), [](dtype inValue) noexcept -> auto { return sin(inValue); }); return returnArray; } }
// Copyright (c) 2011-2018 The PaydayCoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <qt/csvmodelwriter.h> #include <QAbstractItemModel> #include <QFile> #include <QTextStream> CSVModelWriter::CSVModelWriter(const QString &_filename, QObject *parent) : QObject(parent), filename(_filename), model(nullptr) { } void CSVModelWriter::setModel(const QAbstractItemModel *_model) { this->model = _model; } void CSVModelWriter::addColumn(const QString &title, int column, int role) { Column col; col.title = title; col.column = column; col.role = role; columns.append(col); } static void writeValue(QTextStream &f, const QString &value) { QString escaped = value; escaped.replace('"', "\"\""); f << "\"" << escaped << "\""; } static void writeSep(QTextStream &f) { f << ","; } static void writeNewline(QTextStream &f) { f << "\n"; } bool CSVModelWriter::write() { QFile file(filename); if(!file.open(QIODevice::WriteOnly | QIODevice::Text)) return false; QTextStream out(&file); int numRows = 0; if(model) { numRows = model->rowCount(); } // Header row for(int i=0; i<columns.size(); ++i) { if(i!=0) { writeSep(out); } writeValue(out, columns[i].title); } writeNewline(out); // Data rows for(int j=0; j<numRows; ++j) { for(int i=0; i<columns.size(); ++i) { if(i!=0) { writeSep(out); } QVariant data = model->index(j, columns[i].column).data(columns[i].role); writeValue(out, data.toString()); } writeNewline(out); } file.close(); return file.error() == QFile::NoError; }
/* PPPIPInterface.cpp */ /* Copyright (C) 2012 mbed.org, MIT License * * Permission is hereby granted, free of charge, to any person obtaining a copy of this software * and associated documentation files (the "Software"), to deal in the Software without restriction, * including without limitation the rights to use, copy, modify, merge, publish, distribute, * sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all copies or * substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING * BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #define __DEBUG__ 0 #ifndef __MODULE__ #define __MODULE__ "PPPIPInterface.cpp" #endif #include "core/fwk.h" #include "rtos.h" #include <cstdio> using std::sscanf; #include "PPPIPInterface.h" #define MSISDN "*99#" #define CONNECT_CMD_PREFIX "ATD " #define CONNECT_CMD_SUFFIX "\x0D" #define EXPECTED_RESP_SUFFIX "\x0D" "\x0A" "CONNECT" "\x0D" "\x0A" #define EXPECTED_RESP_DATARATE_SUFFIX "\x0D" "\x0A" "CONNECT %d" "\x0D" "\x0A" #define EXPECTED_RESP_MIN_LEN 20 #define OK_RESP "\x0D" "\x0A" "OK" "\x0D" "\x0A" #define ESCAPE_SEQ "+++" #define HANGUP_CMD "ATH" "\x0D" #define NO_CARRIER_RESP "\x0D" "\x0A" "NO CARRIER" "\x0D" "\x0A" extern "C" { #include "lwip/ip_addr.h" #include "lwip/inet.h" #include "lwip/err.h" #include "lwip/dns.h" #include "netif/ppp/ppp.h" } PPPIPInterface::PPPIPInterface(IOStream* pStream, const char* msisdn) : LwIPInterface(), m_linkStatusSphre(1), m_pppErrCode(0), m_pStream(pStream), m_streamAvail(true), m_pppd(-1) { m_connectCmd = new char[strlen(CONNECT_CMD_PREFIX) + strlen(msisdn) + strlen(CONNECT_CMD_SUFFIX) + 1]; sprintf(m_connectCmd, "%s%s%s", CONNECT_CMD_PREFIX, msisdn, CONNECT_CMD_SUFFIX); m_expectedResp = new char[strlen(m_connectCmd) + strlen(EXPECTED_RESP_SUFFIX) + 1]; sprintf(m_expectedResp, "%s%s", m_connectCmd, EXPECTED_RESP_SUFFIX); m_expectedRespDatarate = new char[strlen(m_connectCmd) + strlen(EXPECTED_RESP_DATARATE_SUFFIX) + 1]; sprintf(m_expectedRespDatarate, "%s%s", m_connectCmd, EXPECTED_RESP_DATARATE_SUFFIX); m_linkStatusSphre.wait(); } /*virtual*/ PPPIPInterface::~PPPIPInterface() { delete m_connectCmd; delete m_expectedResp; delete m_expectedRespDatarate; } /*virtual*/ int PPPIPInterface::init() //Init PPP-specific stuff, create the right bindings, etc { DBG("Initializing LwIP"); LwIPInterface::init(); //Init LwIP, NOT including PPP DBG("Initializing PPP"); pppInit(); DBG("Done"); return OK; } int PPPIPInterface::setup(const char* user, const char* pw) { DBG("Configuring PPP authentication method"); 
pppSetAuth(PPPAUTHTYPE_ANY, user, pw); DBG("Done"); return OK; } /*virtual*/ int PPPIPInterface::connect() { int ret; char buf[32]; size_t len; DBG("Trying to connect with PPP"); cleanupLink(); DBG("Sending %s", m_connectCmd); ret = m_pStream->write((uint8_t*)m_connectCmd, strlen(m_connectCmd), osWaitForever); if( ret != OK ) { return NET_UNKNOWN; } DBG("Expect %s", m_expectedResp); len = 0; size_t readLen; ret = m_pStream->read((uint8_t*)buf + len, &readLen, EXPECTED_RESP_MIN_LEN, 10000); if( ret != OK ) { return NET_UNKNOWN; } len += readLen; while( (len < EXPECTED_RESP_MIN_LEN) || (buf[len-1] != LF) ) { ret = m_pStream->read((uint8_t*)buf + len, &readLen, 1, 10000); if( ret != OK ) { return NET_UNKNOWN; } len += readLen; } buf[len]=0; DBG("Got %s[len %d]", buf, len); int datarate = 0; if( (sscanf(buf, m_expectedRespDatarate, &datarate ) != 1) && (strcmp(m_expectedResp, buf) != 0) ) { //Discard buffer do //Clear buf { ret = m_pStream->read((uint8_t*)buf, &len, 32, 0); } while( (ret == OK) && (len > 0) ); return NET_CONN; } DBG("Transport link open"); if(datarate != 0) { DBG("Datarate: %d bps", datarate); } m_linkStatusSphre.wait(0); if((m_pppd != -1) && (m_pppErrCode == 0)) //Already connected { return NET_INVALID; } ret = pppOverSerialOpen(this, PPPIPInterface::linkStatusCb, this); if(ret < 0) { switch(ret) { case PPPERR_OPEN: default: return NET_FULL; //All available resources are already used } } m_pppd = ret; //PPP descriptor m_linkStatusSphre.wait(); //Block indefinitely; there should be a timeout there if(m_pppErrCode != PPPERR_NONE) { m_pppd = -1; } switch(m_pppErrCode) { case PPPERR_NONE: //Connected OK return OK; case PPPERR_CONNECT: //Connection lost return NET_INTERRUPTED; case PPPERR_AUTHFAIL: //Authentication failed return NET_AUTH; case PPPERR_PROTOCOL: //Protocol error return NET_PROTOCOL; default: return NET_UNKNOWN; } } /*virtual*/ int PPPIPInterface::disconnect() { int ret = m_linkStatusSphre.wait(0); if(ret > 0) //Already disconnected? 
{ m_pppd = -1; //Discard PPP descriptor switch(m_pppErrCode) { case PPPERR_CONNECT: //Connection terminated case PPPERR_AUTHFAIL: //Authentication failed case PPPERR_PROTOCOL: //Protocol error case PPPERR_USER: return OK; default: return NET_UNKNOWN; } } else { if(m_pppd == -1) { return NET_INVALID; } pppClose(m_pppd); do { m_linkStatusSphre.wait(); //Block indefinitely; there should be a timeout there DBG("Received PPP err code %d", m_pppErrCode); } while(m_pppErrCode != PPPERR_USER); m_pppd = -1; //Discard PPP descriptor } DBG("Sending %s", ESCAPE_SEQ); ret = m_pStream->write((uint8_t*)ESCAPE_SEQ, strlen(ESCAPE_SEQ), osWaitForever); if( ret != OK ) { return NET_UNKNOWN; } cleanupLink(); return OK; } int PPPIPInterface::cleanupLink() { int ret; char buf[32]; size_t len; do //Clear buf { ret = m_pStream->read((uint8_t*)buf, &len, 32, 100); if(ret == OK) { buf[len] = '\0'; DBG("Got %s", buf); } } while( (ret == OK) && (len > 0) ); DBG("Sending %s", HANGUP_CMD); ret = m_pStream->write((uint8_t*)HANGUP_CMD, strlen(HANGUP_CMD), osWaitForever); if( ret != OK ) { return NET_UNKNOWN; } size_t readLen; //Hangup DBG("Expect %s", HANGUP_CMD); len = 0; while( len < strlen(HANGUP_CMD) ) { ret = m_pStream->read((uint8_t*)buf + len, &readLen, strlen(HANGUP_CMD) - len, 100); if( ret != OK ) { break; } len += readLen; ///// buf[len]=0; DBG("Got %s", buf); } buf[len]=0; DBG("Got %s[len %d]", buf, len); //OK response DBG("Expect %s", OK_RESP); len = 0; while( len < strlen(OK_RESP) ) { ret = m_pStream->read((uint8_t*)buf + len, &readLen, strlen(OK_RESP) - len, 100); if( ret != OK ) { break; } len += readLen; ///// buf[len]=0; DBG("Got %s", buf); } buf[len]=0; DBG("Got %s[len %d]", buf, len); //NO CARRIER event DBG("Expect %s", NO_CARRIER_RESP); len = 0; while( len < strlen(NO_CARRIER_RESP) ) { ret = m_pStream->read((uint8_t*)buf + len, &readLen, strlen(NO_CARRIER_RESP) - len, 100); if( ret != OK ) { break; } len += readLen; ///// buf[len]=0; DBG("Got %s", buf); } buf[len]=0; DBG("Got 
%s[len %d]", buf, len); do //Clear buf { ret = m_pStream->read((uint8_t*)buf, &len, 32, 100); if(ret == OK) { buf[len] = '\0'; DBG("Got %s", buf); } } while( (ret == OK) && (len > 0) ); return OK; } /*static*/ void PPPIPInterface::linkStatusCb(void *ctx, int errCode, void *arg) //PPP link status { PPPIPInterface* pIf = (PPPIPInterface*)ctx; struct ppp_addrs* addrs = (struct ppp_addrs*) arg; switch(errCode) { case PPPERR_NONE: WARN("Connected via PPP."); DBG("Local IP address: %s", inet_ntoa(addrs->our_ipaddr)); DBG("Netmask: %s", inet_ntoa(addrs->netmask)); DBG("Remote IP address: %s", inet_ntoa(addrs->his_ipaddr)); DBG("Primary DNS: %s", inet_ntoa(addrs->dns1)); DBG("Secondary DNS: %s", inet_ntoa(addrs->dns2)); //Setup DNS if (addrs->dns1.addr != 0) { dns_setserver(0, (struct ip_addr*)&(addrs->dns1)); } if (addrs->dns2.addr != 0) { dns_setserver(1, (struct ip_addr*)&(addrs->dns1)); } pIf->setConnected(true); pIf->setIPAddress(inet_ntoa(addrs->our_ipaddr)); break; case PPPERR_CONNECT: //Connection lost WARN("Connection lost/terminated"); pIf->setConnected(false); break; case PPPERR_AUTHFAIL: //Authentication failed WARN("Authentication failed"); pIf->setConnected(false); break; case PPPERR_PROTOCOL: //Protocol error WARN("Protocol error"); pIf->setConnected(false); break; case PPPERR_USER: WARN("Disconnected by user"); pIf->setConnected(false); break; default: WARN("Unknown error (%d)", errCode); pIf->setConnected(false); break; } pIf->m_linkStatusSphre.wait(0); //If previous event has not been handled, "delete" it now pIf->m_pppErrCode = errCode; pIf->m_linkStatusSphre.release(); } //LwIP PPP implementation extern "C" { /** * Writes to the serial device. * * @param fd serial device handle * @param data pointer to data to send * @param len length (in bytes) of data to send * @return number of bytes actually sent * * @note This function will block until all data can be sent. 
*/ u32_t sio_write(sio_fd_t fd, u8_t *data, u32_t len) { DBG("sio_write"); PPPIPInterface* pIf = (PPPIPInterface*)fd; int ret; if(!pIf->m_streamAvail) //If stream is not available (it is a shared resource) don't go further { return 0; } ret = pIf->m_pStream->write(data, len, osWaitForever); //Blocks until all data is sent or an error happens if(ret != OK) { return 0; } return len; } /** * Reads from the serial device. * * @param fd serial device handle * @param data pointer to data buffer for receiving * @param len maximum length (in bytes) of data to receive * @return number of bytes actually received - may be 0 if aborted by sio_read_abort * * @note This function will block until data can be received. The blocking * can be cancelled by calling sio_read_abort(). */ u32_t sio_read(sio_fd_t fd, u8_t *data, u32_t len) { DBG("sio_read"); PPPIPInterface* pIf = (PPPIPInterface*)fd; int ret; size_t readLen; if(!pIf->m_streamAvail) //If stream is not available (it is a shared resource) don't go further { WARN("EXIT NOT AVAIL"); return 0; } ret = pIf->m_pStream->read(data, &readLen, len, osWaitForever); //Blocks until some data is received or an error happens if(ret != OK) { return 0; } DBG("ret"); return readLen; } /** * Aborts a blocking sio_read() call. * * @param fd serial device handle */ void sio_read_abort(sio_fd_t fd) { DBG("sio_read_abort"); PPPIPInterface* pIf = (PPPIPInterface*)fd; if(!pIf->m_streamAvail) //If stream is not available (it is a shared resource) don't go further { return; } pIf->m_pStream->abortRead(); DBG("ret"); } }
/******************************************************************************
 * Copyright 2018 The Apollo Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *****************************************************************************/

#include <gtest/gtest.h>

#include <memory>
#include <string>
#include <vector>

#include "cyber/proto/unit_test.pb.h"
#include "cyber/transport/receiver/intra_receiver.h"
#include "cyber/transport/transmitter/intra_transmitter.h"

namespace apollo {
namespace cyber {
namespace transport {

// Fixture for the intra-process transmitter/receiver pair: two transmitters
// are created on the same channel and enabled before every test.
class IntraTranceiverTest : public ::testing::Test {
 protected:
  using TransmitterPtr = std::shared_ptr<Transmitter<proto::UnitTest>>;
  using ReceiverPtr = std::shared_ptr<Receiver<proto::UnitTest>>;

  IntraTranceiverTest() : channel_name_("intra_channel") {}
  virtual ~IntraTranceiverTest() {}

  virtual void SetUp() {
    RoleAttributes attr;
    attr.set_channel_name(channel_name_);
    // Both transmitters publish on the same channel.
    transmitter_a_ = std::make_shared<IntraTransmitter<proto::UnitTest>>(attr);
    transmitter_b_ = std::make_shared<IntraTransmitter<proto::UnitTest>>(attr);
    transmitter_a_->Enable();
    transmitter_b_->Enable();
  }

  virtual void TearDown() {
    transmitter_a_ = nullptr;
    transmitter_b_ = nullptr;
  }

  std::string channel_name_;
  TransmitterPtr transmitter_a_ = nullptr;
  TransmitterPtr transmitter_b_ = nullptr;
};

// A fresh transmitter starts at sequence number 0, and transmitter/receiver
// identities are distinct.
TEST_F(IntraTranceiverTest, constructor) {
  RoleAttributes attr;
  TransmitterPtr transmitter =
      std::make_shared<IntraTransmitter<proto::UnitTest>>(attr);
  ReceiverPtr receiver =
      std::make_shared<IntraReceiver<proto::UnitTest>>(attr, nullptr);

  EXPECT_EQ(transmitter->seq_num(), 0);

  auto& transmitter_id = transmitter->id();
  auto& receiver_id = receiver->id();
  EXPECT_NE(transmitter_id.ToString(), receiver_id.ToString());
}

// End-to-end enable/disable semantics: messages only arrive while both the
// transmitter and the receiver are enabled for each other's attributes, and
// repeated Enable() calls are harmless.
TEST_F(IntraTranceiverTest, enable_and_disable) {
  // repeated call
  transmitter_a_->Enable();

  std::vector<proto::UnitTest> msgs;  // collects everything the receiver sees
  RoleAttributes attr;
  attr.set_channel_name(channel_name_);
  ReceiverPtr receiver = std::make_shared<IntraReceiver<proto::UnitTest>>(
      attr, [&msgs](const std::shared_ptr<proto::UnitTest>& msg,
                    const MessageInfo& msg_info, const RoleAttributes& attr) {
        (void)msg_info;
        (void)attr;
        msgs.emplace_back(*msg);
      });

  receiver->Enable();
  // repeated call
  receiver->Enable();

  // A receiver constructed with a null callback must still be safe to enable.
  ReceiverPtr receiver_null_cb =
      std::make_shared<IntraReceiver<proto::UnitTest>>(attr, nullptr);
  receiver_null_cb->Enable();

  auto msg = std::make_shared<proto::UnitTest>();
  msg->set_class_name("IntraTranceiverTest");
  msg->set_case_name("enable_and_disable");

  // Both enabled transmitters deliver to the receiver.
  EXPECT_TRUE(transmitter_a_->Transmit(msg));
  EXPECT_EQ(msgs.size(), 1);
  EXPECT_TRUE(transmitter_b_->Transmit(msg));
  EXPECT_EQ(msgs.size(), 2);
  for (auto& item : msgs) {
    EXPECT_EQ(item.class_name(), "IntraTranceiverTest");
    EXPECT_EQ(item.case_name(), "enable_and_disable");
  }

  // Disabling b towards this receiver stops its deliveries entirely.
  transmitter_b_->Disable(receiver->attributes());
  EXPECT_FALSE(transmitter_b_->Transmit(msg));
  transmitter_b_->Enable(receiver->attributes());

  // Re-enable the receiver only for transmitter b: a's messages are dropped.
  auto& transmitter_b_attr = transmitter_b_->attributes();
  receiver->Disable();
  receiver->Enable(transmitter_b_attr);
  msgs.clear();
  EXPECT_TRUE(transmitter_a_->Transmit(msg));
  EXPECT_EQ(msgs.size(), 0);
  EXPECT_TRUE(transmitter_b_->Transmit(msg));
  EXPECT_EQ(msgs.size(), 1);
  for (auto& item : msgs) {
    EXPECT_EQ(item.class_name(), "IntraTranceiverTest");
    EXPECT_EQ(item.case_name(), "enable_and_disable");
  }

  // After disabling for b as well, nothing arrives (Transmit still succeeds).
  receiver->Disable(transmitter_b_attr);
  msgs.clear();
  EXPECT_TRUE(transmitter_b_->Transmit(msg));
  EXPECT_EQ(msgs.size(), 0);
}

}  // namespace transport
}  // namespace cyber
}  // namespace apollo

/* Source file for the R2 solid class */



/* Include files */

#include "R2Shapes.h"



// Namespace

namespace gaps {



/* Public functions */

// Module initialization hook for the R2 solid subsystem.
// Nothing needs setting up, so it simply reports success.
int R2InitSolid()
{
    /* Return success */
    return TRUE;
}



// Module shutdown hook -- intentionally a no-op.
void R2StopSolid()
{
}



// Default constructor: R2Solid is a stateless base class.
R2Solid::
R2Solid(void)
{
}



// Destructor: nothing to release.
R2Solid::
~R2Solid(void)
{
}



// Solidity query -- every R2Solid is solid by definition.
const RNBoolean R2Solid::
IsSolid(void) const
{
    // All solid shapes are solids
    return TRUE;
}



} // namespace gaps

// Copyright (c) 2011-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

// Unit tests for denial-of-service detection/prevention code

#include "chainparams.h"
#include "keystore.h"
#include "net.h"
#include "net_processing.h"
#include "pow.h"
#include "script/sign.h"
#include "serialize.h"
#include "util.h"
#include "validation.h"

#include "test/test_epmcoin.h"

#include <stdint.h>

#include <boost/assign/list_of.hpp> // for 'map_list_of()'
#include <boost/date_time/posix_time/posix_time_types.hpp>
#include <boost/foreach.hpp>
#include <boost/test/unit_test.hpp>

// Tests these internal-to-net_processing.cpp methods:
extern bool AddOrphanTx(const CTransactionRef& tx, NodeId peer);
extern void EraseOrphansFor(NodeId peer);
extern unsigned int LimitOrphanTxSize(unsigned int nMaxOrphans);

// Mirror of net_processing.cpp's internal orphan-pool entry so the tests can
// inspect mapOrphanTransactions directly.
struct COrphanTx {
    CTransactionRef tx;
    NodeId fromPeer;
    int64_t nTimeExpire;
};
extern std::map<uint256, COrphanTx> mapOrphanTransactions;

// Build a CService from a raw 32-bit IPv4 address (network byte order),
// using the active chain's default port.
CService ip(uint32_t i)
{
    struct in_addr s;
    s.s_addr = i;
    return CService(CNetAddr(s), Params().GetDefaultPort());
}

// Monotonically increasing node id shared by all tests in this suite.
static NodeId id = 0;

BOOST_FIXTURE_TEST_SUITE(DoS_tests, TestingSetup)

// A node accumulating 100 misbehavior points must be banned; bans apply
// per-address, so an unrelated address stays unbanned.
BOOST_AUTO_TEST_CASE(DoS_banning)
{
    std::atomic<bool> interruptDummy(false);

    connman->ClearBanned();
    CAddress addr1(ip(0xa0b0c001), NODE_NONE);
    CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, "", true);
    dummyNode1.SetSendVersion(PROTOCOL_VERSION);
    GetNodeSignals().InitializeNode(&dummyNode1, *connman);
    dummyNode1.nVersion = 1;
    dummyNode1.fSuccessfullyConnected = true;
    Misbehaving(dummyNode1.GetId(), 100); // Should get banned
    // SendMessages() is what actually applies the accumulated ban score.
    SendMessages(&dummyNode1, *connman, interruptDummy);
    BOOST_CHECK(connman->IsBanned(addr1));
    BOOST_CHECK(!connman->IsBanned(ip(0xa0b0c001|0x0000ff00))); // Different IP, not banned

    CAddress addr2(ip(0xa0b0c002), NODE_NONE);
    CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, "", true);
    dummyNode2.SetSendVersion(PROTOCOL_VERSION);
    GetNodeSignals().InitializeNode(&dummyNode2, *connman);
    dummyNode2.nVersion = 1;
    dummyNode2.fSuccessfullyConnected = true;
    // 50 points is below the default threshold of 100: not banned yet.
    Misbehaving(dummyNode2.GetId(), 50);
    SendMessages(&dummyNode2, *connman, interruptDummy);
    BOOST_CHECK(!connman->IsBanned(addr2)); // 2 not banned yet...
    BOOST_CHECK(connman->IsBanned(addr1));  // ... but 1 still should be
    // A second 50 points reaches the threshold and triggers the ban.
    Misbehaving(dummyNode2.GetId(), 50);
    SendMessages(&dummyNode2, *connman, interruptDummy);
    BOOST_CHECK(connman->IsBanned(addr2));
}

// The -banscore option raises the misbehavior threshold; the ban only occurs
// once the accumulated score strictly exceeds it.
BOOST_AUTO_TEST_CASE(DoS_banscore)
{
    std::atomic<bool> interruptDummy(false);

    connman->ClearBanned();
    ForceSetArg("-banscore", "111"); // because 11 is my favorite number
    CAddress addr1(ip(0xa0b0c001), NODE_NONE);
    CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 3, 1, "", true);
    dummyNode1.SetSendVersion(PROTOCOL_VERSION);
    GetNodeSignals().InitializeNode(&dummyNode1, *connman);
    dummyNode1.nVersion = 1;
    dummyNode1.fSuccessfullyConnected = true;
    Misbehaving(dummyNode1.GetId(), 100); // 100 of 111: below threshold
    SendMessages(&dummyNode1, *connman, interruptDummy);
    BOOST_CHECK(!connman->IsBanned(addr1));
    Misbehaving(dummyNode1.GetId(), 10); // 110 of 111: still below
    SendMessages(&dummyNode1, *connman, interruptDummy);
    BOOST_CHECK(!connman->IsBanned(addr1));
    Misbehaving(dummyNode1.GetId(), 1); // 111: threshold crossed, banned
    SendMessages(&dummyNode1, *connman, interruptDummy);
    BOOST_CHECK(connman->IsBanned(addr1));
    // Restore the default so later tests are unaffected.
    ForceSetArg("-banscore", std::to_string(DEFAULT_BANSCORE_THRESHOLD));
}

// Bans expire after 24 hours; verify via mock time rather than sleeping.
BOOST_AUTO_TEST_CASE(DoS_bantime)
{
    std::atomic<bool> interruptDummy(false);

    connman->ClearBanned();
    int64_t nStartTime = GetTime();
    SetMockTime(nStartTime); // Overrides future calls to GetTime()

    CAddress addr(ip(0xa0b0c001), NODE_NONE);
    CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, "", true);
    dummyNode.SetSendVersion(PROTOCOL_VERSION);
    GetNodeSignals().InitializeNode(&dummyNode, *connman);
    dummyNode.nVersion = 1;
    dummyNode.fSuccessfullyConnected = true;

    Misbehaving(dummyNode.GetId(), 100);
    SendMessages(&dummyNode, *connman, interruptDummy);
    BOOST_CHECK(connman->IsBanned(addr));

    // One hour later: still inside the 24h ban window.
    SetMockTime(nStartTime+60*60);
    BOOST_CHECK(connman->IsBanned(addr));

    // Just past 24 hours: the ban has expired.
    SetMockTime(nStartTime+60*60*24+1);
    BOOST_CHECK(!connman->IsBanned(addr));
}

// Pick a pseudo-random entry from the orphan pool (wrapping to begin() when
// the random hash lands past the last key).
CTransactionRef RandomOrphan()
{
    std::map<uint256, COrphanTx>::iterator it;
    it = mapOrphanTransactions.lower_bound(GetRandHash());
    if (it == mapOrphanTransactions.end())
        it = mapOrphanTransactions.begin();
    return it->second.tx;
}

// Exercises the orphan-transaction pool: insertion, rejection of oversized
// orphans, per-peer eviction, and global size limiting.
BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
{
    CKey key;
    key.MakeNewKey(true);
    CBasicKeyStore keystore;
    keystore.AddKey(key);

    // 50 orphan transactions:
    for (int i = 0; i < 50; i++)
    {
        CMutableTransaction tx;
        tx.vin.resize(1);
        tx.vin[0].prevout.n = 0;
        tx.vin[0].prevout.hash = GetRandHash();
        tx.vin[0].scriptSig << OP_1;
        tx.vout.resize(1);
        tx.vout[0].nValue = 1*CENT;
        tx.vout[0].scriptPubKey = GetScriptForDestination(key.GetPubKey().GetID());

        AddOrphanTx(MakeTransactionRef(tx), i);
    }

    // ... and 50 that depend on other orphans:
    for (int i = 0; i < 50; i++)
    {
        CTransactionRef txPrev = RandomOrphan();

        CMutableTransaction tx;
        tx.vin.resize(1);
        tx.vin[0].prevout.n = 0;
        tx.vin[0].prevout.hash = txPrev->GetHash();
        tx.vout.resize(1);
        tx.vout[0].nValue = 1*CENT;
        tx.vout[0].scriptPubKey = GetScriptForDestination(key.GetPubKey().GetID());
        SignSignature(keystore, *txPrev, tx, 0);

        AddOrphanTx(MakeTransactionRef(tx), i);
    }

    // This really-big orphan should be ignored:
    for (int i = 0; i < 10; i++)
    {
        CTransactionRef txPrev = RandomOrphan();

        CMutableTransaction tx;
        tx.vout.resize(1);
        tx.vout[0].nValue = 1*CENT;
        tx.vout[0].scriptPubKey = GetScriptForDestination(key.GetPubKey().GetID());
        // 2777 inputs makes the serialized transaction exceed the orphan size
        // cap, so AddOrphanTx must refuse it.
        tx.vin.resize(2777);
        for (unsigned int j = 0; j < tx.vin.size(); j++)
        {
            tx.vin[j].prevout.n = j;
            tx.vin[j].prevout.hash = txPrev->GetHash();
        }
        SignSignature(keystore, *txPrev, tx, 0);
        // Re-use same signature for other inputs
        // (they don't have to be valid for this test)
        for (unsigned int j = 1; j < tx.vin.size(); j++)
            tx.vin[j].scriptSig = tx.vin[0].scriptSig;

        BOOST_CHECK(!AddOrphanTx(MakeTransactionRef(tx), i));
    }

    // Test EraseOrphansFor:
    for (NodeId i = 0; i < 3; i++)
    {
        size_t sizeBefore = mapOrphanTransactions.size();
        EraseOrphansFor(i);
        // Each of peers 0..2 contributed at least one orphan above.
        BOOST_CHECK(mapOrphanTransactions.size() < sizeBefore);
    }

    // Test LimitOrphanTxSize() function:
    LimitOrphanTxSize(40);
    BOOST_CHECK(mapOrphanTransactions.size() <= 40);
    LimitOrphanTxSize(10);
    BOOST_CHECK(mapOrphanTransactions.size() <= 10);
    LimitOrphanTxSize(0);
    BOOST_CHECK(mapOrphanTransactions.empty());
}

BOOST_AUTO_TEST_SUITE_END()
// Software License for MTL // // Copyright (c) 2007 The Trustees of Indiana University. // 2008 Dresden University of Technology and the Trustees of Indiana University. // 2010 SimuNova UG (haftungsbeschränkt), www.simunova.com. // All rights reserved. // Authors: Peter Gottschling and Andrew Lumsdaine // // This file is part of the Matrix Template Library // // See also license.mtl.txt in the distribution. #ifndef MTL_MATRIX_INV_INCLUDE #define MTL_MATRIX_INV_INCLUDE #include <boost/numeric/mtl/mtl_fwd.hpp> #include <boost/numeric/mtl/concept/collection.hpp> #include <boost/numeric/mtl/matrix/identity.hpp> #include <boost/numeric/mtl/operation/upper_trisolve.hpp> #include <boost/numeric/mtl/operation/lu.hpp> #include <boost/numeric/mtl/utility/exception.hpp> #include <boost/numeric/mtl/utility/irange.hpp> #include <boost/numeric/mtl/utility/tag.hpp> #include <boost/numeric/mtl/vector/parameter.hpp> #include <boost/numeric/mtl/vector/dense_vector.hpp> #include <boost/numeric/mtl/vector/unit_vector.hpp> #include <boost/numeric/mtl/interface/vpt.hpp> namespace mtl { namespace matrix { namespace traits { /// Return type of inv(Matrix) /** Might be specialized later for the sake of efficiency **/ template <typename Matrix> struct inv { typedef typename Collection<Matrix>::value_type value_type; typedef ::mtl::matrix::dense2D<value_type> type; }; } // traits /// Invert upper triangular matrix template <typename Matrix, typename MatrixOut> void inv_upper(Matrix const& A, MatrixOut& Inv) { vampir_trace<5019> tracer; typedef typename Collection<Matrix>::value_type value_type; typedef typename Collection<Matrix>::size_type size_type; const size_type N= num_rows(A); MTL_DEBUG_THROW_IF(num_cols(A) != N, matrix_not_square()); MTL_DEBUG_THROW_IF(N != num_rows(Inv) || num_cols(A) != num_cols(Inv), incompatible_size()); Inv= math::zero(value_type()); for (size_type k= 0; k < N; ++k) { irange r(k+1); typename mtl::ColumnInMatrix<MatrixOut>::type col_k(Inv[r][k]); 
upper_trisolve(A[r][r], vector::unit_vector<value_type>(k, k+1), col_k, mtl::tag::regular_diagonal()); } } /// Invert upper triangular matrix template <typename Matrix> inline typename traits::inv<Matrix>::type inv_upper(Matrix const& A) { typedef typename Collection<Matrix>::size_type size_type; const size_type N= num_rows(A); typename traits::inv<Matrix>::type Inv(N, N); inv_upper(A, Inv); return Inv; } #if 0 /// Invert lower triangular matrix template <typename Matrix, typename MatrixOut> inline void inv_lower(Matrix const& A, MatrixOut& Inv) { vampir_trace<5020> tracer; typedef typename Collection<Matrix>::value_type value_type; typedef typename Collection<Matrix>::size_type size_type; const size_type N= num_rows(A); MTL_DEBUG_THROW_IF(num_cols(A) != N, matrix_not_square()); MTL_DEBUG_THROW_IF(N != num_rows(Inv) || num_cols(A) != num_cols(Inv), incompatible_size()); Inv= math::zero(value_type()); for (size_type k= 0; k < N; ++k) { irange r(k, N); typename mtl::ColumnInMatrix<MatrixOut>::type col_k(Inv[r][k]); lower_trisolve(A[r][r], vector::unit_vector<value_type>(0, N-k), col_k, mtl::tag::regular_diagonal()); } } template <typename Matrix> typename traits::inv<Matrix>::type inline inv_lower(Matrix const& A) { typedef typename Collection<Matrix>::size_type size_type; const size_type N= num_rows(A); typename traits::inv<Matrix>::type Inv(N, N); inv_lower(A, Inv); return Inv; } #endif #if 1 /// Invert lower triangular matrix template <typename Matrix> typename traits::inv<Matrix>::type inline inv_lower(Matrix const& A) { vampir_trace<5020> tracer; Matrix T(trans(A)); // Shouldn't be needed return typename traits::inv<Matrix>::type(trans(inv_upper(T))); } #endif /// Invert matrix /** Uses pivoting LU factorization and triangular inversion \sa \ref lu, \ref inv_upper, \ref inv_lower **/ template <typename Matrix, typename MatrixOut> inline void inv(Matrix const& A, MatrixOut& Inv) { vampir_trace<5021> tracer; typedef typename Collection<Matrix>::size_type 
size_type; typedef typename Collection<Matrix>::value_type value_type; typedef typename traits::inv<Matrix>::type result_type; const size_type N= num_rows(A); MTL_THROW_IF(num_cols(A) != num_cols(A), matrix_not_square()); MTL_DEBUG_THROW_IF(N != num_rows(Inv) || num_cols(A) != num_cols(Inv), incompatible_size()); if (N == 1) { Inv[0][0]= value_type(1) / A[0][0]; return; } result_type PLU(A); mtl::vector::dense_vector<size_type, vector::parameters<> > Pv(num_rows(A)); lu(PLU, Pv); result_type PU(upper(PLU)), PL(strict_lower(PLU)); for (size_type i= 0; i < num_rows(A); i++) PL[i][i]= value_type(1); Inv= inv_upper(PU) * inv_lower(PL) * permutation(Pv); } template <typename Matrix> typename traits::inv<Matrix>::type inline inv(Matrix const& A) { typedef typename Collection<Matrix>::size_type size_type; const size_type N= num_rows(A); typename traits::inv<Matrix>::type Inv(N, N); inv(A, Inv); return Inv; } }} // namespace mtl::matrix #endif // MTL_MATRIX_INV_INCLUDE
#include <_param_cash_total_selling.hpp>

START_ATF_NAMESPACE

    // Thunk that forwards size() to the original implementation living at a
    // fixed address inside the target binary.
    int _param_cash_total_selling::size()
    {
        // Signature of the native routine this wrapper delegates to.
        using native_fn = int (WINAPIV*)(struct _param_cash_total_selling*);
        const auto impl = native_fn(0x140304bb0L);
        return impl(this);
    };

END_ATF_NAMESPACE
#include "controls/controller.h"

#include <cmath>

#ifndef USE_SDL1
#include "controls/devices/game_controller.h"
#endif
#include "controls/devices/joystick.h"
#include "controls/devices/kbcontroller.h"

namespace devilution {

// Clears the latched trigger state on the game controller associated with
// this event, if any. No-op under SDL1 (no game-controller API there).
void UnlockControllerState(const SDL_Event &event)
{
#ifndef USE_SDL1
	GameController *const controller = GameController::Get(event);
	if (controller != nullptr) {
		controller->UnlockTriggerState();
	}
#endif
}

// Translates a raw SDL event into a ControllerButtonEvent, trying input
// backends in priority order: keyboard-as-controller, game controller,
// then plain joystick. `up` is true for release-type events.
ControllerButtonEvent ToControllerButtonEvent(const SDL_Event &event)
{
	ControllerButtonEvent result { ControllerButton_NONE, false };
	switch (event.type) {
#ifndef USE_SDL1
	case SDL_CONTROLLERBUTTONUP:
#endif
	case SDL_JOYBUTTONUP:
	case SDL_KEYUP:
		result.up = true;
		break;
	default:
		break;
	}
#if HAS_KBCTRL == 1
	// Keyboard-mapped controller buttons take precedence when enabled.
	result.button = KbCtrlToControllerButton(event);
	if (result.button != ControllerButton_NONE)
		return result;
#endif
#ifndef USE_SDL1
	GameController *const controller = GameController::Get(event);
	if (controller != nullptr) {
		result.button = controller->ToControllerButton(event);
		if (result.button != ControllerButton_NONE) {
			// Triggers are analog axes: derive press/release from the
			// controller's current pressed state rather than the event type.
			if (result.button == ControllerButton_AXIS_TRIGGERLEFT || result.button == ControllerButton_AXIS_TRIGGERRIGHT) {
				result.up = !controller->IsPressed(result.button);
			}
			return result;
		}
	}
#endif
	// Fall back to the generic joystick mapping.
	const Joystick *joystick = Joystick::Get(event);
	if (joystick != nullptr) {
		result.button = devilution::Joystick::ToControllerButton(event);
	}
	return result;
}

// Returns true if the given logical button is currently held on any
// connected input device (game controller, kb-controller, or joystick).
bool IsControllerButtonPressed(ControllerButton button)
{
#ifndef USE_SDL1
	if (GameController::IsPressedOnAnyController(button))
		return true;
#endif
#if HAS_KBCTRL == 1
	if (IsKbCtrlButtonPressed(button))
		return true;
#endif
	return Joystick::IsPressedOnAnyJoystick(button);
}

// Registers/unregisters devices on SDL hotplug events. Returns true when the
// event was a device add/remove that this function consumed; always false
// under SDL1, which has no hotplug events.
bool HandleControllerAddedOrRemovedEvent(const SDL_Event &event)
{
#ifndef USE_SDL1
	switch (event.type) {
	case SDL_CONTROLLERDEVICEADDED:
		GameController::Add(event.cdevice.which);
		break;
	case SDL_CONTROLLERDEVICEREMOVED:
		GameController::Remove(event.cdevice.which);
		break;
	case SDL_JOYDEVICEADDED:
		Joystick::Add(event.jdevice.which);
		break;
	case SDL_JOYDEVICEREMOVED:
		Joystick::Remove(event.jdevice.which);
		break;
	default:
		return false;
	}
	return true;
#else
	return false;
#endif
}

} // namespace devilution
#include <despot/simple_tui.h>
#include "reg_demo.h"

using namespace despot;

// Text-UI driver that plugs the RegDemo model into despot's SimpleTUI runner.
class TUI: public SimpleTUI {
public:
  TUI() {
  }

  // Construct the POMDP model, honoring an optional parameter file passed on
  // the command line.
  DSPOMDP* InitializeModel(option::Option* options) {
    if (options[E_PARAMS_FILE]) {
      return new RegDemo(options[E_PARAMS_FILE].arg);
    }
    return new RegDemo();
  }

  // RegDemo requires no defaults beyond what SimpleTUI already sets.
  void InitializeDefaultParameters() {
  }
};

int main(int argc, char* argv[]) {
  return TUI().run(argc, argv);
}
#include "Planner.h"

#include <AI/Behaviour/ActionSequence.h>
#include <AI/PropActorBrain.h>
#include <Core/Factory.h>

// Factory class-name prefix used to resolve action template types by name.
const CString StrActTplPrefix("AI::CActionTpl");

namespace AI
{
// Scratch list reused by FillNeighbors to deduplicate candidate actions;
// sized once in EndActionTpls. NOTE(review): file-static shared scratch —
// presumably planning is single-threaded; confirm before concurrent use.
static CArray<CActionTpl*> ActionsAdded;

using namespace Data;

// Creates and registers an action template by class name unless one with the
// same name is already registered.
void CPlanner::RegisterActionTpl(const char* Name, Data::PParams Params)
{
	if (!FindActionTpl(Name))
	{
		PActionTpl NewTpl = Core::CFactory::Instance().Create<CActionTpl>(StrActTplPrefix + Name);
		NewTpl->Init(Params);
		ActionTpls.Add(NewTpl);
		//???dictionary? CStrID -> Tpl?
	}
}
//---------------------------------------------------------------------

// Finalizes registration: indexes every newly added template under each
// world-state property its effects touch (EffectToActions is the reverse
// lookup used by the planner's neighbor search), then pre-sizes the
// deduplication scratch array. NewActIdx tracks how far indexing has
// progressed, so repeated calls only process templates added since last time.
void CPlanner::EndActionTpls()
{
	for (; NewActIdx < ActionTpls.GetCount(); ++NewActIdx)
	{
		const CWorldState& Effects = ActionTpls[NewActIdx]->GetEffects();
		for (UPTR i = 0; i < WSP_Count; ++i)
			if (Effects.IsPropSet((EWSProp)i))
				EffectToActions[i].Add(ActionTpls[NewActIdx]);
	}
	ActionsAdded.Reallocate(ActionTpls.GetCount(), 0);
}
//---------------------------------------------------------------------

// Determine the current world state values for world state properties.
// Fills gaps in WSCurr: any property the goal cares about that is not yet set
// in WSCurr is copied from the actor's actual world state.
void CPlanner::MergeWorldStates(CWorldState& WSCurr, const CWorldState& WSGoal, const CWorldState& WSActor)
{
	for (UPTR i = 0; i < WSP_Count; ++i)
		if (WSGoal.IsPropSet((EWSProp)i) && !WSCurr.IsPropSet((EWSProp)i))
			WSCurr.SetPropFrom((EWSProp)i, WSActor);
}
//---------------------------------------------------------------------

// Walks the candidate plan from pNode back to the root, replaying each
// action's effects onto a working world state and checking, per step, that
// (a) the action contributes at least one unmet effect and (b) its
// preconditions are not contradicted. Finally verifies the replayed state
// satisfies the root goal. Returns success/failure via OK/FAIL macros.
bool CPlanner::IsPlanValid(CActor* pActor, CNode* pNode, const CWorldState& WSActor)
{
	CActionTpl* pAction = pNode->pAction;
	if (!pAction) FAIL;

	CWorldState WorldState;
	MergeWorldStates(WorldState, pNode->WSCurr, WSActor);

	CNode* pNodeParent = nullptr;

	while (pAction)
	{
		pNodeParent = pNode->pParent;

		// Validate world state effects
		// Action is valid if there are effects not met in the current world state
		bool Valid = false;
		for (UPTR i = 0; i < WSP_Count; ++i)
		{
			const CData& Effect = pAction->GetEffects().GetProp((EWSProp)i);
			if (Effect.IsValid())
				// An EWSProp-typed effect is a variable: its value is looked
				// up in the parent node's goal state.
				if (Effect.IsA<EWSProp>())
				{
					EWSProp PcVal = Effect.GetValue<EWSProp>();
					if (!WorldState.IsPropSet((EWSProp)i) ||
						!pNodeParent->WSGoal.IsPropSet(PcVal) ||
						WorldState.GetProp((EWSProp)i) != pNodeParent->WSGoal.GetProp(PcVal))
					{
						Valid = true;
						break;
					}
				}
				else
				{
					if (!WorldState.IsPropSet((EWSProp)i) ||
						WorldState.GetProp((EWSProp)i) != Effect)
					{
						Valid = true;
						break;
					}
				}
		}
		if (!Valid) FAIL;

		// Validate world state preconditions
		if (pNode->HasPreconditions)
			for (UPTR i = 0; i < WSP_Count; ++i)
			{
				const CData& Precondition = pNode->WSPreconditions.GetProp((EWSProp)i);
				if (Precondition.IsValid())
					if (Precondition.IsA<EWSProp>())
					{
						EWSProp PcVal = Precondition.GetValue<EWSProp>();
						if (WorldState.IsPropSet((EWSProp)i) &&
							pNodeParent->WSGoal.IsPropSet(PcVal) &&
							WorldState.GetProp((EWSProp)i) != pNodeParent->WSGoal.GetProp(PcVal)) FAIL;
					}
					else
					{
						if (WorldState.IsPropSet((EWSProp)i) &&
							WorldState.GetProp((EWSProp)i) != Precondition) FAIL;
					}
			}

		// Checked in FindNeighbors, result can't change in synchronous environment
		//if (!pAction->ValidateContextPreconditions(pActor, pNodeParent->WSGoal)) FAIL;

		//!!!Probability was checked here!

		// Apply world state effects
		for (UPTR i = 0; i < WSP_Count; ++i)
		{
			const CData& Effect = pAction->GetEffects().GetProp((EWSProp)i);
			if (Effect.IsValid())
				if (Effect.IsA<EWSProp>())
				{
					EWSProp PcVal = Effect.GetValue<EWSProp>();
					if (pNodeParent->WSGoal.IsPropSet(PcVal))
						WorldState.SetProp((EWSProp)i, pNodeParent->WSGoal.GetProp(PcVal));
				}
				else WorldState.SetProp((EWSProp)i, Effect);
		}

		// Step towards the root of the plan.
		pNode = pNodeParent;
		pAction = pNode->pAction;
	}

	// Check does the WorldState satisfy the goal world state
	for (UPTR i = 0; i < WSP_Count; ++i)
		if (pNodeParent->WSGoal.IsPropSet((EWSProp)i) &&
			(!WorldState.IsPropSet((EWSProp)i) ||
			 pNodeParent->WSGoal.GetProp((EWSProp)i) != WorldState.GetProp((EWSProp)i))) FAIL;

	OK;
}
//---------------------------------------------------------------------

// qsort comparator: orders plan nodes by DESCENDING action precedence.
// NOTE(review): assumes both nodes have a non-null pAction — FillNeighbors
// only ever sorts nodes it created with pAction set.
int CPlanner::CmpPlannerNodes(const void* First, const void* Second)
{
	if (!First || !Second) return 0;
	return (*(const CNode**)Second)->pAction->GetPrecedence() - (*(const CNode**)First)->pAction->GetPrecedence();
}
//---------------------------------------------------------------------

// A neighbor is an Action that has an effect potentially satisfying one of the goal props.
// Neighbors are based only on the property key, not on the associated value.
// Collects candidate successor nodes for the A* search: for every world-state
// property that is set in both the node's current and goal states but whose
// values differ, every registered action with a matching effect is considered.
// Each action is added at most once (ActionsAdded dedup), must be available to
// the actor, and must pass its context preconditions. Result is sorted by
// action precedence (descending).
void CPlanner::FillNeighbors(CActor* pActor, const CNode& Node, CArray<CNode*>& OutNeighbors)
{
	OutNeighbors.Clear();
	ActionsAdded.Clear();

	for (UPTR i = 0; i < WSP_Count; ++i)
	{
		// Neighbor satisfies property if it is set in both Curr & Goal WS and Curr value != Goal value
		if (!Node.WSCurr.IsPropSet((EWSProp)i) ||
			!Node.WSGoal.IsPropSet((EWSProp)i) ||
			Node.WSCurr.GetProp((EWSProp)i) == Node.WSGoal.GetProp((EWSProp)i)) continue;

		CArray<CActionTpl*>& Actions = EffectToActions[i];
		for (CArray<CActionTpl*>::CIterator ppAction = Actions.Begin(); ppAction != Actions.End(); ++ppAction)
			if (ActionsAdded.FindIndexSorted(*ppAction) == INVALID_INDEX &&
				pActor->IsActionAvailable(*ppAction) &&
				(*ppAction)->ValidateContextPreconditions(pActor, Node.WSGoal))
			{
				CNode* pNewNode = NodePool.Construct();
				pNewNode->pAction = *ppAction;
				OutNeighbors.Add(pNewNode);
				ActionsAdded.InsertSorted(*ppAction);
				// Can never add more neighbors than there are templates.
				if (OutNeighbors.GetCount() >= ActionTpls.GetCount()) break;
			}
	}

	// Sort actions by precedence, so the plan will be sorted by precedence too
	if (OutNeighbors.GetCount() > 1)
		qsort((void*)OutNeighbors.Begin(), (size_t)OutNeighbors.GetCount(), sizeof(CNode*), CmpPlannerNodes);
}
//---------------------------------------------------------------------

// Builds an action plan satisfying pGoal using A* regressive search over
// world states (GOAP-style): the search starts from the goal and regresses
// towards the actor's actual state. Returns the plan as a single action or a
// CActionSequence, or a null PAction when no plan exists. All search nodes
// are returned to NodePool before returning.
PAction CPlanner::BuildPlan(CActor* pActor, CGoal* pGoal)
{
	n_assert(pActor && pGoal);

	CWorldState WSActor;
	pActor->FillWorldState(WSActor);

	// Some sources recommend to use IDA* instead of basic A*.
	// We use A* at least for now.

	Data::CList<CNode*> OpenList, ClosedList;

	// Root node: current state merged against the goal's desired properties.
	Data::CList<CNode*>::CIterator ItCurrNode = OpenList.AddFront(NodePool.Construct());
	CNode* pCurrNode = *ItCurrNode;
	pGoal->GetDesiredProps(pCurrNode->WSGoal);
	MergeWorldStates(pCurrNode->WSCurr, pCurrNode->WSGoal, WSActor);
	pCurrNode->Goal = 0;
	// Fitness = cost so far + heuristic (count of unsatisfied properties).
	pCurrNode->Fitness = pCurrNode->WSCurr.GetDiffCount(pCurrNode->WSGoal);

	CArray<CNode*> Neighbors;

	while (true)
	{
		//!!!PERF
		// Optimization: "How to Achieve Lightning-Fast A*",
		// AI Game Programming Wisdom, p. 133.
		// Specifically "Be a Cheapskate" on p. 140.
		// Linear scan for the open node with the lowest Fitness.
		ItCurrNode = OpenList.Begin();
		if (!ItCurrNode) break; // No valid plan exists
		for (Data::CList<CNode*>::CIterator It = ItCurrNode; It; ++It)
			if ((*It)->Fitness < (*ItCurrNode)->Fitness)
				ItCurrNode = It;
		OpenList.Remove(ItCurrNode, &pCurrNode);
		ClosedList.AddFront(pCurrNode);

		//!!!this re-checks plan from the beginning! can re-check only part changed since last check?
		if (IsPlanValid(pActor, pCurrNode, WSActor)) break; // Valid plan found

		FillNeighbors(pActor, *pCurrNode, Neighbors);
		for (UPTR i = 0; i < Neighbors.GetCount(); ++i)
		{
			CNode* pNeighbor = Neighbors[i];
			pNeighbor->WSCurr = pCurrNode->WSCurr;
			pNeighbor->WSGoal = pCurrNode->WSGoal;
			pNeighbor->HasPreconditions = pNeighbor->pAction->GetPreconditions(pActor, pNeighbor->WSPreconditions, pCurrNode->WSGoal);

			// Apply effects from goal. Solve properties of the world state that need to
			// be satisfied that match the action's effects
			// NOTE(review): the inner loops below reuse the name 'i',
			// shadowing the Neighbors index — intentional-looking but worth
			// renaming on a future edit.
			for (UPTR i = 0; i < WSP_Count; ++i)
			{
				const CData& Effect = pNeighbor->pAction->GetEffects().GetProp((EWSProp)i);
				if (Effect.IsValid())
				{
					const CData& Result = pNeighbor->WSGoal.GetProp((Effect.IsA<EWSProp>()) ? Effect.GetValue<EWSProp>() : (EWSProp)i);
					if (Result.IsValid())
					{
						n_assert2(!Result.IsA<EWSProp>(), "Setting WS prop to variable not allowed here!");
						pNeighbor->WSCurr.SetProp((EWSProp)i, Result);
					}
				}
			}

			// Apply preconditions
			if (pNeighbor->HasPreconditions)
			{
				for (UPTR i = 0; i < WSP_Count; ++i)
				{
					const CData& Precondition = pNeighbor->WSPreconditions.GetProp((EWSProp)i);
					if (!Precondition.IsValid()) continue;
					if (Precondition.IsA<EWSProp>())
					{
						// Variable precondition: resolve against the goal state.
						EWSProp PcVal = Precondition.GetValue<EWSProp>();
						if (pNeighbor->WSGoal.IsPropSet(PcVal))
							pNeighbor->WSGoal.SetProp((EWSProp)i, pNeighbor->WSGoal.GetProp(PcVal));
					}
					else pNeighbor->WSGoal.SetProp((EWSProp)i, Precondition);
				}
			}

			MergeWorldStates(pNeighbor->WSCurr, pNeighbor->WSGoal, WSActor);

			pNeighbor->Goal = pCurrNode->Goal + pNeighbor->pAction->GetCost();
			pNeighbor->Fitness = pNeighbor->Goal + pNeighbor->WSCurr.GetDiffCount(pNeighbor->WSGoal);
			pNeighbor->pParent = pCurrNode;

			n_assert(pNeighbor->Fitness < std::numeric_limits<int>::max());
			//if (pNeighbor->Fitness == std::numeric_limits<int>::max()) NodePool.Destroy(pNeighbor);
			OpenList.AddFront(pNeighbor);
		}

		// No need now, but can optimize:
		// Reorder the list OPEN in order of increasing Fitness values. (Ties among minimal Fitness values
		// are resolved in favor of the deepest node in the search tree).
	}

	// Convert the node chain (goal -> root) into a concrete action (sequence).
	PAction Plan;
	PActionSequence Seq;

	DBG_ONLY(Sys::Log("Planner -> '%s' Begin plan\n", pActor->GetEntity()->GetUID()));

	while (pCurrNode && pCurrNode->pAction)
	{
		PAction CurrAction = pCurrNode->pAction->CreateInstance(pCurrNode->pParent->WSGoal);
		if (CurrAction.IsValidPtr())
		{
#ifdef _DEBUG
			std::string DbgString;
			CurrAction->GetDebugString(DbgString);
			Sys::Log("Planner -> '%s' Action added: '%s'\n", pActor->GetEntity()->GetUID(), DbgString.c_str());
#endif
			// A single action is returned directly; a second action promotes
			// the plan to a CActionSequence.
			if (Plan.IsNullPtr()) Plan = CurrAction;
			else
			{
				if (Seq.IsNullPtr())
				{
					Seq = n_new(CActionSequence);
					Seq->AddChild(Plan);
					Plan = Seq;
				}
				Seq->AddChild(CurrAction);
			}
		}
		pCurrNode = pCurrNode->pParent;
	}

	DBG_ONLY(Sys::Log("Planner -> '%s' End plan\n", pActor->GetEntity()->GetUID()));

	// Recycle all search nodes regardless of outcome.
	while (OpenList.RemoveBack(&pCurrNode)) NodePool.Destroy(pCurrNode);
	while (ClosedList.RemoveBack(&pCurrNode)) NodePool.Destroy(pCurrNode);

	return Plan;
}
//---------------------------------------------------------------------

}
/*
 * Copyright (c) 2018 MariaDB Corporation Ab
 *
 * Use of this software is governed by the Business Source License included
 * in the LICENSE.TXT file and at www.mariadb.com/bsl11.
 *
 * Change Date: 2025-05-25
 *
 * On the date above, in accordance with the Business Source License, use
 * of this software will be governed by version 2 or later of the General
 * Public License.
 */
#pragma once

// ccdefs.hh only exists inside the MaxScale source tree; guarding the include
// keeps this header usable (and unit-testable) standalone as well.
#if __has_include(<maxscale/ccdefs.hh>)
#include <maxscale/ccdefs.hh>
#endif

#include <algorithm>
#include <functional>
#include <numeric>

/**
 * Helper functions for calculating statistics over STL containers of classes. Containers of
 * fundamental types aren't supported as the standard library functions already implement it.
 */
namespace maxscale
{

template<typename T>
using ValueType = typename T::value_type;

/**
 * Calculate sum of members
 *
 * @param values Container of values
 * @param member Member of T::value_type to use
 *
 * @return Sum of member values
 */
template<typename T, typename R>
R sum(const T& values, R ValueType<T>::* member)
{
    // Elements are taken by const& to avoid a copy per accumulation step.
    return std::accumulate(values.begin(), values.end(), R {},
                           [&](R r, const ValueType<T>& t) {
                               return r + t.*member;
                           });
}

/**
 * Calculate average of members
 *
 * @param values Container of values
 * @param member Member of T::value_type to use
 *
 * @return Average of member values, or a value-initialized R for empty input
 */
template<typename T, typename R>
R avg(const T& values, R ValueType<T>::* member)
{
    return values.empty() ? R {} : sum(values, member) / static_cast<R>(values.size());
}

/**
 * Get minimum member value
 *
 * @param values Container of values
 * @param member Member of T::value_type to use
 *
 * @return The minimum value of T::*member in `values`, or a value-initialized
 *         R for empty input
 */
template<typename T, typename R>
R min(const T& values, R ValueType<T>::* member)
{
    auto it = std::min_element(values.begin(), values.end(),
                               [&](const ValueType<T>& a, const ValueType<T>& b) {
                                   return a.*member < b.*member;
                               });
    return it != values.end() ? (*it).*member : R {};
}

/**
 * Get maximum member value
 *
 * @param values Container of values
 * @param member Member of T::value_type to use
 *
 * @return The maximum value of T::*member in `values`, or a value-initialized
 *         R for empty input
 */
template<typename T, typename R>
R max(const T& values, R ValueType<T>::* member)
{
    auto it = std::max_element(values.begin(), values.end(),
                               [&](const ValueType<T>& a, const ValueType<T>& b) {
                                   return a.*member < b.*member;
                               });
    return it != values.end() ? (*it).*member : R {};
}

/**
 * Helper function for accumulating container-like member values
 *
 * This function accumulates the values element-wise with `accum` and returns
 * the resulting container. Note that the accumulator starts from a
 * value-initialized R, so R must be a fixed-size container (e.g. std::array)
 * for the element-wise transform to see any elements.
 *
 * @param values Container of values
 * @param member Member of T::value_type to use
 * @param accum  Accumulator function
 *
 * @return Accumulated container
 */
template<typename T, typename R, typename Accum>
R accumulate(const T& values, R ValueType<T>::* member, Accum accum)
{
    return std::accumulate(values.begin(), values.end(), R {},
                           [&](R r, const ValueType<T>& t) {
                               std::transform(r.begin(), r.end(), (t.*member).begin(), r.begin(),
                                              [&](const ValueType<R>& a, const ValueType<R>& b) {
                                                  return accum(a, b);
                                              });
                               return r;
                           });
}

/**
 * Calculate sum of member container values
 *
 * @param values Container of values
 * @param member Member of T::value_type to use
 *
 * @return Sum of members
 */
template<typename T, typename R>
R sum_element(const T& values, R ValueType<T>::* member)
{
    return accumulate(values, member, std::plus<ValueType<R>>());
}

/**
 * Calculate average of member container values
 *
 * @param values Container of values
 * @param member Member of T::value_type to use
 *
 * @return Average of members
 */
template<typename T, typename R>
R avg_element(const T& values, R ValueType<T>::* member)
{
    auto result = sum_element(values, member);

    for (auto&& a : result)
    {
        // Using C-style cast to work around an uncrustify bug
        a /= (ValueType<R>)(values.size());
    }

    return result;
}

/**
 * Calculate minimum of member container values
 *
 * @param values Container of values
 * @param member Member of T::value_type to use
 *
 * @return Minimum of members (element-wise against a value-initialized start)
 */
template<typename T, typename R>
R min_element(const T& values, R ValueType<T>::* member)
{
    return accumulate(values, member,
                      [](const ValueType<R>& a, const ValueType<R>& b) {
                          return std::min(a, b);
                      });
}

/**
 * Calculate maximum of member container values
 *
 * @param values Container of values
 * @param member Member of T::value_type to use
 *
 * @return Maximum of members (element-wise against a value-initialized start)
 */
template<typename T, typename R>
R max_element(const T& values, R ValueType<T>::* member)
{
    return accumulate(values, member,
                      [](const ValueType<R>& a, const ValueType<R>& b) {
                          return std::max(a, b);
                      });
}
}
/**
 *  @file
 *  @copyright defined in roxe/LICENSE
 */
#pragma once

#include <roxe/chain/abi_def.hpp>
#include <roxe/chain/trace.hpp>
#include <roxe/chain/exceptions.hpp>
#include <fc/variant_object.hpp>
#include <fc/scoped_exit.hpp>

namespace roxe { namespace chain {

using std::map;
using std::string;
using std::function;
using std::pair;
using namespace fc;

namespace impl {
   struct abi_from_variant;
   struct abi_to_variant;

   struct abi_traverse_context;
   struct abi_traverse_context_with_path;
   struct binary_to_variant_context;
   struct variant_to_binary_context;
}

/**
 *  Describes the binary representation message and table contents so that it can
 *  be converted to and from JSON.
 */
struct abi_serializer {
   abi_serializer(){ configure_built_in_types(); }
   abi_serializer( const abi_def& abi, const fc::microseconds& max_serialization_time );
   /// Replace the current ABI; all type/struct/action/table maps are rebuilt from @p abi.
   void set_abi(const abi_def& abi, const fc::microseconds& max_serialization_time);

   /// Follow typedef chains until a fundamental (non-aliased) type name is reached.
   type_name resolve_type(const type_name& t)const;
   bool      is_array(const type_name& type)const;
   bool      is_optional(const type_name& type)const;
   bool      is_type(const type_name& type, const fc::microseconds& max_serialization_time)const;
   bool      is_builtin_type(const type_name& type)const;
   bool      is_integer(const type_name& type) const;
   int       get_integer_size(const type_name& type) const;
   bool      is_struct(const type_name& type)const;
   /// Strip array/optional decorations to obtain the element type.
   type_name fundamental_type(const type_name& type)const;

   const struct_def& get_struct(const type_name& type)const;

   type_name get_action_type(name action)const;
   type_name get_table_type(name action)const;

   optional<string> get_error_message( uint64_t error_code )const;

   /// Decode ABI-typed binary data into a JSON-convertible variant.
   fc::variant binary_to_variant( const type_name& type, const bytes& binary, const fc::microseconds& max_serialization_time, bool short_path = false )const;
   fc::variant binary_to_variant( const type_name& type, fc::datastream<const char*>& binary, const fc::microseconds& max_serialization_time, bool short_path = false )const;

   /// Encode a variant into the ABI binary representation of @p type.
   bytes variant_to_binary( const type_name& type, const fc::variant& var, const fc::microseconds& max_serialization_time, bool short_path = false )const;
   void  variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream<char*>& ds, const fc::microseconds& max_serialization_time, bool short_path = false )const;

   template<typename T, typename Resolver>
   static void to_variant( const T& o, fc::variant& vo, Resolver resolver, const fc::microseconds& max_serialization_time );

   template<typename T, typename Resolver>
   static void from_variant( const fc::variant& v, T& o, Resolver resolver, const fc::microseconds& max_serialization_time );

   template<typename Vec>
   static bool is_empty_abi(const Vec& abi_vec)
   {
      return abi_vec.size() <= 4;
   }

   /// Unpack a serialized ABI blob into @p abi; returns false for an empty ABI.
   template<typename Vec>
   static bool to_abi(const Vec& abi_vec, abi_def& abi)
   {
      if( !is_empty_abi(abi_vec) ) { /// 4 == packsize of empty Abi
         fc::datastream<const char*> ds( abi_vec.data(), abi_vec.size() );
         fc::raw::unpack( ds, abi );
         return true;
      }
      return false;
   }

   typedef std::function<fc::variant(fc::datastream<const char*>&, bool, bool)>  unpack_function;
   typedef std::function<void(const fc::variant&, fc::datastream<char*>&, bool, bool)>  pack_function;

   /// Register a custom pack/unpack pair for a built-in type name.
   void add_specialized_unpack_pack( const string& name, std::pair<abi_serializer::unpack_function, abi_serializer::pack_function> unpack_pack );

   static const size_t max_recursion_depth = 32; // arbitrary depth to prevent infinite recursion

private:
   map<type_name, type_name>     typedefs;
   map<type_name, struct_def>    structs;
   map<name,type_name>           actions;
   map<name,type_name>           tables;
   map<uint64_t, string>         error_messages;
   map<type_name, variant_def>   variants;

   map<type_name, pair<unpack_function, pack_function>> built_in_types;
   void configure_built_in_types();

   fc::variant _binary_to_variant( const type_name& type, const bytes& binary, impl::binary_to_variant_context& ctx )const;
   fc::variant _binary_to_variant( const type_name& type, fc::datastream<const char*>& binary, impl::binary_to_variant_context& ctx )const;
   void        _binary_to_variant( const type_name& type, fc::datastream<const char*>& stream, fc::mutable_variant_object& obj, impl::binary_to_variant_context& ctx )const;

   bytes       _variant_to_binary( const type_name& type, const fc::variant& var, impl::variant_to_binary_context& ctx )const;
   void        _variant_to_binary( const type_name& type, const fc::variant& var, fc::datastream<char*>& ds, impl::variant_to_binary_context& ctx )const;

   static type_name _remove_bin_extension(const type_name& type);

   bool _is_type( const type_name& type, impl::abi_traverse_context& ctx )const;

   void validate( impl::abi_traverse_context& ctx )const;

   friend struct impl::abi_from_variant;
   friend struct impl::abi_to_variant;
   friend struct impl::abi_traverse_context_with_path;
};

namespace impl {

   /// Tracks a serialization deadline and recursion depth shared by one
   /// (de)serialization traversal.
   struct abi_traverse_context {
      abi_traverse_context( fc::microseconds max_serialization_time )
      : max_serialization_time( max_serialization_time ),
        deadline( fc::time_point::now() ), // init to now, updated below
        recursion_depth(0)
      {
         // Guard against overflow when max_serialization_time is near microseconds::maximum().
         if( max_serialization_time > fc::microseconds::maximum() - deadline.time_since_epoch() ) {
            deadline = fc::time_point::maximum();
         } else {
            deadline += max_serialization_time;
         }
      }

      abi_traverse_context( fc::microseconds max_serialization_time, fc::time_point deadline )
      : max_serialization_time( max_serialization_time ), deadline( deadline ), recursion_depth(0)
      {}

      void check_deadline()const;

      /// Bumps recursion_depth; returned guard restores it on scope exit.
      fc::scoped_exit<std::function<void()>> enter_scope();

   protected:
      fc::microseconds max_serialization_time;
      fc::time_point   deadline;
      size_t           recursion_depth;
   };

   // Path bookkeeping used to produce human-readable error locations
   // (e.g. "struct.field[3]") when (de)serialization fails.

   struct empty_path_root {};

   struct array_type_path_root {
   };

   struct struct_type_path_root {
      map<type_name, struct_def>::const_iterator  struct_itr;
   };

   struct variant_type_path_root {
      map<type_name, variant_def>::const_iterator variant_itr;
   };

   using path_root = static_variant<empty_path_root, array_type_path_root, struct_type_path_root, variant_type_path_root>;

   struct empty_path_item {};

   struct array_index_path_item {
      path_root type_hint;
      uint32_t  array_index = 0;
   };

   struct field_path_item {
      map<type_name, struct_def>::const_iterator parent_struct_itr;
      uint32_t field_ordinal = 0;
   };

   struct variant_path_item {
      map<type_name, variant_def>::const_iterator variant_itr;
      uint32_t variant_ordinal = 0;
   };

   using path_item = static_variant<empty_path_item, array_index_path_item, field_path_item, variant_path_item>;

   /// Traverse context that additionally records the path from the root type
   /// to the element currently being processed.
   struct abi_traverse_context_with_path : public abi_traverse_context {
      abi_traverse_context_with_path( const abi_serializer& abis, fc::microseconds max_serialization_time, const type_name& type )
      : abi_traverse_context( max_serialization_time ), abis(abis)
      {
         set_path_root(type);
      }

      abi_traverse_context_with_path( const abi_serializer& abis, fc::microseconds max_serialization_time, fc::time_point deadline, const type_name& type )
      : abi_traverse_context( max_serialization_time, deadline ), abis(abis)
      {
         set_path_root(type);
      }

      abi_traverse_context_with_path( const abi_serializer& abis, const abi_traverse_context& ctx, const type_name& type )
      : abi_traverse_context(ctx), abis(abis)
      {
         set_path_root(type);
      }

      void set_path_root( const type_name& type );

      /// Pushes @p item onto the path; returned guard pops it on scope exit.
      fc::scoped_exit<std::function<void()>> push_to_path( const path_item& item );

      void set_array_index_of_path_back( uint32_t i );
      void hint_array_type_if_in_array();
      void hint_struct_type_if_in_array( const map<type_name, struct_def>::const_iterator& itr );
      void hint_variant_type_if_in_array( const map<type_name, variant_def>::const_iterator& itr );

      string get_path_string()const;
      string maybe_shorten( const string& str );

   protected:
      const abi_serializer&  abis;
      path_root              root_of_path;
      vector<path_item>      path;
   public:
      bool                   short_path = false;
   };

   struct binary_to_variant_context : public abi_traverse_context_with_path {
      using abi_traverse_context_with_path::abi_traverse_context_with_path;
   };

   struct variant_to_binary_context : public abi_traverse_context_with_path {
      using abi_traverse_context_with_path::abi_traverse_context_with_path;

      /// Temporarily forbids binary extensions unless @p condition holds;
      /// returned guard restores the previous setting on scope exit.
      fc::scoped_exit<std::function<void()>> disallow_extensions_unless( bool condition );

      bool extensions_allowed()const { return allow_extensions; }

   protected:
      bool allow_extensions = true;
   };

   /**
    * Determine if a type contains ABI related info, perhaps deeply nested
    * @tparam T - the type to check
    */
   template<typename T>
   constexpr bool single_type_requires_abi_v() {
      return std::is_base_of<transaction, T>::value ||
             std::is_same<T, packed_transaction>::value ||
             std::is_same<T, transaction_trace>::value ||
             std::is_same<T, transaction_receipt>::value ||
             std::is_same<T, action_trace>::value ||
             std::is_same<T, signed_transaction>::value ||
             std::is_same<T, signed_block>::value ||
             std::is_same<T, action>::value;
   }

   /**
    * Basic constexpr for a type, aliases the basic check directly
    * @tparam T - the type to check
    */
   template<typename T>
   struct type_requires_abi {
      static constexpr bool value() {
         return single_type_requires_abi_v<T>();
      }
   };

   /**
    * specialization that catches common container patterns and checks their contained-type
    * @tparam Container - a templated container type whose first argument is the contained type
    */
   template<template<typename ...> class Container, typename T, typename ...Args >
   struct type_requires_abi<Container<T, Args...>> {
      static constexpr bool value() {
         return single_type_requires_abi_v<T>();
      }
   };

   template<typename T>
   constexpr bool type_requires_abi_v() {
      return type_requires_abi<T>::value();
   }

   /**
    * convenience aliases for creating overload-guards based on whether the type contains ABI related info
    */
   template<typename T>
   using not_require_abi_t = std::enable_if_t<!type_requires_abi_v<T>(), int>;

   template<typename T>
   using require_abi_t = std::enable_if_t<type_requires_abi_v<T>(), int>;

   struct abi_to_variant {
      /**
       * template which overloads add for types which are not relevant to ABI information
       * and can be degraded to the normal ::to_variant(...) processing
       */
      template<typename M, typename Resolver, not_require_abi_t<M> = 1>
      static void add( mutable_variant_object &mvo, const char* name, const M& v, Resolver, abi_traverse_context& ctx )
      {
         auto h = ctx.enter_scope();
         mvo(name,v);
      }

      /**
       * template which overloads add for types which contain ABI information in their trees
       * for these types we create new ABI aware visitors
       */
      template<typename M, typename Resolver, require_abi_t<M> = 1>
      static void add( mutable_variant_object &mvo, const char* name, const M& v, Resolver resolver, abi_traverse_context& ctx );

      /**
       * template which overloads add for vectors of types which contain ABI information in their trees
       * for these members we call ::add in order to trigger further processing
       */
      template<typename M, typename Resolver, require_abi_t<M> = 1>
      static void add( mutable_variant_object &mvo, const char* name, const vector<M>& v, Resolver resolver, abi_traverse_context& ctx )
      {
         auto h = ctx.enter_scope();
         vector<variant> array;
         array.reserve(v.size());

         for (const auto& iter: v) {
            mutable_variant_object elem_mvo;
            add(elem_mvo, "_", iter, resolver, ctx);
            array.emplace_back(std::move(elem_mvo["_"]));
         }
         mvo(name, std::move(array));
      }

      /**
       * template which overloads add for shared_ptr of types which contain ABI information in their trees
       * for these members we call ::add in order to trigger further processing
       */
      template<typename M, typename Resolver, require_abi_t<M> = 1>
      static void add( mutable_variant_object &mvo, const char* name, const std::shared_ptr<M>& v, Resolver resolver, abi_traverse_context& ctx )
      {
         auto h = ctx.enter_scope();
         if( !v ) return; // null pointers are simply omitted from the output object
         mutable_variant_object obj_mvo;
         add(obj_mvo, "_", *v, resolver, ctx);
         mvo(name, std::move(obj_mvo["_"]));
      }

      /// static_variant visitor that forwards each alternative to ::add.
      template<typename Resolver>
      struct add_static_variant {
         mutable_variant_object& obj_mvo;
         Resolver& resolver;
         abi_traverse_context& ctx;

         add_static_variant( mutable_variant_object& o, Resolver& r, abi_traverse_context& ctx )
         :obj_mvo(o), resolver(r), ctx(ctx) {}

         typedef void result_type;
         template<typename T> void operator()( T& v )const
         {
            add(obj_mvo, "_", v, resolver, ctx);
         }
      };

      template<typename Resolver, typename... Args>
      static void add( mutable_variant_object &mvo, const char* name, const fc::static_variant<Args...>& v, Resolver resolver, abi_traverse_context& ctx )
      {
         auto h = ctx.enter_scope();
         mutable_variant_object obj_mvo;
         add_static_variant<Resolver> adder(obj_mvo, resolver, ctx);
         v.visit(adder);
         mvo(name, std::move(obj_mvo["_"]));
      }

      /**
       * overload of to_variant_object for actions
       * @tparam Resolver
       * @param act
       * @param resolver
       * @return
       */
      template<typename Resolver>
      static void add( mutable_variant_object &out, const char* name, const action& act, Resolver resolver, abi_traverse_context& ctx )
      {
         auto h = ctx.enter_scope();
         mutable_variant_object mvo;
         mvo("account", act.account);
         mvo("name", act.name);
         mvo("authorization", act.authorization);

         try {
            auto abi = resolver(act.account);
            if (abi.valid()) {
               auto type = abi->get_action_type(act.name);
               if (!type.empty()) {
                  try {
                     binary_to_variant_context _ctx(*abi, ctx, type);
                     _ctx.short_path = true; // Just to be safe while avoiding the complexity of threading an override boolean all over the place
                     mvo( "data", abi->_binary_to_variant( type, act.data, _ctx ));
                     mvo("hex_data", act.data);
                  } catch(...) {
                     // any failure to serialize data, then leave as not serialized
                     mvo("data", act.data);
                  }
               } else {
                  mvo("data", act.data);
               }
            } else {
               mvo("data", act.data);
            }
         } catch(...) {
            // resolver failure also falls back to the raw hex form
            mvo("data", act.data);
         }
         out(name, std::move(mvo));
      }

      /**
       * overload of to_variant_object for packed_transaction
       * @tparam Resolver
       * @param act
       * @param resolver
       * @return
       */
      template<typename Resolver>
      static void add( mutable_variant_object &out, const char* name, const packed_transaction& ptrx, Resolver resolver, abi_traverse_context& ctx )
      {
         auto h = ctx.enter_scope();
         mutable_variant_object mvo;
         auto trx = ptrx.get_transaction();
         mvo("id", trx.id());
         mvo("signatures", ptrx.get_signatures());
         mvo("compression", ptrx.get_compression());
         mvo("packed_context_free_data", ptrx.get_packed_context_free_data());
         mvo("context_free_data", ptrx.get_context_free_data());
         mvo("packed_trx", ptrx.get_packed_transaction());
         add(mvo, "transaction", trx, resolver, ctx);

         out(name, std::move(mvo));
      }
   };

   /**
    * Reflection visitor that uses a resolver to resolve ABIs for nested types
    * this will degrade to the common fc::to_variant as soon as the type no longer contains
    * ABI related info
    *
    * @tparam Resolver - callable with the signature (const name& code_account) -> optional<abi_def>
    */
   template<typename T, typename Resolver>
   class abi_to_variant_visitor
   {
      public:
         abi_to_variant_visitor( mutable_variant_object& _mvo, const T& _val, Resolver _resolver, abi_traverse_context& _ctx )
         :_vo(_mvo)
         ,_val(_val)
         ,_resolver(_resolver)
         ,_ctx(_ctx)
         {}

         /**
          * Visit a single member and add it to the variant object
          * @tparam Member - the member to visit
          * @tparam Class - the class we are traversing
          * @tparam member - pointer to the member
          * @param name - the name of the member
          */
         template<typename Member, class Class, Member (Class::*member) >
         void operator()( const char* name )const
         {
            abi_to_variant::add( _vo, name, (_val.*member), _resolver, _ctx );
         }

      private:
         mutable_variant_object& _vo;
         const T& _val;
         Resolver _resolver;
         abi_traverse_context& _ctx;
   };

   struct abi_from_variant {
      /**
       * template which overloads extract for types which are not relevant to ABI information
       * and can be degraded to the normal ::from_variant(...) processing
       */
      template<typename M, typename Resolver, not_require_abi_t<M> = 1>
      static void extract( const variant& v, M& o, Resolver, abi_traverse_context& ctx )
      {
         auto h = ctx.enter_scope();
         from_variant(v, o);
      }

      /**
       * template which overloads extract for types which contain ABI information in their trees
       * for these types we create new ABI aware visitors
       */
      template<typename M, typename Resolver, require_abi_t<M> = 1>
      static void extract( const variant& v, M& o, Resolver resolver, abi_traverse_context& ctx );

      /**
       * template which overloads extract for vectors of types which contain ABI information in their trees
       * for these members we call ::extract in order to trigger further processing
       */
      template<typename M, typename Resolver, require_abi_t<M> = 1>
      static void extract( const variant& v, vector<M>& o, Resolver resolver, abi_traverse_context& ctx )
      {
         auto h = ctx.enter_scope();
         const variants& array = v.get_array();
         o.clear();
         o.reserve( array.size() );
         for( auto itr = array.begin(); itr != array.end(); ++itr ) {
            M o_iter;
            extract(*itr, o_iter, resolver, ctx);
            o.emplace_back(std::move(o_iter));
         }
      }

      /**
       * template which overloads extract for shared_ptr of types which contain ABI information in their trees
       * for these members we call ::extract in order to trigger further processing
       */
      template<typename M, typename Resolver, require_abi_t<M> = 1>
      static void extract( const variant& v, std::shared_ptr<M>& o, Resolver resolver, abi_traverse_context& ctx )
      {
         auto h = ctx.enter_scope();
         const variant_object& vo = v.get_object();
         M obj;
         extract(vo, obj, resolver, ctx);
         o = std::make_shared<M>(obj);
      }

      /**
       * Non templated overload that has priority for the action structure
       * this type has members which must be directly translated by the ABI so it is
       * exploded and processed explicitly
       */
      template<typename Resolver>
      static void extract( const variant& v, action& act, Resolver resolver, abi_traverse_context& ctx )
      {
         auto h = ctx.enter_scope();
         const variant_object& vo = v.get_object();
         ROXE_ASSERT(vo.contains("account"), packed_transaction_type_exception, "Missing account");
         ROXE_ASSERT(vo.contains("name"), packed_transaction_type_exception, "Missing name");
         from_variant(vo["account"], act.account);
         from_variant(vo["name"], act.name);
         if (vo.contains("authorization")) {
            from_variant(vo["authorization"], act.authorization);
         }

         bool valid_empty_data = false;
         if( vo.contains( "data" ) ) {
            const auto& data = vo["data"];
            if( data.is_string() ) {
               // "data" given as a hex string is taken verbatim
               from_variant(data, act.data);
               valid_empty_data = act.data.empty();
            } else if ( data.is_object() ) {
               // "data" given as a JSON object is packed through the account's ABI
               auto abi = resolver(act.account);
               if (abi.valid()) {
                  auto type = abi->get_action_type(act.name);
                  if (!type.empty()) {
                     variant_to_binary_context _ctx(*abi, ctx, type);
                     _ctx.short_path = true; // Just to be safe while avoiding the complexity of threading an override boolean all over the place
                     act.data = std::move( abi->_variant_to_binary( type, data, _ctx ));
                     valid_empty_data = act.data.empty();
                  }
               }
            }
         }

         // Fall back to "hex_data" only when "data" did not yield anything.
         if( !valid_empty_data && act.data.empty() ) {
            if( vo.contains( "hex_data" ) ) {
               const auto& data = vo["hex_data"];
               if( data.is_string() ) {
                  from_variant(data, act.data);
               }
            }
         }

         ROXE_ASSERT(valid_empty_data || !act.data.empty(), packed_transaction_type_exception,
                     "Failed to deserialize data for ${account}:${name}", ("account", act.account)("name", act.name));
      }

      template<typename Resolver>
      static void extract( const variant& v, packed_transaction& ptrx, Resolver resolver, abi_traverse_context& ctx )
      {
         auto h = ctx.enter_scope();
         const variant_object& vo = v.get_object();
         ROXE_ASSERT(vo.contains("signatures"), packed_transaction_type_exception, "Missing signatures");
         ROXE_ASSERT(vo.contains("compression"), packed_transaction_type_exception, "Missing compression");
         std::vector<signature_type> signatures;
         packed_transaction::compression_type compression;
         from_variant(vo["signatures"], signatures);
         from_variant(vo["compression"], compression);

         // context-free data may arrive either packed (string) or as an array
         bytes packed_cfd;
         std::vector<bytes> cfd;
         bool use_packed_cfd = false;
         if( vo.contains("packed_context_free_data") && vo["packed_context_free_data"].is_string() && !vo["packed_context_free_data"].as_string().empty() ) {
            from_variant(vo["packed_context_free_data"], packed_cfd );
            use_packed_cfd = true;
         } else if( vo.contains("context_free_data") ) {
            from_variant(vo["context_free_data"], cfd);
         }

         if( vo.contains("packed_trx") && vo["packed_trx"].is_string() && !vo["packed_trx"].as_string().empty() ) {
            // packed form takes precedence over an unpacked "transaction" object
            bytes packed_trx;
            from_variant(vo["packed_trx"], packed_trx);
            if( use_packed_cfd ) {
               ptrx = packed_transaction( std::move( packed_trx ), std::move( signatures ), std::move( packed_cfd ), compression );
            } else {
               ptrx = packed_transaction( std::move( packed_trx ), std::move( signatures ), std::move( cfd ), compression );
            }
         } else {
            ROXE_ASSERT(vo.contains("transaction"), packed_transaction_type_exception, "Missing transaction");
            if( use_packed_cfd ) {
               transaction trx;
               extract( vo["transaction"], trx, resolver, ctx );
               ptrx = packed_transaction( std::move(trx), std::move(signatures), std::move(packed_cfd), compression );
            } else {
               signed_transaction trx;
               extract( vo["transaction"], trx, resolver, ctx );
               trx.signatures = std::move( signatures );
               trx.context_free_data = std::move(cfd);
               ptrx = packed_transaction( std::move( trx ), compression );
            }
         }
      }
   };

   /**
    * Reflection visitor that uses a resolver to resolve ABIs for nested types
    * this will degrade to the common fc::from_variant as soon as the type no longer contains
    * ABI related info
    *
    * @tparam Resolver - callable with the signature (const name& code_account) -> optional<abi_def>
    */
   template<typename T, typename Resolver>
   class abi_from_variant_visitor : public reflector_init_visitor<T>
   {
      public:
         abi_from_variant_visitor( const variant_object& _vo, T& v, Resolver _resolver, abi_traverse_context& _ctx )
         : reflector_init_visitor<T>(v)
         ,_vo(_vo)
         ,_resolver(_resolver)
         ,_ctx(_ctx)
         {}

         /**
          * Visit a single member and extract it from the variant object
          * @tparam Member - the member to visit
          * @tparam Class - the class we are traversing
          * @tparam member - pointer to the member
          * @param name - the name of the member
          */
         template<typename Member, class Class, Member (Class::*member)>
         void operator()( const char* name )const
         {
            auto itr = _vo.find(name);
            if( itr != _vo.end() )
               abi_from_variant::extract( itr->value(), this->obj.*member, _resolver, _ctx );
         }

      private:
         const variant_object& _vo;
         Resolver _resolver;
         abi_traverse_context& _ctx;
   };

   template<typename M, typename Resolver, require_abi_t<M>>
   void abi_to_variant::add( mutable_variant_object &mvo, const char* name, const M& v, Resolver resolver, abi_traverse_context& ctx )
   {
      auto h = ctx.enter_scope();
      mutable_variant_object member_mvo;
      fc::reflector<M>::visit( impl::abi_to_variant_visitor<M, Resolver>( member_mvo, v, resolver, ctx) );
      mvo(name, std::move(member_mvo));
   }

   template<typename M, typename Resolver, require_abi_t<M>>
   void abi_from_variant::extract( const variant& v, M& o, Resolver resolver, abi_traverse_context& ctx )
   {
      auto h = ctx.enter_scope();
      const variant_object& vo = v.get_object();
      fc::reflector<M>::visit( abi_from_variant_visitor<M, decltype(resolver)>( vo, o, resolver, ctx ) );
   }
} /// namespace roxe::chain::impl

template<typename T, typename Resolver>
void abi_serializer::to_variant( const T& o, variant& vo, Resolver resolver, const fc::microseconds& max_serialization_time ) try {
   mutable_variant_object mvo;
   impl::abi_traverse_context ctx(max_serialization_time);
   impl::abi_to_variant::add(mvo, "_", o, resolver, ctx);
   vo = std::move(mvo["_"]);
} FC_RETHROW_EXCEPTIONS(error, "Failed to serialize: ${type}", ("type", boost::core::demangle( typeid(o).name() ) ))

template<typename T, typename Resolver>
void abi_serializer::from_variant( const variant& v, T& o, Resolver resolver, const fc::microseconds& max_serialization_time ) try {
   impl::abi_traverse_context ctx(max_serialization_time);
   impl::abi_from_variant::extract(v, o, resolver, ctx);
} FC_RETHROW_EXCEPTIONS(error, "Failed to deserialize variant", ("variant",v))

} } // roxe::chain
#include <onepass/clientWrapper.hpp>

using namespace onepass::client;

/// Parses the command line immediately so every accessor below is usable
/// right after construction.
ClientWrapper::ClientWrapper(int argc, char *argv[]) { handleProgramOptions(argc, argv); }

/// @return true when the option @p optName was supplied on the command line.
bool ClientWrapper::has(ParameterType optName) {
    return variablesMap.count(typeToString(optName)) > 0;
}

/// Prints the generated options description to stdout.
void ClientWrapper::printHelp() { description->print(std::cout); }

/// Out-parameter variant of get(); @p value is set to "" when the option is absent.
void ClientWrapper::get(ParameterType name, std::string &value) { value = get(name); }

/// @return the string value of option @p name, or "" when it was not supplied.
std::string ClientWrapper::get(ParameterType name) {
    if (has(name))
        return variablesMap[typeToString(name)].as<std::string>();
    return "";
}

/// Checks that every option required by @p parameter was supplied.
/// @throws std::invalid_argument for parameter kinds that cannot be verified.
bool ClientWrapper::verifyParams(const ParameterType parameter) {
    switch (parameter) {
    case ParameterType::help:
        return has(ParameterType::help);
    case ParameterType::version:
        return has(ParameterType::version);
    case ParameterType::file:
    case ParameterType::password:
        // file and password are only valid together.
        // BUG FIX: this case previously fell through into the `list` case when
        // both options were present, so the result depended on the unrelated
        // --list flag instead of being true.
        return has(ParameterType::file) && has(ParameterType::password);
    case ParameterType::list:
        return has(ParameterType::list);
    case ParameterType::init:
        return has(ParameterType::init) && has(ParameterType::password) && has(ParameterType::file);
    case ParameterType::add:
        // add needs key + credentials and at least one piece of content to store
        return has(ParameterType::add) && has(ParameterType::password) && has(ParameterType::file) &&
               has(ParameterType::key) &&
               (has(ParameterType::username) || has(ParameterType::value) ||
                has(ParameterType::tags) || has(ParameterType::url));
    case ParameterType::search:
        return has(ParameterType::search) && has(ParameterType::password) && has(ParameterType::file) &&
               has(ParameterType::keyword);
    default:
        throw std::invalid_argument("invalid param combination see help");
    }
}

/// @return the encryption algorithm requested on the command line;
/// Salsa2 is the default for a missing or unrecognized value.
onepass::storage::EncryptType ClientWrapper::getEncryptType() {
    if (has(ParameterType::encrypt)) {
        std::string encryptStr = get(ParameterType::encrypt);
        if (encryptStr == "none")
            return onepass::storage::EncryptType::None;
    }
    return onepass::storage::EncryptType::Salsa2;
}

/// @return the encoding requested on the command line;
/// None is the default for a missing or unrecognized value.
onepass::storage::EncodeType ClientWrapper::getEncodeType() {
    if (has(ParameterType::encoding)) {
        std::string encodingStr = get(ParameterType::encoding);
        if (encodingStr == "url")
            return onepass::storage::EncodeType::Url;
        if (encodingStr == "base64")
            return onepass::storage::EncodeType::Base64;
    }
    return onepass::storage::EncodeType::None;
}

/// Builds the boost::program_options description and parses argv into variablesMap.
/// NOTE(review): `description` is allocated with `new` and never freed — consider
/// making the member a std::unique_ptr; left as-is because the member is declared
/// in the header, outside this translation unit.
void ClientWrapper::handleProgramOptions(int argc, char *argv[]) {
    description = new po::options_description("All options provided for this program");
    description->add_options()
        (typeToString(ParameterType::help), "Print this message ")
        (typeToString(ParameterType::init), "Initialize a database")
        (typeToString(ParameterType::search), "search for entries")
        (typeToString(ParameterType::add), "Add or update an entry in the database")
        (typeToString(ParameterType::list),"shows the lists of passwords -keys- stored in the database")
        (typeToString(ParameterType::version),"shows OnePass version")
        (typeToString(ParameterType::encrypt), po::value<std::string>()->default_value(defEncrypt),
         "Encryption algorithm, posible values: none, salsa2")
        (typeToString(ParameterType::encoding), po::value<std::string>()->default_value(defEncode),
         "Encoding, posible values: none, base64, url")
        (typeToString(ParameterType::password), po::value<std::string>(), "Password for database")
        (typeToString(ParameterType::file), po::value<std::string>()->default_value(defDatabaseName),
         "File name and path for the database")
        (typeToString(ParameterType::key), po::value<std::string>(),
         "A unique name for an entry in the database")
        (typeToString(ParameterType::value), po::value<std::string>(), "A strong password")
        (typeToString(ParameterType::username), po::value<std::string>(),
         "The user name associated with the password, example: user@domain.com")
        (typeToString(ParameterType::tags), po::value<std::string>(),
         "comma separated tags, example: personal, email")
        (typeToString(ParameterType::url), po::value<std::string>(),
         "an url, example: https://www.domain.com")
        (typeToString(ParameterType::email), po::value<std::string>(), "set the email of the account")
        (typeToString(ParameterType::keyword), po::value<std::string>(), "a text to find inside database")
        (typeToString(ParameterType::show), "show the actual password")
        (typeToString(ParameterType::history), "shows modification history");

    po::store(po::parse_command_line(argc, argv, *description), variablesMap);
    po::notify(variablesMap);
}
// Copyright (c) 2011-2013 The Bitcoin Core developers
// Copyright (c) 2017-2018 The PIVX developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <boost/test/unit_test.hpp>
#include <stdint.h>
#include <sstream>
#include <iomanip>
#include <limits>
#include <cmath>
#include "uint256.h"
#include "arith_uint256.h"
#include <string>
#include "version.h"
#include "test/test_alqo.h"

BOOST_FIXTURE_TEST_SUITE(arith_uint256_tests, BasicTestingSetup)

/// Convert vector to arith_uint256, via uint256 blob
inline arith_uint256 arith_uint256V(const std::vector<unsigned char>& vch)
{
    return UintToArith256(uint256(vch));
}

// Fixed test vectors. The byte arrays are little-endian blobs; the *Hex
// strings and ToString() output are big-endian, hence the reversed order.
const unsigned char R1Array[] =
    "\x9c\x52\x4a\xdb\xcf\x56\x11\x12\x2b\x29\x12\x5e\x5d\x35\xd2\xd2"
    "\x22\x81\xaa\xb5\x33\xf0\x08\x32\xd5\x56\xb1\xf9\xea\xe5\x1d\x7d";
const char R1ArrayHex[] = "7D1DE5EAF9B156D53208F033B5AA8122D2d2355d5e12292b121156cfdb4a529c";
const double R1Ldouble = 0.4887374590559308955; // R1L equals roughly R1Ldouble * 2^256
const arith_uint256 R1L = arith_uint256V(std::vector<unsigned char>(R1Array,R1Array+32));
const uint64_t R1LLow64 = 0x121156cfdb4a529cULL;

const unsigned char R2Array[] =
    "\x70\x32\x1d\x7c\x47\xa5\x6b\x40\x26\x7e\x0a\xc3\xa6\x9c\xb6\xbf"
    "\x13\x30\x47\xa3\x19\x2d\xda\x71\x49\x13\x72\xf0\xb4\xca\x81\xd7";
const arith_uint256 R2L = arith_uint256V(std::vector<unsigned char>(R2Array,R2Array+32));

const char R1LplusR2L[] = "549FB09FEA236A1EA3E31D4D58F1B1369288D204211CA751527CFC175767850C";

const unsigned char ZeroArray[] =
    "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
    "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
const arith_uint256 ZeroL = arith_uint256V(std::vector<unsigned char>(ZeroArray,ZeroArray+32));

const unsigned char OneArray[] =
    "\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
    "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
const arith_uint256 OneL = arith_uint256V(std::vector<unsigned char>(OneArray,OneArray+32));

const unsigned char MaxArray[] =
    "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
    "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff";
const arith_uint256 MaxL = arith_uint256V(std::vector<unsigned char>(MaxArray,MaxArray+32));

const arith_uint256 HalfL = (OneL << 255); // only the most significant bit set

/// Render the little-endian byte array A as a big-endian hex string,
/// matching arith_uint256::ToString().
std::string ArrayToString(const unsigned char A[], unsigned int width)
{
    std::stringstream Stream;
    Stream << std::hex;
    for (unsigned int i = 0; i < width; ++i)
    {
        Stream<<std::setw(2)<<std::setfill('0')<<(unsigned int)A[width-i-1];
    }
    return Stream.str();
}

BOOST_AUTO_TEST_CASE( basics ) // constructors, equality, inequality
{
    BOOST_CHECK(1 == 0+1);
    // constructor arith_uint256(std::vector<char>):
    BOOST_CHECK(R1L.ToString() == ArrayToString(R1Array,32));
    BOOST_CHECK(R2L.ToString() == ArrayToString(R2Array,32));
    BOOST_CHECK(ZeroL.ToString() == ArrayToString(ZeroArray,32));
    BOOST_CHECK(OneL.ToString() == ArrayToString(OneArray,32));
    BOOST_CHECK(MaxL.ToString() == ArrayToString(MaxArray,32));
    BOOST_CHECK(OneL.ToString() != ArrayToString(ZeroArray,32));

    // == and !=
    BOOST_CHECK(R1L != R2L);
    BOOST_CHECK(ZeroL != OneL);
    BOOST_CHECK(OneL != ZeroL);
    BOOST_CHECK(MaxL != ZeroL);
    BOOST_CHECK(~MaxL == ZeroL);
    BOOST_CHECK( ((R1L ^ R2L) ^ R1L) == R2L);

    uint64_t Tmp64 = 0xc4dab720d9c7acaaULL;
    for (unsigned int i = 0; i < 256; ++i)
    {
        BOOST_CHECK(ZeroL != (OneL << i));
        BOOST_CHECK((OneL << i) != ZeroL);
        BOOST_CHECK(R1L != (R1L ^ (OneL << i)));
        BOOST_CHECK(((arith_uint256(Tmp64) ^ (OneL << i) ) != Tmp64 ));
    }
    // shifting by the full width yields zero
    BOOST_CHECK(ZeroL == (OneL << 256));

    // String Constructor and Copy Constructor
    BOOST_CHECK(arith_uint256("0x"+R1L.ToString()) == R1L);
    BOOST_CHECK(arith_uint256("0x"+R2L.ToString()) == R2L);
    BOOST_CHECK(arith_uint256("0x"+ZeroL.ToString()) == ZeroL);
    BOOST_CHECK(arith_uint256("0x"+OneL.ToString()) == OneL);
    BOOST_CHECK(arith_uint256("0x"+MaxL.ToString()) == MaxL);
    BOOST_CHECK(arith_uint256(R1L.ToString()) == R1L);
    BOOST_CHECK(arith_uint256(" 0x"+R1L.ToString()+" ") == R1L); // surrounding whitespace ignored
    BOOST_CHECK(arith_uint256("") == ZeroL);
    BOOST_CHECK(R1L == arith_uint256(R1ArrayHex));
    BOOST_CHECK(arith_uint256(R1L) == R1L);
    BOOST_CHECK((arith_uint256(R1L^R2L)^R2L) == R1L);
    BOOST_CHECK(arith_uint256(ZeroL) == ZeroL);
    BOOST_CHECK(arith_uint256(OneL) == OneL);

    // uint64_t constructor
    BOOST_CHECK( (R1L & arith_uint256("0xffffffffffffffff")) == arith_uint256(R1LLow64));
    BOOST_CHECK(ZeroL == arith_uint256(0));
    BOOST_CHECK(OneL == arith_uint256(1));
    BOOST_CHECK(arith_uint256("0xffffffffffffffff") == arith_uint256(0xffffffffffffffffULL));

    // Assignment (from base_uint)
    arith_uint256 tmpL = ~ZeroL; BOOST_CHECK(tmpL == ~ZeroL);
    tmpL = ~OneL; BOOST_CHECK(tmpL == ~OneL);
    tmpL = ~R1L; BOOST_CHECK(tmpL == ~R1L);
    tmpL = ~R2L; BOOST_CHECK(tmpL == ~R2L);
    tmpL = ~MaxL; BOOST_CHECK(tmpL == ~MaxL);
}

/// Reference right-shift on a raw little-endian byte array.
void shiftArrayRight(unsigned char* to, const unsigned char* from, unsigned int arrayLength, unsigned int bitsToShift)
{
    for (unsigned int T=0; T < arrayLength; ++T)
    {
        unsigned int F = (T+bitsToShift/8);
        if (F < arrayLength)
            to[T] = from[F] >> (bitsToShift%8);
        else
            to[T] = 0;
        if (F + 1 < arrayLength)
            to[T] |= from[(F+1)] << (8-bitsToShift%8);
    }
}

/// Reference left-shift on a raw little-endian byte array.
void shiftArrayLeft(unsigned char* to, const unsigned char* from, unsigned int arrayLength, unsigned int bitsToShift)
{
    for (unsigned int T=0; T < arrayLength; ++T)
    {
        if (T >= bitsToShift/8)
        {
            unsigned int F = T-bitsToShift/8;
            to[T] = from[F] << (bitsToShift%8);
            if (T >= bitsToShift/8+1)
                to[T] |= from[F-1] >> (8-bitsToShift%8);
        }
        else
        {
            to[T] = 0;
        }
    }
}

BOOST_AUTO_TEST_CASE( shifts ) { // "<<"  ">>"  "<<="  ">>="
    unsigned char TmpArray[32];
    arith_uint256 TmpL;
    // compare each shift against the reference byte-array implementation
    for (unsigned int i = 0; i < 256; ++i)
    {
        shiftArrayLeft(TmpArray, OneArray, 32, i);
        BOOST_CHECK(arith_uint256V(std::vector<unsigned char>(TmpArray,TmpArray+32)) == (OneL << i));
        TmpL = OneL; TmpL <<= i;
        BOOST_CHECK(TmpL == (OneL << i));
        BOOST_CHECK((HalfL >> (255-i)) == (OneL << i));
        TmpL = HalfL; TmpL >>= (255-i);
        BOOST_CHECK(TmpL == (OneL << i));

        shiftArrayLeft(TmpArray, R1Array, 32, i);
        BOOST_CHECK(arith_uint256V(std::vector<unsigned char>(TmpArray,TmpArray+32)) == (R1L << i));
        TmpL = R1L; TmpL <<= i;
        BOOST_CHECK(TmpL == (R1L << i));

        shiftArrayRight(TmpArray, R1Array, 32, i);
        BOOST_CHECK(arith_uint256V(std::vector<unsigned char>(TmpArray,TmpArray+32)) == (R1L >> i));
        TmpL = R1L; TmpL >>= i;
        BOOST_CHECK(TmpL == (R1L >> i));

        shiftArrayLeft(TmpArray, MaxArray, 32, i);
        BOOST_CHECK(arith_uint256V(std::vector<unsigned char>(TmpArray,TmpArray+32)) == (MaxL << i));
        TmpL = MaxL; TmpL <<= i;
        BOOST_CHECK(TmpL == (MaxL << i));

        shiftArrayRight(TmpArray, MaxArray, 32, i);
        BOOST_CHECK(arith_uint256V(std::vector<unsigned char>(TmpArray,TmpArray+32)) == (MaxL >> i));
        TmpL = MaxL; TmpL >>= i;
        BOOST_CHECK(TmpL == (MaxL >> i));
    }
    arith_uint256 c1L = arith_uint256(0x0123456789abcdefULL);
    arith_uint256 c2L = c1L << 128;
    for (unsigned int i = 0; i < 128; ++i) {
        BOOST_CHECK((c1L << i) == (c2L >> (128-i)));
    }
    for (unsigned int i = 128; i < 256; ++i) {
        BOOST_CHECK((c1L << i) == (c2L << (i-128)));
    }
}

BOOST_AUTO_TEST_CASE( unaryOperators ) // !    ~    -
{
    BOOST_CHECK(!ZeroL);
    BOOST_CHECK(!(!OneL));
    for (unsigned int i = 0; i < 256; ++i)
        BOOST_CHECK(!(!(OneL<<i)));
    BOOST_CHECK(!(!R1L));
    BOOST_CHECK(!(!MaxL));

    BOOST_CHECK(~ZeroL == MaxL);

    unsigned char TmpArray[32];
    for (unsigned int i = 0; i < 32; ++i) { TmpArray[i] = ~R1Array[i]; }
    BOOST_CHECK(arith_uint256V(std::vector<unsigned char>(TmpArray,TmpArray+32)) == (~R1L));

    // two's-complement negation
    BOOST_CHECK(-ZeroL == ZeroL);
    BOOST_CHECK(-R1L == (~R1L)+1);
    for (unsigned int i = 0; i < 256; ++i)
        BOOST_CHECK(-(OneL<<i) == (MaxL << i));
}

// Check if doing _A_ _OP_ _B_ results in the same as applying _OP_ onto each
// element of Aarray and Barray, and then converting the result into a arith_uint256.
#define CHECKBITWISEOPERATOR(_A_,_B_,_OP_) \
    for (unsigned int i = 0; i < 32; ++i) { TmpArray[i] = _A_##Array[i] _OP_ _B_##Array[i]; } \
    BOOST_CHECK(arith_uint256V(std::vector<unsigned char>(TmpArray,TmpArray+32)) == (_A_##L _OP_ _B_##L));

#define CHECKASSIGNMENTOPERATOR(_A_,_B_,_OP_) \
    TmpL = _A_##L; TmpL _OP_##= _B_##L; BOOST_CHECK(TmpL == (_A_##L _OP_ _B_##L));

BOOST_AUTO_TEST_CASE( bitwiseOperators )
{
    unsigned char TmpArray[32];

    CHECKBITWISEOPERATOR(R1,R2,|)
    CHECKBITWISEOPERATOR(R1,R2,^)
    CHECKBITWISEOPERATOR(R1,R2,&)
    CHECKBITWISEOPERATOR(R1,Zero,|)
    CHECKBITWISEOPERATOR(R1,Zero,^)
    CHECKBITWISEOPERATOR(R1,Zero,&)
    CHECKBITWISEOPERATOR(R1,Max,|)
    CHECKBITWISEOPERATOR(R1,Max,^)
    CHECKBITWISEOPERATOR(R1,Max,&)
    CHECKBITWISEOPERATOR(Zero,R1,|)
    CHECKBITWISEOPERATOR(Zero,R1,^)
    CHECKBITWISEOPERATOR(Zero,R1,&)
    CHECKBITWISEOPERATOR(Max,R1,|)
    CHECKBITWISEOPERATOR(Max,R1,^)
    CHECKBITWISEOPERATOR(Max,R1,&)

    arith_uint256 TmpL;
    CHECKASSIGNMENTOPERATOR(R1,R2,|)
    CHECKASSIGNMENTOPERATOR(R1,R2,^)
    CHECKASSIGNMENTOPERATOR(R1,R2,&)
    CHECKASSIGNMENTOPERATOR(R1,Zero,|)
    CHECKASSIGNMENTOPERATOR(R1,Zero,^)
    CHECKASSIGNMENTOPERATOR(R1,Zero,&)
    CHECKASSIGNMENTOPERATOR(R1,Max,|)
    CHECKASSIGNMENTOPERATOR(R1,Max,^)
    CHECKASSIGNMENTOPERATOR(R1,Max,&)
    CHECKASSIGNMENTOPERATOR(Zero,R1,|)
    CHECKASSIGNMENTOPERATOR(Zero,R1,^)
    CHECKASSIGNMENTOPERATOR(Zero,R1,&)
    CHECKASSIGNMENTOPERATOR(Max,R1,|)
    CHECKASSIGNMENTOPERATOR(Max,R1,^)
    CHECKASSIGNMENTOPERATOR(Max,R1,&)

    uint64_t Tmp64 = 0xe1db685c9a0b47a2ULL;
    TmpL = R1L; TmpL |= Tmp64;  BOOST_CHECK(TmpL == (R1L | arith_uint256(Tmp64)));
    TmpL = R1L; TmpL |= 0; BOOST_CHECK(TmpL == R1L);
    TmpL ^= 0; BOOST_CHECK(TmpL == R1L);
    TmpL ^= Tmp64;  BOOST_CHECK(TmpL == (R1L ^ arith_uint256(Tmp64)));
}

BOOST_AUTO_TEST_CASE( comparison ) // <= >= < >
{
    arith_uint256 TmpL;
    for (unsigned int i = 0; i < 256; ++i)
    {
        TmpL= OneL<< i;
        BOOST_CHECK( TmpL >= ZeroL && TmpL > ZeroL && ZeroL < TmpL && ZeroL <= TmpL);
        BOOST_CHECK( TmpL >= 0 && TmpL > 0 && 0 < TmpL && 0 <= TmpL);
        TmpL |= R1L;
BOOST_CHECK( TmpL >= R1L ); BOOST_CHECK( (TmpL == R1L) != (TmpL > R1L)); BOOST_CHECK( (TmpL == R1L) || !( TmpL <= R1L)); BOOST_CHECK( R1L <= TmpL ); BOOST_CHECK( (R1L == TmpL) != (R1L < TmpL)); BOOST_CHECK( (TmpL == R1L) || !( R1L >= TmpL)); BOOST_CHECK(! (TmpL < R1L)); BOOST_CHECK(! (R1L > TmpL)); } } BOOST_AUTO_TEST_CASE( plusMinus ) { arith_uint256 TmpL = 0; BOOST_CHECK(R1L+R2L == arith_uint256(R1LplusR2L)); TmpL += R1L; BOOST_CHECK(TmpL == R1L); TmpL += R2L; BOOST_CHECK(TmpL == R1L + R2L); BOOST_CHECK(OneL+MaxL == ZeroL); BOOST_CHECK(MaxL+OneL == ZeroL); for (unsigned int i = 1; i < 256; ++i) { BOOST_CHECK( (MaxL >> i) + OneL == (HalfL >> (i-1)) ); BOOST_CHECK( OneL + (MaxL >> i) == (HalfL >> (i-1)) ); TmpL = (MaxL>>i); TmpL += OneL; BOOST_CHECK( TmpL == (HalfL >> (i-1)) ); TmpL = (MaxL>>i); TmpL += 1; BOOST_CHECK( TmpL == (HalfL >> (i-1)) ); TmpL = (MaxL>>i); BOOST_CHECK( TmpL++ == (MaxL>>i) ); BOOST_CHECK( TmpL == (HalfL >> (i-1))); } BOOST_CHECK(arith_uint256(0xbedc77e27940a7ULL) + 0xee8d836fce66fbULL == arith_uint256(0xbedc77e27940a7ULL + 0xee8d836fce66fbULL)); TmpL = arith_uint256(0xbedc77e27940a7ULL); TmpL += 0xee8d836fce66fbULL; BOOST_CHECK(TmpL == arith_uint256(0xbedc77e27940a7ULL+0xee8d836fce66fbULL)); TmpL -= 0xee8d836fce66fbULL; BOOST_CHECK(TmpL == 0xbedc77e27940a7ULL); TmpL = R1L; BOOST_CHECK(++TmpL == R1L+1); BOOST_CHECK(R1L -(-R2L) == R1L+R2L); BOOST_CHECK(R1L -(-OneL) == R1L+OneL); BOOST_CHECK(R1L - OneL == R1L+(-OneL)); for (unsigned int i = 1; i < 256; ++i) { BOOST_CHECK((MaxL>>i) - (-OneL) == (HalfL >> (i-1))); BOOST_CHECK((HalfL >> (i-1)) - OneL == (MaxL>>i)); TmpL = (HalfL >> (i-1)); BOOST_CHECK(TmpL-- == (HalfL >> (i-1))); BOOST_CHECK(TmpL == (MaxL >> i)); TmpL = (HalfL >> (i-1)); BOOST_CHECK(--TmpL == (MaxL >> i)); } TmpL = R1L; BOOST_CHECK(--TmpL == R1L-1); } BOOST_AUTO_TEST_CASE( multiply ) { BOOST_CHECK((R1L * R1L).ToString() == "62a38c0486f01e45879d7910a7761bf30d5237e9873f9bff3642a732c4d84f10"); BOOST_CHECK((R1L * R2L).ToString() == 
"de37805e9986996cfba76ff6ba51c008df851987d9dd323f0e5de07760529c40"); BOOST_CHECK((R1L * ZeroL) == ZeroL); BOOST_CHECK((R1L * OneL) == R1L); BOOST_CHECK((R1L * MaxL) == -R1L); BOOST_CHECK((R2L * R1L) == (R1L * R2L)); BOOST_CHECK((R2L * R2L).ToString() == "ac8c010096767d3cae5005dec28bb2b45a1d85ab7996ccd3e102a650f74ff100"); BOOST_CHECK((R2L * ZeroL) == ZeroL); BOOST_CHECK((R2L * OneL) == R2L); BOOST_CHECK((R2L * MaxL) == -R2L); BOOST_CHECK(MaxL * MaxL == OneL); BOOST_CHECK((R1L * 0) == 0); BOOST_CHECK((R1L * 1) == R1L); BOOST_CHECK((R1L * 3).ToString() == "7759b1c0ed14047f961ad09b20ff83687876a0181a367b813634046f91def7d4"); BOOST_CHECK((R2L * 0x87654321UL).ToString() == "23f7816e30c4ae2017257b7a0fa64d60402f5234d46e746b61c960d09a26d070"); } BOOST_AUTO_TEST_CASE( divide ) { arith_uint256 D1L("AD7133AC1977FA2B7"); arith_uint256 D2L("ECD751716"); BOOST_CHECK((R1L / D1L).ToString() == "00000000000000000b8ac01106981635d9ed112290f8895545a7654dde28fb3a"); BOOST_CHECK((R1L / D2L).ToString() == "000000000873ce8efec5b67150bad3aa8c5fcb70e947586153bf2cec7c37c57a"); BOOST_CHECK(R1L / OneL == R1L); BOOST_CHECK(R1L / MaxL == ZeroL); BOOST_CHECK(MaxL / R1L == 2); BOOST_CHECK_THROW(R1L / ZeroL, uint_error); BOOST_CHECK((R2L / D1L).ToString() == "000000000000000013e1665895a1cc981de6d93670105a6b3ec3b73141b3a3c5"); BOOST_CHECK((R2L / D2L).ToString() == "000000000e8f0abe753bb0afe2e9437ee85d280be60882cf0bd1aaf7fa3cc2c4"); BOOST_CHECK(R2L / OneL == R2L); BOOST_CHECK(R2L / MaxL == ZeroL); BOOST_CHECK(MaxL / R2L == 1); BOOST_CHECK_THROW(R2L / ZeroL, uint_error); } bool almostEqual(double d1, double d2) { return fabs(d1-d2) <= 4*fabs(d1)*std::numeric_limits<double>::epsilon(); } BOOST_AUTO_TEST_CASE( methods ) // GetHex SetHex size() GetLow64 GetSerializeSize, Serialize, Unserialize { BOOST_CHECK(R1L.GetHex() == R1L.ToString()); BOOST_CHECK(R2L.GetHex() == R2L.ToString()); BOOST_CHECK(OneL.GetHex() == OneL.ToString()); BOOST_CHECK(MaxL.GetHex() == MaxL.ToString()); arith_uint256 TmpL(R1L); 
BOOST_CHECK(TmpL == R1L); TmpL.SetHex(R2L.ToString()); BOOST_CHECK(TmpL == R2L); TmpL.SetHex(ZeroL.ToString()); BOOST_CHECK(TmpL == 0); TmpL.SetHex(HalfL.ToString()); BOOST_CHECK(TmpL == HalfL); TmpL.SetHex(R1L.ToString()); BOOST_CHECK(R1L.size() == 32); BOOST_CHECK(R2L.size() == 32); BOOST_CHECK(ZeroL.size() == 32); BOOST_CHECK(MaxL.size() == 32); BOOST_CHECK(R1L.GetLow64() == R1LLow64); BOOST_CHECK(HalfL.GetLow64() ==0x0000000000000000ULL); BOOST_CHECK(OneL.GetLow64() ==0x0000000000000001ULL); for (unsigned int i = 0; i < 255; ++i) { BOOST_CHECK((OneL << i).getdouble() == ldexp(1.0,i)); } BOOST_CHECK(ZeroL.getdouble() == 0.0); for (int i = 256; i > 53; --i) BOOST_CHECK(almostEqual((R1L>>(256-i)).getdouble(), ldexp(R1Ldouble,i))); uint64_t R1L64part = (R1L>>192).GetLow64(); for (int i = 53; i > 0; --i) // doubles can store all integers in {0,...,2^54-1} exactly { BOOST_CHECK((R1L>>(256-i)).getdouble() == (double)(R1L64part >> (64-i))); } } BOOST_AUTO_TEST_CASE(bignum_SetCompact) { arith_uint256 num; bool fNegative; bool fOverflow; num.SetCompact(0, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000000000"); BOOST_CHECK_EQUAL(num.GetCompact(), 0U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x00123456, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000000000"); BOOST_CHECK_EQUAL(num.GetCompact(), 0U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x01003456, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000000000"); BOOST_CHECK_EQUAL(num.GetCompact(), 0U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x02000056, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000000000"); 
BOOST_CHECK_EQUAL(num.GetCompact(), 0U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x03000000, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000000000"); BOOST_CHECK_EQUAL(num.GetCompact(), 0U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x04000000, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000000000"); BOOST_CHECK_EQUAL(num.GetCompact(), 0U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x00923456, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000000000"); BOOST_CHECK_EQUAL(num.GetCompact(), 0U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x01803456, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000000000"); BOOST_CHECK_EQUAL(num.GetCompact(), 0U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x02800056, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000000000"); BOOST_CHECK_EQUAL(num.GetCompact(), 0U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x03800000, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000000000"); BOOST_CHECK_EQUAL(num.GetCompact(), 0U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x04800000, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000000000"); BOOST_CHECK_EQUAL(num.GetCompact(), 0U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); 
num.SetCompact(0x01123456, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000000012"); BOOST_CHECK_EQUAL(num.GetCompact(), 0x01120000U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); // Make sure that we don't generate compacts with the 0x00800000 bit set num = 0x80; BOOST_CHECK_EQUAL(num.GetCompact(), 0x02008000U); num.SetCompact(0x01fedcba, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "000000000000000000000000000000000000000000000000000000000000007e"); BOOST_CHECK_EQUAL(num.GetCompact(true), 0x01fe0000U); BOOST_CHECK_EQUAL(fNegative, true); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x02123456, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000001234"); BOOST_CHECK_EQUAL(num.GetCompact(), 0x02123400U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x03123456, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000000123456"); BOOST_CHECK_EQUAL(num.GetCompact(), 0x03123456U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x04123456, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000012345600"); BOOST_CHECK_EQUAL(num.GetCompact(), 0x04123456U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x04923456, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000012345600"); BOOST_CHECK_EQUAL(num.GetCompact(true), 0x04923456U); BOOST_CHECK_EQUAL(fNegative, true); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x05009234, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "0000000000000000000000000000000000000000000000000000000092340000"); BOOST_CHECK_EQUAL(num.GetCompact(), 
0x05009234U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0x20123456, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(num.GetHex(), "1234560000000000000000000000000000000000000000000000000000000000"); BOOST_CHECK_EQUAL(num.GetCompact(), 0x20123456U); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, false); num.SetCompact(0xff123456, &fNegative, &fOverflow); BOOST_CHECK_EQUAL(fNegative, false); BOOST_CHECK_EQUAL(fOverflow, true); } BOOST_AUTO_TEST_CASE( getmaxcoverage ) // some more tests just to get 100% coverage { // ~R1L give a base_uint<256> BOOST_CHECK((~~R1L >> 10) == (R1L >> 10)); BOOST_CHECK((~~R1L << 10) == (R1L << 10)); BOOST_CHECK(!(~~R1L < R1L)); BOOST_CHECK(~~R1L <= R1L); BOOST_CHECK(!(~~R1L > R1L)); BOOST_CHECK(~~R1L >= R1L); BOOST_CHECK(!(R1L < ~~R1L)); BOOST_CHECK(R1L <= ~~R1L); BOOST_CHECK(!(R1L > ~~R1L)); BOOST_CHECK(R1L >= ~~R1L); BOOST_CHECK(~~R1L + R2L == R1L + ~~R2L); BOOST_CHECK(~~R1L - R2L == R1L - ~~R2L); BOOST_CHECK(~R1L != R1L); BOOST_CHECK(R1L != ~R1L); unsigned char TmpArray[32]; CHECKBITWISEOPERATOR(~R1,R2,|) CHECKBITWISEOPERATOR(~R1,R2,^) CHECKBITWISEOPERATOR(~R1,R2,&) CHECKBITWISEOPERATOR(R1,~R2,|) CHECKBITWISEOPERATOR(R1,~R2,^) CHECKBITWISEOPERATOR(R1,~R2,&) } BOOST_AUTO_TEST_SUITE_END()
#include <vector>
#include <list>
#include <map>
#include <set>
#include <deque>
#include <stack>
#include <bitset>
#include <algorithm>
#include <functional>
#include <numeric>
#include <utility>
#include <sstream>
#include <iostream>
#include <iomanip>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <ctime>
#include <cstring>

using namespace std;

// Counts the orderings of a multiset of labels (a copies of type 1, b of
// type 2, c of type 3, d of type 4) in which no two adjacent labels are equal.
class TreeSpreading {
public:
    long long countArrangements(int, int, int, int);
};

// dp[a][b][c][d][last]: number of valid ways to place the remaining a/b/c/d
// labels when the most recently placed label type was `last` (0 = nothing
// placed yet).  -1 marks "not yet computed".
// NOTE: each count is assumed to be <= 14 (array bound 15); larger inputs
// would index out of bounds.
long long dp[15][15][15][15][5];

// Recursive memoized counter: try every label type that still has copies
// left and differs from the previously placed type.
long long func(int a, int b, int c, int d, int last)
{
    if (a + b + c + d == 0) return 1LL;  // nothing left: exactly one (empty) arrangement
    long long& ans = dp[a][b][c][d][last];
    if (ans != -1LL) return ans;         // already memoized
    ans = 0LL;
    if (last != 1 && a > 0) ans += func(a - 1, b, c, d, 1);
    if (last != 2 && b > 0) ans += func(a, b - 1, c, d, 2);
    if (last != 3 && c > 0) ans += func(a, b, c - 1, d, 3);
    if (last != 4 && d > 0) ans += func(a, b, c, d - 1, 4);
    return ans;
}

long long TreeSpreading::countArrangements(int a, int b, int c, int d)
{
    // Reset the memo table for this query.  std::fill_n replaces the previous
    // memset(dp, -1LL, sizeof(dp)): memset narrows -1LL to an int byte value
    // and only worked because -1 happens to be all-one bits; fill_n assigns
    // the actual value and states the intent directly.
    fill_n(&dp[0][0][0][0][0], sizeof(dp) / sizeof(long long), -1LL);
    return func(a, b, c, d, 0);
}

//Powered by [KawigiEdit] 2.0!
/**
 * @file dual_perceptron.cpp
 * @author Chase Geigle
 */

#include <numeric>
#include <random>

#include "meta/classify/kernel/all.h"
#include "meta/classify/classifier/dual_perceptron.h"
#include "meta/index/postings_data.h"
#include "meta/io/packed.h"
#include "meta/util/functional.h"
#include "meta/util/printing.h"
#include "meta/util/progress.h"
#include "meta/utf/utf.h"

namespace meta
{
namespace classify
{

const util::string_view dual_perceptron::id = "dual-perceptron";
const constexpr double dual_perceptron::default_alpha;
const constexpr double dual_perceptron::default_gamma;
const constexpr double dual_perceptron::default_bias;
const constexpr uint64_t dual_perceptron::default_max_iter;

/**
 * Constructs and immediately trains a dual perceptron on the given dataset
 * view using the supplied kernel function and hyperparameters.
 */
dual_perceptron::dual_perceptron(multiclass_dataset_view docs,
                                 std::unique_ptr<kernel::kernel> kernel_fn,
                                 double alpha, double gamma, double bias,
                                 uint64_t max_iter)
    : kernel_{std::move(kernel_fn)},
      alpha_{alpha},
      gamma_{gamma},
      bias_{bias},
      max_iter_{max_iter}
{
    train(std::move(docs));
}

/**
 * Deserializes a previously trained model from a stream.
 * The read order mirrors save() exactly: hyperparameters, mistake counts,
 * support vectors, then the kernel. (The model id written first by save()
 * is assumed to have been consumed by the caller — TODO confirm against
 * the factory that dispatches on id.)
 */
dual_perceptron::dual_perceptron(std::istream& in)
    : alpha_{io::packed::read<double>(in)},
      gamma_{io::packed::read<double>(in)},
      bias_{io::packed::read<double>(in)},
      max_iter_{io::packed::read<uint64_t>(in)}
{
    // mistake counts
    auto size = io::packed::read<std::size_t>(in);
    for (std::size_t i = 0; i < size; ++i)
    {
        auto lbl = io::packed::read<class_label>(in);
        auto& map_ref = weights_[lbl];
        auto isize = io::packed::read<std::size_t>(in);
        for (std::size_t j = 0; j < isize; ++j)
        {
            auto id = io::packed::read<learn::instance_id>(in);
            auto weight = io::packed::read<uint64_t>(in);
            map_ref.emplace(id, weight);
        }
    }

    // support vectors
    io::packed::read(in, size);
    for (std::size_t i = 0; i < size; ++i)
    {
        auto id = io::packed::read<learn::instance_id>(in);
        auto& map_ref = svs_[id];
        auto isize = io::packed::read<std::size_t>(in);
        for (std::size_t j = 0; j < isize; ++j)
        {
            auto fid = io::packed::read<learn::feature_id>(in);
            auto weight = io::packed::read<double>(in);
            map_ref.emplace_back(fid, weight);
        }
    }
    // kernel function
    kernel_ = kernel::load_kernel(in);
}

/**
 * Serializes the model. Write order must stay in sync with the
 * stream constructor above: id, hyperparameters, mistake counts,
 * support vectors, kernel.
 */
void dual_perceptron::save(std::ostream& out) const
{
    io::packed::write(out, id);

    // training parameters
    io::packed::write(out, alpha_);
    io::packed::write(out, gamma_);
    io::packed::write(out, bias_);
    io::packed::write(out, max_iter_);

    // mistake counts
    io::packed::write(out, weights_.size());
    for (const auto& pr : weights_)
    {
        io::packed::write(out, pr.first);
        io::packed::write(out, pr.second.size());
        for (const auto& ipr : pr.second)
        {
            io::packed::write(out, ipr.first);
            io::packed::write(out, ipr.second);
        }
    }

    // support vectors
    io::packed::write(out, svs_.size());
    for (const auto& pr : svs_)
    {
        io::packed::write(out, pr.first);
        io::packed::write(out, pr.second.size());
        for (const auto& ipr : pr.second)
        {
            io::packed::write(out, ipr.first);
            io::packed::write(out, ipr.second);
        }
    }

    // kernel function
    kernel_->save(out);
}

/**
 * Mistake-driven training loop: for every misclassified instance, remember
 * it as a support vector, decrement the mistake count credited to the wrong
 * guess, and increment the count for the true label. Stops early once the
 * per-pass error rate drops below gamma_.
 */
void dual_perceptron::train(multiclass_dataset_view docs)
{
    // ensure every label has an (initially empty) mistake map so classify()
    // iterates over all classes from the first pass on
    for (auto it = docs.labels_begin(), end = docs.labels_end(); it != end; ++it)
        weights_[it->first] = {};
    for (uint64_t iter = 0; iter < max_iter_; ++iter)
    {
        docs.shuffle();
        uint64_t error_count = 0;
        std::stringstream ss;
        ss << " > iteration " << iter << ": ";
        printing::progress progress{ss.str(), docs.size()};
        uint64_t doc = 0;
        for (const auto& instance : docs)
        {
            progress(doc++);
            auto guess = classify(instance.weights);
            auto actual = docs.label(instance);
            if (guess != actual)
            {
                ++error_count;
                // memorize the training instance if we haven't already
                if (svs_.find(instance.id) == svs_.end())
                    svs_[instance.id] = instance.weights;
                decrease_weight(guess, instance.id);
                weights_[actual][instance.id]++;
            }
        }
        if (static_cast<double>(error_count) / docs.size() < gamma_)
            break;
    }
}

/**
 * Decrements the mistake count for (label, d_id), erasing the entry when it
 * reaches zero; a no-op if the pair was never credited with a mistake.
 */
void dual_perceptron::decrease_weight(const class_label& label,
                                      const learn::instance_id& d_id)
{
    auto it = weights_[label].find(d_id);
    if (it == weights_[label].end())
        return;
    --it->second;
    if (it->second == 0)
        weights_[label].erase(it);
}

/**
 * Scores each label as alpha_ * sum of mistake-count-weighted kernel
 * evaluations against its support vectors (plus bias_ per term) and returns
 * the best. NOTE(review): best_dot starts at 0, so a label can only win with
 * a strictly positive score; if all scores are <= 0 the first label in
 * weights_ is returned — confirm this tie/negative behavior is intended.
 */
class_label dual_perceptron::classify(const feature_vector& doc) const
{
    class_label best_label = weights_.begin()->first;
    double best_dot = 0;
    for (const auto& w : weights_)
    {
        double dot = 0;
        for (const auto& mistakes : w.second)
        {
            dot += mistakes.second
                   * ((*kernel_)(doc, svs_.at(mistakes.first)) + bias_);
        }
        dot *= alpha_;
        if (dot > best_dot)
        {
            best_dot = dot;
            best_label = w.first;
        }
    }
    return best_label;
}

/**
 * Factory specialization: reads hyperparameters from the config table
 * (falling back to the class defaults) and trains a new model; uses a
 * polynomial kernel when no [kernel] table is present.
 */
template <>
std::unique_ptr<classifier>
    make_classifier<dual_perceptron>(const cpptoml::table& config,
                                     multiclass_dataset_view training)
{
    auto alpha
        = config.get_as<double>("alpha").value_or(dual_perceptron::default_alpha);
    auto gamma
        = config.get_as<double>("gamma").value_or(dual_perceptron::default_gamma);
    auto bias
        = config.get_as<double>("bias").value_or(dual_perceptron::default_bias);
    auto max_iter = config.get_as<int64_t>("max-iter")
                        .value_or(dual_perceptron::default_max_iter);

    auto kernel_cfg = config.get_table("kernel");
    if (!kernel_cfg)
        return make_unique<dual_perceptron>(std::move(training),
                                            make_unique<kernel::polynomial>(),
                                            alpha, gamma, bias, max_iter);

    return make_unique<dual_perceptron>(std::move(training),
                                        kernel::make_kernel(*kernel_cfg),
                                        alpha, gamma, bias, max_iter);
}
}
}
//---------------------------------------------------------------------------// // Copyright (c) 2018-2021 Mikhail Komarov <nemo@nil.foundation> // Copyright (c) 2020-2021 Nikita Kaskov <nbering@nil.foundation> // // MIT License // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. //---------------------------------------------------------------------------// // @file Parameters for *single-predicate* ppzkPCD for R1CS. 
//---------------------------------------------------------------------------// #ifndef CRYPTO3_ZK_R1CS_SP_PPZKPCD_PARAMS_HPP #define CRYPTO3_ZK_R1CS_SP_PPZKPCD_PARAMS_HPP #include <nil/crypto3/zk/snark/systems/pcd/r1cs_pcd/compliance_predicate.hpp> #include <nil/crypto3/zk/snark/systems/pcd/r1cs_pcd/r1cs_pcd_params.hpp> namespace nil { namespace crypto3 { namespace zk { namespace snark { template<typename PCD_ppT> using r1cs_sp_ppzkpcd_compliance_predicate = r1cs_pcd_compliance_predicate<algebra::Fr<typename PCD_ppT::curve_A_pp>>; template<typename PCD_ppT> using r1cs_sp_ppzkpcd_message = r1cs_pcd_message<algebra::Fr<typename PCD_ppT::curve_A_pp>>; template<typename PCD_ppT> using r1cs_sp_ppzkpcd_local_data = r1cs_pcd_local_data<algebra::Fr<typename PCD_ppT::curve_A_pp>>; template<typename PCD_ppT> using r1cs_sp_ppzkpcd_primary_input = r1cs_pcd_compliance_predicate_primary_input<algebra::Fr<typename PCD_ppT::curve_A_pp>>; template<typename PCD_ppT> using r1cs_sp_ppzkpcd_auxiliary_input = r1cs_pcd_compliance_predicate_auxiliary_input<algebra::Fr<typename PCD_ppT::curve_A_pp>>; } // namespace snark } // namespace zk } // namespace crypto3 } // namespace nil #endif // R1CS_SP_PPZKPCD_PARAMS_HPP
/* * * Copyright (C) 2001-2016, OFFIS e.V. * All rights reserved. See COPYRIGHT file for details. * * This software and supporting documentation were developed by * * OFFIS e.V. * R&D Division Health * Escherweg 2 * D-26121 Oldenburg, Germany * * * As an exception of the above notice, the code for OFStandard::strlcpy * and OFStandard::strlcat in this file have been derived from the BSD * implementation which carries the following copyright notice: * * Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com> * All rights reserved. See COPYRIGHT file for details. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * * The code for OFStandard::atof has been derived from an implementation * which carries the following copyright notice: * * Copyright 1988 Regents of the University of California * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby granted, * provided that the above copyright notice appear in all copies. The * University of California makes no representations about the * suitability of this software for any purpose. It is provided "as * is" without express or implied warranty. * * * The code for OFStandard::ftoa has been derived from an implementation * which carries the following copyright notice: * * Copyright (c) 1988 Regents of the University of California. * All rights reserved. See COPYRIGHT file for details. * * Redistribution and use in source and binary forms are permitted * provided that the above copyright notice and this paragraph are * duplicated in all such forms and that any documentation, * advertising materials, and other materials related to such * distribution and use acknowledge that the software was developed * by the University of California, Berkeley. The name of the * University may not be used to endorse or promote products derived * from this software without specific prior written permission. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. * * * The "Base64" encoder/decoder has been derived from an implementation * with the following copyright notice: * * Copyright (c) 1999, Bob Withers - bwit@pobox.com * * This code may be freely used for any purpose, either personal or * commercial, provided the authors copyright notice remains intact. 
* * * Module: ofstd * * Author: Joerg Riesmeier, Marco Eichelberg * * Purpose: Class for various helper functions * */ #include "dcmtk/config/osconfig.h" /* make sure OS specific configuration is included first */ #include "dcmtk/ofstd/ofstd.h" #include "dcmtk/ofstd/ofcond.h" #include "dcmtk/ofstd/offile.h" #include "dcmtk/ofstd/ofstream.h" #include "dcmtk/ofstd/oftuple.h" #include "dcmtk/ofstd/ofmath.h" #define INCLUDE_CMATH #define INCLUDE_CFLOAT #define INCLUDE_CSTRING #define INCLUDE_CSTDIO #define INCLUDE_CCTYPE #define INCLUDE_UNISTD #include "dcmtk/ofstd/ofstdinc.h" BEGIN_EXTERN_C #ifdef HAVE_SYS_STAT_H #include <sys/stat.h> /* for stat() */ #endif #ifdef HAVE_IO_H #include <io.h> /* for access() on Win32 */ #endif #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> /* for opendir() and closedir() */ #endif #ifdef HAVE_DIRENT_H #include <dirent.h> /* for opendir() and closedir() */ #else #define dirent direct #ifdef HAVE_SYS_NDIR_H #include <sys/ndir.h> #endif #ifdef HAVE_SYS_DIR_H #include <sys/dir.h> #endif #ifdef HAVE_NDIR_H #include <ndir.h> #endif #endif #ifdef HAVE_FNMATCH_H #include <fnmatch.h> /* for fnmatch() */ #endif #ifdef HAVE_IEEEFP_H #include <ieeefp.h> /* for finite() on Solaris 2.5.1 */ #endif #ifdef HAVE_SYS_UTSNAME_H #include <sys/utsname.h> #endif END_EXTERN_C #ifdef HAVE_WINDOWS_H #include <windows.h> /* for GetFileAttributes() */ #include <direct.h> /* for _mkdir() */ #include <lm.h> /* for NetWkstaUserGetInfo */ #ifndef R_OK /* Windows defines access() but not the constants */ #define W_OK 02 /* Write permission */ #define R_OK 04 /* Read permission */ #define F_OK 00 /* Existence only */ #endif /* !R_OK */ #endif /* HAVE_WINDOWS_H */ #ifdef _WIN32 #include <process.h> /* needed for declaration of getpid() */ #endif #include "dcmtk/ofstd/ofnetdb.h" #include "dcmtk/ofstd/ofgrp.h" #include "dcmtk/ofstd/ofpwd.h" #include "dcmtk/ofstd/ofoption.h" // --- ftoa() processing flags --- const unsigned int OFStandard::ftoa_format_e = 0x01; const 
unsigned int OFStandard::ftoa_format_f = 0x02;
const unsigned int OFStandard::ftoa_uppercase = 0x04;
const unsigned int OFStandard::ftoa_alternate = 0x08;
const unsigned int OFStandard::ftoa_leftadj = 0x10;
const unsigned int OFStandard::ftoa_zeropad = 0x20;

// --- string functions ---

#ifndef HAVE_STRLCPY
/*
 * Copy src to string dst of size siz. At most siz-1 characters
 * will be copied. Always NUL terminates (unless siz == 0).
 * Returns strlen(src); if retval >= siz, truncation occurred.
 * (BSD strlcpy fallback for platforms that lack it.)
 */
size_t OFStandard::my_strlcpy(char *dst, const char *src, size_t siz)
{
  char *d = dst;
  const char *s = src;
  size_t n = siz;

  /* Copy as many bytes as will fit */
  if (n != 0 && --n != 0)
  {
    do
    {
      if ((*d++ = *s++) == 0)
        break;
    } while (--n != 0);
  }

  /* Not enough room in dst, add NUL and traverse rest of src */
  if (n == 0)
  {
    if (siz != 0)
      *d = '\0'; /* NUL-terminate dst */
    while (*s++) /* do_nothing */ ;
  }

  return(s - src - 1); /* count does not include NUL */
}
#endif /* HAVE_STRLCPY */

#ifndef HAVE_STRLCAT
/*
 * Appends src to string dst of size siz (unlike strncat, siz is the
 * full size of dst, not space left). At most siz-1 characters
 * will be copied. Always NUL terminates (unless siz <= strlen(dst)).
 * Returns strlen(src) + MIN(siz, strlen(initial dst)).
 * If retval >= siz, truncation occurred.
 * (BSD strlcat fallback for platforms that lack it.)
 */
size_t OFStandard::my_strlcat(char *dst, const char *src, size_t siz)
{
  char *d = dst;
  const char *s = src;
  size_t n = siz;
  size_t dlen;

  /* Find the end of dst and adjust bytes left but don't go past end */
  while (n-- != 0 && *d != '\0') d++;
  dlen = d - dst;
  n = siz - dlen;

  if (n == 0) return(dlen + strlen(s));
  while (*s != '\0')
  {
    if (n != 1)
    {
      *d++ = *s;
      n--;
    }
    s++;
  }
  *d = '\0';

  return(dlen + (s - src)); /* count does not include NUL */
}
#endif /* HAVE_STRLCAT */

#ifdef HAVE_PROTOTYPE_STRERROR_R
/*
 * convert a given error code to a string. This function wraps the various
 * approaches found on different systems. Internally, the standard function
 * strerror() or strerror_r() is used.
 */
const char *OFStandard::strerror(const int errnum, char *buf, const size_t buflen)
{
  const char *result = "";
  if ((buf != NULL) && (buflen > 0))
  {
    // be paranoid and initialize the buffer to empty string
    buf[0] = 0;
    // two incompatible interfaces for strerror_r with different return types exist
#ifdef HAVE_CHARP_STRERROR_R
    // we're using the GNU specific version that returns the result, which may
    // or may not be a pointer to buf
    result = strerror_r(errnum, buf, buflen);
#else
    // we're using the X/OPEN version that always stores the result in buf
    (void) strerror_r(errnum, buf, buflen);
    result = buf;
#endif
  }
  return result;
}
#else
const char *OFStandard::strerror(const int errnum, char * /*buf*/, const size_t /*buflen*/)
{
  // we only have strerror() which is thread unsafe on Posix platforms, but thread safe on Windows
  return STDIO_NAMESPACE strerror(errnum);
}
#endif

// Uppercases `value` into `result` (byte-wise toupper).
OFString &OFStandard::toUpper(OFString &result, const OFString &value)
{
    result = value;
    return OFStandard::toUpper(result);
}

// Uppercases `value` in place and returns it.
OFString &OFStandard::toUpper(OFString &value)
{
    const size_t length = value.length();
    unsigned char c;
    for (size_t i = 0; i < length; i++)
    {
        c = value.at(i);
        value.at(i) = OFstatic_cast(char, toupper(c));
    }
    return value;
}

// Lowercases `value` into `result` (byte-wise tolower).
OFString &OFStandard::toLower(OFString &result, const OFString &value)
{
    result = value;
    return OFStandard::toLower(result);
}

// Lowercases `value` in place and returns it.
OFString &OFStandard::toLower(OFString &value)
{
    const size_t length = value.length();
    unsigned char c;
    for (size_t i = 0; i < length; i++)
    {
        c = value.at(i);
        value.at(i) = OFstatic_cast(char, tolower(c));
    }
    return value;
}

// --- file system functions ---

// Returns OFTrue if `pathName` exists (file OR directory), using the best
// mechanism available on the platform: access(), Win32 attributes, stat(),
// or as a last resort an fopen() probe.
OFBool OFStandard::pathExists(const OFFilename &pathName)
{
    OFBool result = OFFalse;
    /* check for valid path name (avoid NULL or empty string) */
    if (!pathName.isEmpty())
    {
#if HAVE_ACCESS
        /* check existence with "access()" */
#if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32)
        /* check whether to use the wide-char version of the API function */
        if (pathName.usesWideChars())
            result = (_waccess(pathName.getWideCharPointer(), F_OK) == 0);
        else
#endif
            result = (access(pathName.getCharPointer(), F_OK) == 0);
#else /* HAVE_ACCESS */
#ifdef HAVE_WINDOWS_H
        /* get file attributes */
        DWORD fileAttr;
#if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32)
        /* check whether to use the wide-char version of the API function */
        if (pathName.usesWideChars())
            fileAttr = GetFileAttributesW(pathName.getWideCharPointer());
        else
#endif
            fileAttr = GetFileAttributes(pathName.getCharPointer());
        result = (fileAttr != 0xffffffff);
#else /* HAVE_WINDOWS_H */
#ifdef HAVE_SYS_STAT_H
        /* check existence with "stat()" */
        struct stat stat_buf;
        result = (stat(pathName.getCharPointer(), &stat_buf) == 0);
#else
        /* try to open the given "file" (or directory) in read-only mode */
        OFFile file;
        result = file.fopen(pathName, "r");
        file.fclose();
#endif /* HAVE_SYS_STAT_H */
#endif /* HAVE_WINDOWS_H */
#endif /* HAVE_ACCESS */
    }
    return result;
}

// Returns OFTrue only if `fileName` exists AND is not a directory.
OFBool OFStandard::fileExists(const OFFilename &fileName)
{
    OFBool result = OFFalse;
    /* check for valid file name (avoid NULL or empty string) */
    if (!fileName.isEmpty())
    {
#ifdef HAVE_WINDOWS_H
        /* get file attributes */
        DWORD fileAttr;
#if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32)
        /* check whether to use the wide-char version of the API function */
        if (fileName.usesWideChars())
            fileAttr = GetFileAttributesW(fileName.getWideCharPointer());
        else
#endif
            fileAttr = GetFileAttributes(fileName.getCharPointer());
        if (fileAttr != 0xffffffff)
        {
            /* check file type (not a directory?) */
            result = ((fileAttr & FILE_ATTRIBUTE_DIRECTORY) == 0);
        }
#else /* HAVE_WINDOWS_H */
        /* check whether path exists (but does not point to a directory) */
        result = pathExists(fileName.getCharPointer()) && !dirExists(fileName.getCharPointer());
#endif /* HAVE_WINDOWS_H */
    }
    return result;
}

// Returns OFTrue only if `dirName` exists AND is a directory.
OFBool OFStandard::dirExists(const OFFilename &dirName)
{
    OFBool result = OFFalse;
    /* check for valid directory name (avoid NULL or empty string) */
    if (!dirName.isEmpty())
    {
#ifdef HAVE_WINDOWS_H
        /* get file attributes of the directory */
        DWORD fileAttr;
#if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32)
        /* check whether to use the wide-char version of the API function */
        if (dirName.usesWideChars())
            fileAttr = GetFileAttributesW(dirName.getWideCharPointer());
        else
#endif
            fileAttr = GetFileAttributes(dirName.getCharPointer());
        if (fileAttr != 0xffffffff)
        {
            /* check file type (is a directory?) */
            result = ((fileAttr & FILE_ATTRIBUTE_DIRECTORY) != 0);
        }
#else /* HAVE_WINDOWS_H */
        /* try to open the given directory */
        DIR *dirPtr = opendir(dirName.getCharPointer());
        if (dirPtr != NULL)
        {
            result = OFTrue;
            closedir(dirPtr);
        }
#endif /* HAVE_WINDOWS_H */
    }
    return result;
}

// Returns OFTrue if `pathName` is readable, via access(R_OK) where available,
// otherwise by probing with fopen() in read-only mode.
OFBool OFStandard::isReadable(const OFFilename &pathName)
{
    OFBool result = OFFalse;
    /* check for valid path name (avoid NULL or empty string) */
    if (!pathName.isEmpty())
    {
#if HAVE_ACCESS
        /* check whether the path is readable using "access()" */
#if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32)
        /* check whether to use the wide-char version of the API function */
        if (pathName.usesWideChars())
            result = (_waccess(pathName.getWideCharPointer(), R_OK) == 0);
        else
#endif
            result = (access(pathName.getCharPointer(), R_OK) == 0);
#else /* HAVE_ACCESS */
        /* try to open the given "file" (or directory) in read-only mode */
        OFFile file;
        result = file.fopen(pathName, "r");
#endif /* HAVE_ACCESS */
    }
    return result;
}

// Returns OFTrue if `pathName` is writeable (continues beyond this excerpt).
OFBool OFStandard::isWriteable(const OFFilename &pathName)
{
    OFBool result = OFFalse;
    /* check
for valid path name (avoid NULL or empty string) */ if (!pathName.isEmpty()) { #if HAVE_ACCESS /* check whether the path is writable using "access()" */ #if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32) /* check whether to use the wide-char version of the API function */ if (pathName.usesWideChars()) result = (_waccess(pathName.getWideCharPointer(), W_OK) == 0); else #endif result = (access(pathName.getCharPointer(), W_OK) == 0); #else /* HAVE_ACCESS */ /* try to open the given "file" (or directory) in write mode */ OFFile file; result = file.fopen(pathName, "w"); #endif /* HAVE_ACCESS */ } return result; } OFString &OFStandard::getDirNameFromPath(OFString &result, const OFString &pathName, const OFBool assumeDirName) { OFFilename resultFilename; /* call the real function */ getDirNameFromPath(resultFilename, pathName, assumeDirName); /* convert result into a string object */ result = OFSTRING_GUARD(resultFilename.getCharPointer()); return result; } OFFilename &OFStandard::getDirNameFromPath(OFFilename &result, const OFFilename &pathName, const OFBool assumeDirName) { #if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32) /* check whether to use the wide-char version of the API function */ if (pathName.usesWideChars()) { const wchar_t *strValue = pathName.getWideCharPointer(); const wchar_t *strPos = wcsrchr(strValue, L'\\' /* WIDE_PATH_SEPARATOR */); /* path separator found? */ if (strPos == NULL) { if (assumeDirName) result = pathName; else result.clear(); } else { wchar_t *tmpString = new wchar_t[strPos - strValue + 1]; wcsncpy(tmpString, strValue, strPos - strValue); tmpString[strPos - strValue] = L'\0'; result.set(tmpString, OFTrue /*convert*/); delete[] tmpString; } } else #endif /* otherwise, use the conventional 8-bit characters version */ { const char *strValue = pathName.getCharPointer(); const char *strPos = strrchr(strValue, PATH_SEPARATOR); /* path separator found? 
*/ if (strPos == NULL) { if (assumeDirName) result = pathName; else result.clear(); } else result.set(OFString(strValue, strPos - strValue)); } return result; } OFString &OFStandard::getFilenameFromPath(OFString &result, const OFString &pathName, const OFBool assumeFilename) { OFFilename resultFilename; /* call the real function */ getFilenameFromPath(resultFilename, pathName, assumeFilename); /* convert result into a string object */ result = OFSTRING_GUARD(resultFilename.getCharPointer()); return result; } OFFilename &OFStandard::getFilenameFromPath(OFFilename &result, const OFFilename &pathName, const OFBool assumeFilename) { #if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32) /* check whether to use the wide-char version of the API function */ if (pathName.usesWideChars()) { const wchar_t *strValue = pathName.getWideCharPointer(); const wchar_t *strPos = wcsrchr(strValue, L'\\' /* WIDE_PATH_SEPARATOR */); /* path separator found? */ if (strPos == NULL) { if (assumeFilename) result = pathName; else result.clear(); } else { wchar_t *tmpString = new wchar_t[wcslen(strPos)]; wcscpy(tmpString, strPos + 1); result.set(tmpString, OFTrue /*convert*/); delete[] tmpString; } } else #endif /* otherwise, use the conventional 8-bit characters version */ { const char *strValue = pathName.getCharPointer(); const char *strPos = strrchr(strValue, PATH_SEPARATOR); /* path separator found? 
*/ if (strPos == NULL) { if (assumeFilename) result = pathName; else result.clear(); } else result.set(OFString(strPos + 1)); } return result; } OFString &OFStandard::normalizeDirName(OFString &result, const OFString &dirName, const OFBool allowEmptyDirName) { OFFilename resultFilename; /* call the real function */ normalizeDirName(resultFilename, dirName, allowEmptyDirName); /* convert result into a string object */ result = OFSTRING_GUARD(resultFilename.getCharPointer()); return result; } OFFilename &OFStandard::normalizeDirName(OFFilename &result, const OFFilename &dirName, const OFBool allowEmptyDirName) { /* remove trailing path separators (keep it if appearing at the beginning of the string) */ /* TODO: do we need to check for absolute path containing Windows drive name, e.g. "c:\"? */ #if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32) /* check whether to use the wide-char version of the API function */ if (dirName.usesWideChars()) { const wchar_t *strValue = dirName.getWideCharPointer(); size_t strLength = (strValue == NULL) ? 0 : wcslen(strValue); while ((strLength > 1) && (strValue[strLength - 1] == L'\\' /* WIDE_PATH_SEPARATOR */)) --strLength; /* avoid "." as a directory name, use empty string instead */ if (allowEmptyDirName && ((strLength == 0) || ((strLength == 1) && (strValue[0] == L'.')))) result.clear(); /* avoid empty directory name (use "." instead) */ else if (!allowEmptyDirName && (strLength == 0)) result.set(L".", OFTrue /*convert*/); /* copy resulting string (omit trailing backslashes) */ else { wchar_t *tmpString = new wchar_t[strLength + 1]; wcsncpy(tmpString, strValue, strLength); tmpString[strLength] = L'\0'; result.set(tmpString, OFTrue /*convert*/); delete[] tmpString; } } else #endif /* otherwise, use the conventional 8-bit characters version */ { const char *strValue = dirName.getCharPointer(); size_t strLength = (strValue == NULL) ? 
0 : strlen(strValue); while ((strLength > 1) && (strValue[strLength - 1] == PATH_SEPARATOR)) --strLength; /* avoid "." as a directory name, use empty string instead */ if (allowEmptyDirName && ((strLength == 0) || ((strLength == 1) && (strValue[0] == '.')))) result.clear(); /* avoid empty directory name (use "." instead) */ else if (!allowEmptyDirName && (strLength == 0)) result.set("."); /* copy resulting string (omit trailing backslashes) */ else result.set(OFString(strValue, strLength)); } return result; } OFString &OFStandard::combineDirAndFilename(OFString &result, const OFString &dirName, const OFString &fileName, const OFBool allowEmptyDirName) { OFFilename resultFilename; /* call the real function */ combineDirAndFilename(resultFilename, dirName, fileName, allowEmptyDirName); /* convert result into a string object */ result = OFSTRING_GUARD(resultFilename.getCharPointer()); return result; } OFFilename &OFStandard::combineDirAndFilename(OFFilename &result, const OFFilename &dirName, const OFFilename &fileName, const OFBool allowEmptyDirName) { // # might use system function realpath() in the future to resolve paths including ".." // # or combinations of absolute paths in both 'dirName' and 'fileName' #if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32) /* check whether to use the wide-char version of the API function */ if (dirName.usesWideChars() || fileName.usesWideChars()) { const wchar_t *strValue = fileName.getWideCharPointer(); size_t strLength = (strValue == NULL) ? 0 : wcslen(strValue); /* check whether 'fileName' contains absolute path */ /* (this check also covers UNC syntax, e.g. "\\server\...") */ if ((strLength > 0) && (strValue[0] == L'\\' /* WIDE_PATH_SEPARATOR */)) { result.set(strValue, OFTrue /*convert*/); return result; } #ifdef HAVE_WINDOWS_H else if (strLength >= 3) { /* check for absolute path containing Windows drive name, e.g. "c:\..." 
*/ const wchar_t c = strValue[0]; if (((c >= L'A') && (c <= L'Z')) || ((c >= L'a') && (c <= L'z'))) { if ((strValue[1] == L':') && (strValue[2] == L'\\' /* WIDE_PATH_SEPARATOR */)) { result.set(strValue, OFTrue /*convert*/); return result; } } } #endif /* we only get here, if we don't have an absolute directory in "fileName" */ /* now normalize the directory name */ normalizeDirName(result, dirName, allowEmptyDirName); /* do some extra checks on a special case */ if (!result.isEmpty() && !result.usesWideChars()) { /* make sure that wide-char version exists */ OFFilename tmpDirName(result); result.set(tmpDirName.getCharPointer(), OFTrue /*convert*/); } /* check file name (ignore empty string and ".") */ if ((strLength > 1) || ((strLength == 1) && (strValue[0] != L'.'))) { if (result.isEmpty()) result.set(strValue, OFTrue /*convert*/); else { const wchar_t *resValue = result.getWideCharPointer(); const size_t resLength = wcslen(resValue); /* should never be 0 */ wchar_t *tmpString = new wchar_t[strLength + resLength + 1 + 1]; wcscpy(tmpString, resValue); /* add path separator (if required) ... */ if (resValue[resLength - 1] != L'\\' /* WIDE_PATH_SEPARATOR */) { tmpString[resLength] = L'\\' /* WIDE_PATH_SEPARATOR */; tmpString[resLength + 1] = L'\0'; } /* ...and file name */ wcscat(tmpString, strValue); result.set(tmpString, OFTrue /*convert*/); delete[] tmpString; } } } else #endif /* otherwise, use the conventional 8-bit characters version */ { const char *strValue = fileName.getCharPointer(); size_t strLength = (strValue == NULL) ? 0 : strlen(strValue); /* check whether 'fileName' contains absolute path */ /* (this check also covers UNC syntax, e.g. "\\server\...") */ if ((strLength > 0) && (strValue[0] == PATH_SEPARATOR)) { result.set(strValue); return result; } #ifdef HAVE_WINDOWS_H else if (strLength >= 3) { /* check for absolute path containing Windows drive name, e.g. "c:\..." 
*/ const char c = strValue[0]; if (((c >= 'A') && (c <= 'Z')) || ((c >= 'a') && (c <= 'z'))) { if ((strValue[1] == ':') && (strValue[2] == '\\')) { result.set(strValue); return result; } } } #endif /* we only get here, if we don't have an absolute directory in "fileName" */ /* now normalize the directory name */ normalizeDirName(result, dirName, allowEmptyDirName); /* check file name (ignore empty string and ".") */ if ((strLength > 1) || ((strLength == 1) && (strValue[0] != '.'))) { if (result.isEmpty()) result.set(strValue); else { const char *resValue = result.getCharPointer(); const size_t resLength = strlen(resValue); /* should never be 0 */ char *tmpString = new char[strLength + resLength + 1 + 1]; strcpy(tmpString, resValue); /* add path separator (if required) ... */ if (resValue[resLength - 1] != PATH_SEPARATOR) { tmpString[resLength] = PATH_SEPARATOR; tmpString[resLength + 1] = '\0'; } /* ...and file name */ strcat(tmpString, strValue); result.set(tmpString); delete[] tmpString; } } } return result; } OFCondition OFStandard::removeRootDirFromPathname(OFFilename &result, const OFFilename &rootDir, const OFFilename &pathName, const OFBool allowLeadingPathSeparator) { OFCondition status = EC_IllegalParameter; #if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32) /* check whether to use the wide-char version of the API function */ if (rootDir.usesWideChars() || pathName.usesWideChars()) { const wchar_t *rootValue = rootDir.getWideCharPointer(); const wchar_t *pathValue = pathName.getWideCharPointer(); const size_t rootLength = (rootValue == NULL) ? 0 : wcslen(rootValue); const size_t pathLength = (pathValue == NULL) ? 
0 : wcslen(pathValue); /* check for empty strings */ if ((rootLength == 0) && (pathLength == 0)) { result.set("", OFTrue /*convert*/); status = EC_Normal; } /* check for empty root dir */ else if (rootLength == 0) { result.set(pathValue, OFTrue /*convert*/); status = EC_Normal; } /* check for "compatible" length */ else if (rootLength <= pathLength) { /* check for same prefix */ if (wcsncmp(rootValue, pathValue, rootLength) == 0) { /* create temporary buffer for destination string */ wchar_t *tmpString = new wchar_t[pathLength - rootLength + 1]; /* remove root dir prefix from path name */ wcscpy(tmpString, pathValue + rootLength); /* remove leading path separator (if present) */ if (!allowLeadingPathSeparator && (tmpString[0] == PATH_SEPARATOR)) result.set(tmpString + 1, OFTrue /*convert*/); else result.set(tmpString, OFTrue /*convert*/); delete[] tmpString; status = EC_Normal; } } } else #endif /* otherwise, use the conventional 8-bit characters version */ { const char *rootValue = rootDir.getCharPointer(); const char *pathValue = pathName.getCharPointer(); const size_t rootLength = (rootValue == NULL) ? 0 : strlen(rootValue); const size_t pathLength = (pathValue == NULL) ? 
0 : strlen(pathValue); /* check for empty strings */ if ((rootLength == 0) && (pathLength == 0)) { result.set(""); status = EC_Normal; } /* check for empty root dir */ else if (rootLength == 0) { result.set(pathValue); status = EC_Normal; } /* check for "compatible" length */ else if (rootLength <= pathLength) { /* check for same prefix */ if (strncmp(rootValue, pathValue, rootLength) == 0) { /* create temporary buffer for destination string */ char *tmpString = new char[pathLength - rootLength + 1]; /* remove root dir prefix from path name */ strcpy(tmpString, pathValue + rootLength); /* remove leading path separator (if present) */ if (!allowLeadingPathSeparator && (tmpString[0] == PATH_SEPARATOR)) result.set(tmpString + 1); else result.set(tmpString); delete[] tmpString; status = EC_Normal; } } } /* return empty string in case of error */ if (status.bad()) result.clear(); return status; } OFFilename &OFStandard::appendFilenameExtension(OFFilename &result, const OFFilename &fileName, const OFFilename &fileExtension) { #if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32) /* check whether to use the wide-char version of the API function */ if (fileName.usesWideChars()) { OFFilename fileExt(fileExtension); /* convert file extension to wide chars (if needed) */ if (!fileExt.isEmpty() && !fileExt.usesWideChars()) fileExt.set(fileExtension.getCharPointer(), OFTrue /*convert*/); const wchar_t *namValue = fileName.getWideCharPointer(); const wchar_t *extValue = fileExt.getWideCharPointer(); size_t namLength = (namValue == NULL) ? 0 : wcslen(namValue); size_t extLength = (extValue == NULL) ? 
0 : wcslen(extValue); /* create temporary buffer for destination string */ wchar_t *tmpString = new wchar_t[namLength + extLength + 1]; wcscpy(tmpString, namValue); if (extValue != NULL) wcscat(tmpString, extValue); result.set(tmpString, OFTrue /*convert*/); delete[] tmpString; } else #endif /* otherwise, use the conventional 8-bit characters version */ { const char *namValue = fileName.getCharPointer(); const char *extValue = fileExtension.getCharPointer(); size_t namLength = (namValue == NULL) ? 0 : strlen(namValue); size_t extLength = (extValue == NULL) ? 0 : strlen(extValue); /* create temporary buffer for destination string */ char *tmpString = new char[namLength + extLength + 1]; strcpy(tmpString, (namValue == NULL) ? "" : namValue); if (extValue != NULL) strcat(tmpString, extValue); result.set(tmpString); delete[] tmpString; } return result; } size_t OFStandard::searchDirectoryRecursively(const OFString &directory, OFList<OFString> &fileList, const OFString &pattern, const OFString &dirPrefix, const OFBool recurse) { OFList<OFFilename> filenameList; /* call the real function */ const size_t result = searchDirectoryRecursively(directory, filenameList, pattern, dirPrefix, recurse); /* copy all list entries to reference parameter */ OFListIterator(OFFilename) iter = filenameList.begin(); OFListIterator(OFFilename) last = filenameList.end(); while (iter != last) { fileList.push_back(OFSTRING_GUARD((*iter).getCharPointer())); ++iter; } return result; } size_t OFStandard::searchDirectoryRecursively(const OFFilename &directory, OFList<OFFilename> &fileList, const OFFilename &pattern, const OFFilename &dirPrefix, const OFBool recurse) { const size_t initialSize = fileList.size(); OFFilename dirName, pathName, tmpString; combineDirAndFilename(dirName, dirPrefix, directory); #ifdef HAVE_WINDOWS_H /* check whether given directory exists */ if (dirExists(dirName)) { #if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32) /* check whether to use the wide-char version 
of the API function */ if (dirName.usesWideChars()) { HANDLE handle; WIN32_FIND_DATAW data; /* check whether file pattern is given */ if (!pattern.isEmpty()) { /* first, search for matching files on this directory level */ handle = FindFirstFileW(combineDirAndFilename(tmpString, dirName, pattern, OFTrue /*allowEmptyDirName*/).getWideCharPointer(), &data); if (handle != INVALID_HANDLE_VALUE) { do { /* avoid leading "." */ if (wcscmp(dirName.getWideCharPointer(), L".") == 0) pathName.set(data.cFileName, OFTrue /*convert*/); else combineDirAndFilename(pathName, directory, data.cFileName, OFTrue /*allowEmptyDirName*/); /* ignore directories and the like */ if (fileExists(combineDirAndFilename(tmpString, dirPrefix, pathName, OFTrue /*allowEmptyDirName*/))) fileList.push_back(pathName); } while (FindNextFileW(handle, &data)); FindClose(handle); } } /* then search for _any_ file/directory entry */ handle = FindFirstFileW(combineDirAndFilename(tmpString, dirName, L"*.*", OFTrue /*allowEmptyDirName*/).getWideCharPointer(), &data); if (handle != INVALID_HANDLE_VALUE) { do { /* filter out current and parent directory */ if ((wcscmp(data.cFileName, L".") != 0) && (wcscmp(data.cFileName, L"..") != 0)) { /* avoid leading "." 
*/ if (wcscmp(dirName.getWideCharPointer(), L".") == 0) pathName.set(data.cFileName, OFTrue /*convert*/); else combineDirAndFilename(pathName, directory, data.cFileName, OFTrue /*allowEmptyDirName*/); if (dirExists(combineDirAndFilename(tmpString, dirPrefix, pathName, OFTrue /*allowEmptyDirName*/))) { /* recursively search sub directories */ if (recurse) searchDirectoryRecursively(pathName, fileList, pattern, dirPrefix, recurse); } else if (pattern.isEmpty()) { /* add filename to the list (if no pattern is given) */ fileList.push_back(pathName); } } } while (FindNextFileW(handle, &data)); FindClose(handle); } } else #endif /* otherwise, use the conventional 8-bit characters version */ { HANDLE handle; WIN32_FIND_DATAA data; /* check whether file pattern is given */ if (!pattern.isEmpty()) { /* first, search for matching files on this directory level */ handle = FindFirstFileA(combineDirAndFilename(tmpString, dirName, pattern, OFTrue /*allowEmptyDirName*/).getCharPointer(), &data); if (handle != INVALID_HANDLE_VALUE) { do { /* avoid leading "." */ if (strcmp(dirName.getCharPointer(), ".") == 0) pathName.set(data.cFileName); else combineDirAndFilename(pathName, directory, data.cFileName, OFTrue /*allowEmptyDirName*/); /* ignore directories and the like */ if (fileExists(combineDirAndFilename(tmpString, dirPrefix, pathName, OFTrue /*allowEmptyDirName*/))) fileList.push_back(pathName); } while (FindNextFileA(handle, &data)); FindClose(handle); } } /* then search for _any_ file/directory entry */ handle = FindFirstFileA(combineDirAndFilename(tmpString, dirName, "*.*", OFTrue /*allowEmptyDirName*/).getCharPointer(), &data); if (handle != INVALID_HANDLE_VALUE) { do { /* filter out current and parent directory */ if ((strcmp(data.cFileName, ".") != 0) && (strcmp(data.cFileName, "..") != 0)) { /* avoid leading "." 
*/ if (strcmp(dirName.getCharPointer(), ".") == 0) pathName.set(data.cFileName); else combineDirAndFilename(pathName, directory, data.cFileName, OFTrue /*allowEmptyDirName*/); if (dirExists(combineDirAndFilename(tmpString, dirPrefix, pathName, OFTrue /*allowEmptyDirName*/))) { /* recursively search sub directories */ if (recurse) searchDirectoryRecursively(pathName, fileList, pattern, dirPrefix, recurse); } else if (pattern.isEmpty()) { /* add filename to the list (if no pattern is given) */ fileList.push_back(pathName); } } } while (FindNextFileA(handle, &data)); FindClose(handle); } } } #else /* try to open the directory */ DIR *dirPtr = opendir(dirName.getCharPointer()); if (dirPtr != NULL) { struct dirent *entry = NULL; #ifdef HAVE_READDIR_R dirent d = {}; while (!readdir_r(dirPtr, &d, &entry) && entry) #else while ((entry = readdir(dirPtr)) != NULL) #endif { /* filter out current (".") and parent directory ("..") */ if ((strcmp(entry->d_name, ".") != 0) && (strcmp(entry->d_name, "..") != 0)) { /* avoid leading "." 
*/ if (strcmp(dirName.getCharPointer(), ".") == 0) pathName = entry->d_name; else combineDirAndFilename(pathName, directory, entry->d_name, OFTrue /*allowEmptyDirName*/); if (dirExists(combineDirAndFilename(tmpString, dirPrefix, pathName, OFTrue /*allowEmptyDirName*/))) { /* recursively search sub directories */ if (recurse) searchDirectoryRecursively(pathName, fileList, pattern, dirPrefix, recurse); } else { #ifdef HAVE_FNMATCH_H /* check whether filename matches pattern */ if ((pattern.isEmpty()) || (fnmatch(pattern.getCharPointer(), entry->d_name, FNM_PATHNAME) == 0)) #else /* no pattern matching, sorry :-/ */ #endif fileList.push_back(pathName); } } } closedir(dirPtr); } #endif /* return number of added files */ return fileList.size() - initialSize; } OFCondition OFStandard::createDirectory(const OFFilename &dirName, const OFFilename &rootDir) { OFCondition status = EC_Normal; /* first, check whether the directory already exists */ if (!dirExists(dirName)) { #if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32) /* check whether to use the wide-char version of the API function */ if (dirName.usesWideChars()) { /* then, check whether the given prefix can be skipped */ size_t pos = 0; const wchar_t *dirValue = dirName.getWideCharPointer(); const wchar_t *rootValue = rootDir.getWideCharPointer(); size_t dirLength = (dirValue == NULL) ? 0 : wcslen(dirValue); size_t rootLength = (rootValue == NULL) ? 0 : wcslen(rootValue); /* check for absolute path containing Windows drive name, e. g. 
"c:\", * is not required since the root directory should always exist */ if ((dirLength > 1) && (dirValue[dirLength - 1] == L'\\' /* WIDE_PATH_SEPARATOR */)) { /* ignore trailing path separator */ --dirLength; } if ((rootLength > 1) && (rootValue[rootLength - 1] == L'\\' /* WIDE_PATH_SEPARATOR */)) { /* ignore trailing path separator */ --rootLength; } /* check for "compatible" length */ if ((rootLength > 0) && (rootLength < dirLength)) { /* check for common prefix */ if (wcsncmp(dirValue, rootValue, rootLength) == 0) { /* check whether root directory really exists */ if (dirExists(rootDir)) { /* start searching after the common prefix */ pos = rootLength; } } } /* and finally, iterate over all subsequent subdirectories */ do { /* search for next path separator */ do { ++pos; } while ((dirValue[pos] != L'\\' /* WIDE_PATH_SEPARATOR */) && (dirValue[pos] != L'\0')); /* get name of current directory component */ wchar_t *subDir = new wchar_t[pos + 1]; wcsncpy(subDir, dirValue, pos /*num*/); subDir[pos] = L'\0'; if (!dirExists(subDir)) { /* and create the directory component (if not already existing) */ if (_wmkdir(subDir) == -1) { char errBuf[256]; OFString message("Cannot create directory: "); message.append(strerror(errno, errBuf, sizeof(errBuf))); status = makeOFCondition(0, EC_CODE_CannotCreateDirectory, OF_error, message.c_str()); /* exit the loop */ break; } } delete[] subDir; } while (pos < dirLength); } else #endif /* otherwise, use the conventional 8-bit characters version */ { /* then, check whether the given prefix can be skipped */ size_t pos = 0; const char *dirValue = dirName.getCharPointer(); const char *rootValue = rootDir.getCharPointer(); size_t dirLength = (dirValue == NULL) ? 0 : strlen(dirValue); size_t rootLength = (rootValue == NULL) ? 0 : strlen(rootValue); /* check for absolute path containing Windows drive name, e. g. 
"c:\", * is not required since the root directory should always exist */ if ((dirLength > 1) && (dirValue[dirLength - 1] == PATH_SEPARATOR)) { /* ignore trailing path separator */ --dirLength; } if ((rootLength > 1) && (rootValue[rootLength - 1] == PATH_SEPARATOR)) { /* ignore trailing path separator */ --rootLength; } /* check for "compatible" length */ if ((rootLength > 0) && (rootLength < dirLength)) { /* check for common prefix */ if (strncmp(dirValue, rootValue, rootLength) == 0) { /* check whether root directory really exists */ if (dirExists(rootDir)) { /* start searching after the common prefix */ pos = rootLength; } } } /* and finally, iterate over all subsequent subdirectories */ do { /* search for next path separator */ do { ++pos; } while ((dirValue[pos] != PATH_SEPARATOR) && (dirValue[pos] != '\0')); /* get name of current directory component */ char *subDir = new char[pos + 1]; strlcpy(subDir, dirValue, pos + 1 /*size*/); if (!dirExists(subDir)) { /* and create the directory component (if not already existing) */ #ifdef HAVE_WINDOWS_H if (_mkdir(subDir) == -1) #else if (mkdir(subDir, S_IRWXU | S_IRWXG | S_IRWXO) == -1) #endif { char errBuf[256]; OFString message("Cannot create directory: "); message.append(strerror(errno, errBuf, sizeof(errBuf))); status = makeOFCondition(0, EC_CODE_CannotCreateDirectory, OF_error, message.c_str()); /* exit the loop */ break; } } delete[] subDir; } while (pos < dirLength); } } return status; } #define COPY_FILE_BUFFER_SIZE 4096 OFBool OFStandard::copyFile(const OFFilename &sourceFilename, const OFFilename &destFilename) { OFBool status = OFFalse; /* avoid NULL or empty string passed to fopen() */ if (!sourceFilename.isEmpty() && !destFilename.isEmpty()) { /* open input file */ OFFile sourceFile; if (sourceFile.fopen(sourceFilename, "rb")) { /* create output file */ OFFile destFile; if (destFile.fopen(destFilename, "wb")) { size_t numRead = 0; size_t numWrite = 0; Uint8 buffer[COPY_FILE_BUFFER_SIZE]; /* read and write 
data in chunks */
                do {
                    numRead = sourceFile.fread(buffer, 1, COPY_FILE_BUFFER_SIZE);
                } while ((numRead > 0) && ((numWrite = destFile.fwrite(buffer, 1, numRead)) == numRead));
                /* check for any errors */
                if ((sourceFile.error() == 0) && (destFile.error() == 0))
                    status = OFTrue;
            }
        }
    }
    return status;
}


/* delete the given file from the file system (via unlink/_wunlink).
 * Returns OFTrue if and only if the deletion succeeded. */
OFBool OFStandard::deleteFile(const OFFilename &filename)
{
    int err = -1;
    /* avoid NULL or empty string passed to unlink() */
    if (!filename.isEmpty())
    {
#if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32)
        /* use the wide-char API if the filename carries wide characters */
        if (filename.usesWideChars())
            err = _wunlink(filename.getWideCharPointer());
        else
#endif
            err = unlink(filename.getCharPointer());
    }
    return (err == 0);
}


/* rename the given file (via rename/_wrename).
 * Returns OFTrue if and only if the rename succeeded. */
OFBool OFStandard::renameFile(const OFFilename &oldFilename,
                              const OFFilename &newFilename)
{
    int err = -1;
    /* avoid NULL or empty strings passed to rename() */
    if (!oldFilename.isEmpty() && !newFilename.isEmpty())
    {
#if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32)
        /* only use the wide-char API when both names carry wide characters */
        if (oldFilename.usesWideChars() && newFilename.usesWideChars())
            err = _wrename(oldFilename.getWideCharPointer(), newFilename.getWideCharPointer());
        else
        {
            const char *oldName = oldFilename.getCharPointer();
            const char *newName = newFilename.getCharPointer();
            /* avoid passing invalid values to rename() */
            if ((oldName != NULL) && (newName != NULL))
                err = rename(oldName, newName);
        }
#else
        err = rename(oldFilename.getCharPointer(), newFilename.getCharPointer());
#endif
    }
    return (err == 0);
}


/* determine the size (in bytes) of the given file via stat()/_wstat().
 * Returns 0 if the file name is empty or stat() fails. */
size_t OFStandard::getFileSize(const OFFilename &filename)
{
    size_t fileSize = 0;
    /* avoid NULL or empty strings passed to stat() */
    if (!filename.isEmpty())
    {
#if defined(WIDE_CHAR_FILE_IO_FUNCTIONS) && defined(_WIN32)
        if (filename.usesWideChars())
        {
            /* _wstat() uses this struct type on Windows */
            struct _stat64i32 fileStat;
            if (_wstat(filename.getWideCharPointer(), &fileStat) == 0)
                fileSize = OFstatic_cast(size_t, fileStat.st_size);
        } else
#endif
        {
            struct stat fileStat;
            if (stat(filename.getCharPointer(), &fileStat) == 0)
                fileSize = OFstatic_cast(size_t, fileStat.st_size);
        }
    }
    return fileSize;
}
/* check whether 'sourceString' (up to 'maxLength' characters, 0 = all)
 * contains any character that convertToMarkupStream() would replace,
 * i.e. markup-reserved characters, NUL, CR/LF, and -- if requested --
 * any non-ASCII/control character. */
OFBool OFStandard::checkForMarkupConversion(const OFString &sourceString,
                                            const OFBool convertNonASCII,
                                            const size_t maxLength)
{
    OFBool result = OFFalse;
    size_t pos = 0;
    const size_t strLen = sourceString.length();
    /* determine maximum number of characters to be converted */
    const size_t length = (maxLength == 0) ? strLen : ((strLen < maxLength) ? strLen : maxLength);
    /* check for characters to be converted */
    while (pos < length)
    {
        /* treat the character as an unsigned code point */
        const size_t c = OFstatic_cast(unsigned char, sourceString.at(pos));
        if ((c == '<') || (c == '>') || (c == '&') || (c == '"') || (c == '\'') ||
            (c == 0) ||   /* a NULL byte should never be added to the output */
            (c == 10) || (c == 13) || (convertNonASCII && ((c < 32) || (c >= 127))))
        {
            /* return on the first character that needs to be converted */
            result = OFTrue;
            break;
        }
        ++pos;
    }
    return result;
}


/* write 'sourceString' to 'out' with HTML/XHTML/XML reserved characters
 * replaced by the appropriate entities (behavior depends on 'markupMode') */
OFCondition OFStandard::convertToMarkupStream(STD_NAMESPACE ostream &out,
                                              const OFString &sourceString,
                                              const OFBool convertNonASCII,
                                              const E_MarkupMode markupMode,
                                              const OFBool newlineAllowed,
                                              const size_t maxLength)
{
    size_t pos = 0;
    const size_t strLen = sourceString.length();
    /* determine maximum number of characters to be converted */
    const size_t length = (maxLength == 0) ? strLen : ((strLen < maxLength) ?
strLen : maxLength); /* replace HTML/XHTML/XML reserved characters */ while (pos < length) { const char c = sourceString.at(pos); /* less than */ if (c == '<') out << "&lt;"; /* greater than */ else if (c == '>') out << "&gt;"; /* ampersand */ else if (c == '&') out << "&amp;"; /* quotation mark */ else if (c == '"') { /* entity "&quot;" is not defined in HTML 3.2 */ if (markupMode == MM_HTML32) out << "&#34;"; else out << "&quot;"; } /* apostrophe */ else if (c == '\'') { /* entity "&apos;" is not defined in HTML */ if ((markupMode == MM_HTML) || (markupMode == MM_HTML32)) out << "&#39;"; else out << "&apos;"; } /* newline: LF, CR, LF CR, CR LF */ else if ((c == '\012') || (c == '\015')) { if (markupMode == MM_XML) { /* encode CR and LF exactly as specified */ if (c == '\012') out << "&#10;"; // '\n' else out << "&#13;"; // '\r' } else { /* HTML/XHTML mode */ /* skip next character if it belongs to the newline sequence */ if (((c == '\012') && (sourceString[pos + 1] == '\015')) || ((c == '\015') && (sourceString[pos + 1] == '\012'))) ++pos; if (newlineAllowed) { if (markupMode == MM_XHTML) out << "<br />\n"; else out << "<br>\n"; } else out << "&para;"; } } else { const size_t charValue = OFstatic_cast(unsigned char, c); /* other character: ... 
*/ if ((convertNonASCII || (markupMode == MM_HTML32)) && ((charValue < 32) || (charValue >= 127))) { /* convert < #32 and >= #127 to Unicode (ISO Latin-1) */ out << "&#" << charValue << ";"; } else if (charValue != 0) { /* just append (if not a NULL byte) */ out << c; } } ++pos; } return EC_Normal; } const OFString &OFStandard::convertToMarkupString(const OFString &sourceString, OFString &markupString, const OFBool convertNonASCII, const E_MarkupMode markupMode, const OFBool newlineAllowed, const size_t maxLength) { OFStringStream stream; /* call stream variant of convert to markup */ if (OFStandard::convertToMarkupStream(stream, sourceString, convertNonASCII, markupMode, newlineAllowed, maxLength).good()) { stream << OFStringStream_ends; /* convert string stream into a character string */ OFSTRINGSTREAM_GETSTR(stream, buffer_str) markupString.assign(buffer_str); OFSTRINGSTREAM_FREESTR(buffer_str) } else markupString.clear(); return markupString; } OFBool OFStandard::checkForOctalConversion(const OFString &sourceString, const size_t maxLength) { OFBool result = OFFalse; size_t pos = 0; const size_t strLen = sourceString.length(); /* determine maximum number of characters to be converted */ const size_t length = (maxLength == 0) ? strLen : ((strLen < maxLength) ? strLen : maxLength); /* check for characters to be converted */ while (pos < length) { const size_t c = OFstatic_cast(unsigned char, sourceString.at(pos)); if ((c < 32) || (c >= 127)) { /* return on the first character that needs to be converted */ result = OFTrue; break; } ++pos; } return result; } OFCondition OFStandard::convertToOctalStream(STD_NAMESPACE ostream &out, const OFString &sourceString, const size_t maxLength) { size_t pos = 0; const size_t strLen = sourceString.length(); /* determine maximum number of characters to be converted */ const size_t length = (maxLength == 0) ? strLen : ((strLen < maxLength) ? 
strLen : maxLength); /* switch to octal mode for numbers */ out << STD_NAMESPACE oct << STD_NAMESPACE setfill('0'); while (pos < length) { const char c = sourceString.at(pos); const size_t charValue = OFstatic_cast(unsigned char, c); /* replace non-ASCII characters */ if ((charValue < 32) || (charValue >= 127)) out << '\\' << STD_NAMESPACE setw(3) << charValue; else out << c; ++pos; } /* reset i/o manipulators */ out << STD_NAMESPACE dec << STD_NAMESPACE setfill(' '); return EC_Normal; } const OFString &OFStandard::convertToOctalString(const OFString &sourceString, OFString &octalString, const size_t maxLength) { OFStringStream stream; /* call stream variant of convert to octal notation */ if (OFStandard::convertToOctalStream(stream, sourceString, maxLength).good()) { stream << OFStringStream_ends; /* convert string stream into a character string */ OFSTRINGSTREAM_GETSTR(stream, buffer_str) octalString.assign(buffer_str); OFSTRINGSTREAM_FREESTR(buffer_str) } else octalString.clear(); return octalString; } // Base64 translation table as described in RFC 2045 (MIME) static const char enc_base64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"; OFCondition OFStandard::encodeBase64(STD_NAMESPACE ostream &out, const unsigned char *data, const size_t length, const size_t width) { OFCondition status = EC_IllegalParameter; /* check data buffer to be encoded */ if (data != NULL) { unsigned char c; size_t w = 0; /* iterate over all data elements */ for (size_t i = 0; i < length; i++) { /* encode first 6 bits */ out << enc_base64[(data[i] >> 2) & 0x3f]; /* insert line break (if width > 0) */ if (++w == width) { out << OFendl; w = 0; } /* encode remaining 2 bits of the first byte and 4 bits of the second byte */ c = (data[i] << 4) & 0x3f; if (++i < length) c |= (data[i] >> 4) & 0x0f; out << enc_base64[c]; /* insert line break (if width > 0) */ if (++w == width) { out << OFendl; w = 0; } /* encode remaining 4 bits of the second byte and 2 bits of the third 
byte */ if (i < length) { c = (data[i] << 2) & 0x3f; if (++i < length) c |= (data[i] >> 6) & 0x03; out << enc_base64[c]; } else { i++; /* append fill char */ out << '='; } /* insert line break (if width > 0) */ if (++w == width) { out << OFendl; w = 0; } /* encode remaining 6 bits of the third byte */ if (i < length) out << enc_base64[data[i] & 0x3f]; else /* append fill char */ out << '='; /* insert line break (if width > 0) */ if (++w == width) { out << OFendl; w = 0; } } /* flush stream */ out.flush(); status = EC_Normal; } return status; } const OFString &OFStandard::encodeBase64(const unsigned char *data, const size_t length, OFString &result, const size_t width) { OFStringStream stream; /* call stream variant of base64 encoder */ if (OFStandard::encodeBase64(stream, data, length, width).good()) { stream << OFStringStream_ends; /* convert string stream into a character string */ OFSTRINGSTREAM_GETSTR(stream, buffer_str) result.assign(buffer_str); OFSTRINGSTREAM_FREESTR(buffer_str) } else result.clear(); return result; } // Base64 decoding table: maps #43..#122 to #0..#63 (255 means invalid) static const unsigned char dec_base64[] = { 62, 255, 255, 255, 63, // '+' .. '/' 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, // '0' .. '9' 255, 255, 255, 255, 255, 255, 255, // ':' .. '@' 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, // 'A' .. 'Z' 255, 255, 255, 255, 255, 255, // '[' .. '`' 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51 // 'a' .. 'z' }; size_t OFStandard::decodeBase64(const OFString &data, unsigned char *&result) { size_t count = 0; /* search for fill char to determine the real length of the input string */ const size_t fillPos = data.find('='); const size_t length = (fillPos != OFString_npos) ? 
fillPos : data.length(); /* check data buffer to be decoded */ if (length > 0) { /* allocate sufficient memory for the decoded data */ result = new unsigned char[((length + 3) / 4) * 3]; if (result != NULL) { unsigned char c1 = 0; unsigned char c2 = 0; /* iterate over all data elements */ for (size_t i = 0; i < length; i++) { /* skip invalid characters and assign first decoded char */ while ((i < length) && ((data.at(i) < '+') || (data.at(i) > 'z') || ((c1 = dec_base64[data.at(i) - '+']) > 63))) i++; if (++i < length) { /* skip invalid characters and assign second decoded char */ while ((i < length) && ((data.at(i) < '+') || (data.at(i) > 'z') || ((c2 = dec_base64[data.at(i) - '+']) > 63))) i++; if (i < length) { /* decode first byte */ result[count++] = OFstatic_cast(unsigned char, (c1 << 2) | ((c2 >> 4) & 0x3)); if (++i < length) { /* skip invalid characters and assign third decoded char */ while ((i < length) && ((data.at(i) < '+') || (data.at(i) > 'z') || ((c1 = dec_base64[data.at(i) - '+']) > 63))) i++; if (i < length) { /* decode second byte */ result[count++] = OFstatic_cast(unsigned char, ((c2 << 4) & 0xf0) | ((c1 >> 2) & 0xf)); if (++i < length) { /* skip invalid characters and assign fourth decoded char */ while ((i < length) && ((data.at(i) < '+') || (data.at(i) > 'z') || ((c2 = dec_base64[data.at(i) - '+']) > 63))) i++; /* decode third byte */ if (i < length) result[count++] = OFstatic_cast(unsigned char, ((c1 << 6) & 0xc0) | c2); } } } } } } /* delete buffer if no data has been written to the output */ if (count == 0) delete[] result; } } else result = NULL; return count; } #ifdef DISABLE_OFSTD_ATOF // we use sscanf instead of atof because atof doesn't return a status flag double OFStandard::atof(const char *s, OFBool *success) { double result; if (success) { *success = (1 == sscanf(s,"%lf",&result)); } else { (void) sscanf(s,"%lf",&result); } return result; } #else // --- definitions and constants for atof() --- /* Largest possible base 10 exponent. 
 Any exponent larger than this will
 * already produce underflow or overflow, so there's no need to worry
 * about additional digits.
 */
#define ATOF_MAXEXPONENT 511

/* Table giving binary powers of 10.  Entry is 10^2^i.
 * Used to convert decimal exponents into floating-point numbers.
 */
static const double atof_powersOf10[] =
{
    10.,
    100.,
    1.0e4,
    1.0e8,
    1.0e16,
    1.0e32,
    1.0e64,
    1.0e128,
    1.0e256
};

/* Convert a decimal ASCII string to a double.  Unlike the C library atof(),
 * this variant reports via 'success' whether a number was actually parsed.
 * The algorithm reads sign, mantissa (at most 18 significant digits, collected
 * in two 9-digit integers to avoid floating-point accumulation error), and an
 * optional E/e exponent, then scales by binary powers of 10.
 * NOTE(review): this closely follows the classic Tcl strtod() implementation.
 */
double OFStandard::atof(const char *s, OFBool *success)
{
    if (success) *success = OFFalse;
    const char *p = s;
    char c;
    int sign = 0;
    int expSign = 0;
    double fraction;
    int exponent = 0; // Exponent read from "EX" field.

    /* pExp: Temporarily holds location of exponent in string. */
    const char *pExp;

    /* Exponent that derives from the fractional part.  Under normal
     * circumstances, it is the negative of the number of digits in F.
     * However, if I is very long, the last digits of I get dropped
     * (otherwise a long I with a large negative exponent could cause an
     * unnecessary overflow on I alone).  In this case, fracExp is
     * incremented one for each dropped digit.
     */
    int fracExp = 0;

    // Strip off leading blanks and check for a sign.
    while (isspace(OFstatic_cast(unsigned char, *p))) ++p;
    if (*p == '-')
    {
        sign = 1;
        ++p;
    }
    else
    {
        if (*p == '+') ++p;
    }

    // Count the number of digits in the mantissa (including the decimal
    // point), and also locate the decimal point.

    int decPt = -1; // Number of mantissa digits BEFORE decimal point.
    int mantSize;   // Number of digits in mantissa.
    for (mantSize = 0; ; ++mantSize)
    {
        c = *p;
        if (!isdigit(OFstatic_cast(unsigned char, c)))
        {
            if ((c != '.') || (decPt >= 0)) break;
            decPt = mantSize;
        }
        ++p;
    }

    /*
     * Now suck up the digits in the mantissa.  Use two integers to
     * collect 9 digits each (this is faster than using floating-point).
     * If the mantissa has more than 18 digits, ignore the extras, since
     * they can't affect the value anyway.
     */
    pExp = p;
    p -= mantSize;
    if (decPt < 0)
        decPt = mantSize;
    else
        mantSize -= 1; // One of the digits was the point

    if (mantSize > 18)
    {
        fracExp = decPt - 18;
        mantSize = 18;
    }
    else
    {
        fracExp = decPt - mantSize;
    }

    if (mantSize == 0)
    {
        // subject sequence does not have expected form.
        // return 0 and leave success flag set to false
        return 0.0;
    }
    else
    {
        /* first up to 9 digits, skipping over an embedded decimal point */
        int frac1 = 0;
        for ( ; mantSize > 9; mantSize -= 1)
        {
            c = *p;
            ++p;
            if (c == '.')
            {
                c = *p;
                ++p;
            }
            frac1 = 10*frac1 + (c - '0');
        }
        /* remaining digits */
        int frac2 = 0;
        for (; mantSize > 0; mantSize -= 1)
        {
            c = *p;
            ++p;
            if (c == '.')
            {
                c = *p;
                ++p;
            }
            frac2 = 10*frac2 + (c - '0');
        }
        fraction = (1.0e9 * frac1) + frac2;
    }

    // Skim off the exponent.
    p = pExp;
    if ((*p == 'E') || (*p == 'e'))
    {
        ++p;
        if (*p == '-')
        {
            expSign = 1;
            ++p;
        }
        else
        {
            if (*p == '+') ++p;
            expSign = 0;
        }
        while (isdigit(OFstatic_cast(unsigned char, *p)))
        {
            exponent = exponent * 10 + (*p - '0');
            ++p;
        }
    }

    /* combine the explicit exponent with the one implied by the fraction */
    if (expSign)
        exponent = fracExp - exponent;
    else
        exponent = fracExp + exponent;

    /*
     * Generate a floating-point number that represents the exponent.
     * Do this by processing the exponent one bit at a time to combine
     * many powers of 2 of 10. Then combine the exponent with the
     * fraction.
     */
    if (exponent < 0)
    {
        expSign = 1;
        exponent = -exponent;
    }
    else
        expSign = 0;
    /* clamp to the largest meaningful decimal exponent */
    if (exponent > ATOF_MAXEXPONENT) exponent = ATOF_MAXEXPONENT;
    double dblExp = 1.0;
    for (const double *d = atof_powersOf10; exponent != 0; exponent >>= 1, ++d)
    {
        if (exponent & 01) dblExp *= *d;
    }
    if (expSign)
        fraction /= dblExp;
    else
        fraction *= dblExp;

    if (success) *success = OFTrue;
    if (sign) return -fraction;
    return fraction;
}

#endif /* DISABLE_OFSTD_ATOF */


// --- constants and helpers for the hand-rolled ftoa() implementation below ---

/* 11-bit exponent (VAX G floating point) is 308 decimal digits */
#define FTOA_MAXEXP 308
/* 128 bit fraction takes up 39 decimal digits; max reasonable precision */
#define FTOA_MAXFRACT 39
/* default precision */
#define FTOA_DEFPREC 6
/* internal buffer size for ftoa code */
#define FTOA_BUFSIZE (FTOA_MAXEXP+FTOA_MAXFRACT+1)
#define FTOA_TODIGIT(c) ((c) - '0')
#define FTOA_TOCHAR(n)  ((n) + '0')
#define FTOA_FORMAT_MASK 0x03 /* and mask for format flags */
#define FTOA_FORMAT_E OFStandard::ftoa_format_e
#define FTOA_FORMAT_F OFStandard::ftoa_format_f
#define FTOA_FORMAT_UPPERCASE OFStandard::ftoa_uppercase
#define FTOA_ALTERNATE_FORM OFStandard::ftoa_alternate
#define FTOA_LEFT_ADJUSTMENT OFStandard::ftoa_leftadj
#define FTOA_ZEROPAD OFStandard::ftoa_zeropad

#ifdef DISABLE_OFSTD_FTOA

void OFStandard::ftoa(
  char *dst,
  size_t siz,
  double val,
  unsigned int flags,
  int width,
  int prec)
{
    // this version of the function uses sprintf to format the output string.
    // Since we have to assemble the sprintf format string, this version might
    // even be slower than the alternative implementation.
char buf[FTOA_BUFSIZE];
    OFString s("%"); // this will become the format string
    unsigned char fmtch = 'G';

    // check if val is NAN
    if (OFMath::isnan(val))
    {
        OFStandard::strlcpy(dst, "nan", siz);
        return;
    }

    // check if val is infinity
    if (OFMath::isinf(val))
    {
        if (val < 0)
            OFStandard::strlcpy(dst, "-inf", siz);
        else
            OFStandard::strlcpy(dst, "inf", siz);
        return;
    }

    // determine format character
    if (flags & FTOA_FORMAT_UPPERCASE)
    {
        if ((flags & FTOA_FORMAT_MASK) == FTOA_FORMAT_E)
            fmtch = 'E';
        else if ((flags & FTOA_FORMAT_MASK) == FTOA_FORMAT_F)
            fmtch = 'f'; // there is no uppercase for 'f'
        else
            fmtch = 'G';
    }
    else
    {
        if ((flags & FTOA_FORMAT_MASK) == FTOA_FORMAT_E)
            fmtch = 'e';
        else if ((flags & FTOA_FORMAT_MASK) == FTOA_FORMAT_F)
            fmtch = 'f';
        else
            fmtch = 'g';
    }

    // assemble the printf format string from the flag bits
    if (flags & FTOA_ALTERNATE_FORM) s += "#";
    if (flags & FTOA_LEFT_ADJUSTMENT) s += "-";
    if (flags & FTOA_ZEROPAD) s += "0";
    if (width > 0)
    {
        sprintf(buf, "%d", width);
        s += buf;
    }
    if (prec >= 0)
    {
        sprintf(buf, ".%d", prec);
        s += buf;
    }
    s += fmtch;

    // let sprintf do the actual conversion, then copy into the caller's buffer
    sprintf(buf, s.c_str(), val);
    OFStandard::strlcpy(dst, buf, siz);
}

#else

/** internal helper class that maintains a string buffer
 *  to which characters can be written. If the string buffer
 *  gets full, additional characters are discarded.
 *  The string buffer does not guarantee zero termination.
 */
class FTOAStringBuffer
{
public:
  /** constructor
   *  @param theSize desired size of string buffer, in bytes
   */
  FTOAStringBuffer(unsigned long theSize)
  : buf_(NULL)
  , offset_(0)
  , size_(theSize)
  {
    if (size_ > 0) buf_ = new char[size_];
  }

  /// destructor
  ~FTOAStringBuffer()
  {
    delete[] buf_;
  }

  /** add one character to string buffer. Never overwrites
   *  buffer boundary.
   *  @param c character to add
   */
  inline void put(unsigned char c)
  {
    if (buf_ && (offset_ < size_)) buf_[offset_++] = c;
  }

  // return pointer to string buffer
  const char *getBuffer() const
  {
    return buf_;
  }

private:
  /// pointer to string buffer
  char *buf_;

  /// current offset within buffer
  unsigned long offset_;

  /// size of buffer
  unsigned long size_;

  /// private undefined copy constructor
  FTOAStringBuffer(const FTOAStringBuffer &old);

  /// private undefined assignment operator
  FTOAStringBuffer &operator=(const FTOAStringBuffer &obj);
};


/** writes the given format character and exponent to output string p.
 *  @param p pointer to target string
 *  @param exponent exponent to print
 *  @param fmtch format character
 *  @return pointer to next unused character in output string
 */
static char *ftoa_exponent(char *p, int exponent, char fmtch)
{
  char expbuf[FTOA_MAXEXP];

  *p++ = fmtch;
  if (exponent < 0)
  {
    exponent = -exponent;
    *p++ = '-';
  }
  else *p++ = '+';

  /* render exponent digits right-to-left into expbuf, then copy out */
  char *t = expbuf + FTOA_MAXEXP;
  if (exponent > 9)
  {
    do
    {
      *--t = OFstatic_cast(char, FTOA_TOCHAR(exponent % 10));
    } while ((exponent /= 10) > 9);
    *--t = OFstatic_cast(char, FTOA_TOCHAR(exponent));
    for (; t < expbuf + FTOA_MAXEXP; *p++ = *t++) /* nothing */;
  }
  else
  {
    /* always print at least two exponent digits */
    *p++ = '0';
    *p++ = OFstatic_cast(char, FTOA_TOCHAR(exponent));
  }
  return p;
}


/** round given fraction and adjust text string if round up.
 *  @param fract fraction to round
 *  @param expon pointer to exponent, may be NULL
 *  @param start pointer to start of string to round
 *  @param end pointer to one char after end of string
 *  @param ch if fract is zero, this character is interpreted as fraction*10 instead
 *  @param signp pointer to sign character, '-' or 0.
 *  @return adjusted pointer to start of rounded string, may be start or start-1.
 */
static char *ftoa_round(double fract, int *expon, char *start, char *end, char ch, char *signp)
{
  double tmp;

  if (fract) (void) modf(fract * 10, &tmp);
  else tmp = FTOA_TODIGIT(ch);

  if (tmp > 4)   /* round up: propagate the carry from the last digit */
  {
    for (;; --end)
    {
      if (*end == '.') --end;
      if (++*end <= '9') break;
      *end = '0';
      if (end == start)
      {
        if (expon) /* e/E; increment exponent */
        {
          *end = '1';
          ++*expon;
        }
        else /* f; add extra digit */
        {
          *--end = '1';
          --start;
        }
        break;
      }
    }
  }
  /* ``"%.3f", (double)-0.0004'' gives you a negative 0. */
  else if (*signp == '-')
  {
    for (;; --end)
    {
      if (*end == '.') --end;
      if (*end != '0') break;
      if (end == start) *signp = 0; // suppress negative 0
    }
  }
  return start;
}


/** convert double value to string, without padding.
 *  NOTE(review): derived from the BSD libc vfprintf %[eEfgG] conversion code;
 *  the pointer arithmetic and fall-through between cases is intentional.
 *  @param val double value to be formatted
 *  @param prec precision, adjusted for FTOA_MAXFRACT
 *  @param flags formatting flags
 *  @param signp pointer to sign character, '-' or 0.
 *  @param fmtch format character
 *  @param startp pointer to start of target buffer
 *  @param endp pointer to one char after end of target buffer
 *  @return number of characters written to the buffer
 */
static int ftoa_convert(double val, int prec, int flags, char *signp, char fmtch, char *startp, char *endp)
{
  char *p;
  double fract;
  int dotrim = 0;
  int expcnt = 0;
  int gformat = 0;
  double integer, tmp;

  fract = modf(val, &integer);

  /* get an extra slot for rounding. */
  char *t = ++startp;

  /*
   * get integer portion of val; put into the end of the buffer; the
   * .01 is added for modf(356.0 / 10, &integer) returning .59999999...
   */
  for (p = endp - 1; integer; ++expcnt)
  {
    tmp = modf(integer / 10, &integer);
    *p-- = OFstatic_cast(char, FTOA_TOCHAR(OFstatic_cast(int, (tmp + .01) * 10)));
  }

  switch (fmtch)
  {
    case 'f':
      /* reverse integer into beginning of buffer */
      if (expcnt)
      {
        for (; ++p < endp; *t++ = *p);
      }
      else *t++ = '0';
      /*
       * if precision required or alternate flag set, add in a
       * decimal point.
       */
      if (prec || flags & FTOA_ALTERNATE_FORM) *t++ = '.';
      /* if requires more precision and some fraction left */
      if (fract)
      {
        if (prec) do
        {
          fract = modf(fract * 10, &tmp);
          *t++ = OFstatic_cast(char, FTOA_TOCHAR(OFstatic_cast(int, tmp)));
        } while (--prec && fract);
        if (fract)
        {
          startp = ftoa_round(fract, OFstatic_cast(int *, NULL), startp, t - 1,
            OFstatic_cast(char, 0), signp);
        }
      }
      for (; prec--; *t++ = '0');
      break;

    case 'e':
    case 'E':
eformat:
      if (expcnt)
      {
        *t++ = *++p;
        if (prec || flags&FTOA_ALTERNATE_FORM) *t++ = '.';
        /* if requires more precision and some integer left */
        for (; prec && ++p < endp; --prec) *t++ = *p;
        /*
         * if done precision and more of the integer component,
         * round using it; adjust fract so we don't re-round
         * later.
         */
        if (!prec && ++p < endp)
        {
          fract = 0;
          startp = ftoa_round(OFstatic_cast(double, 0), &expcnt, startp, t - 1, *p, signp);
        }
        /* adjust expcnt for digit in front of decimal */
        --expcnt;
      }
      /* until first fractional digit, decrement exponent */
      else if (fract)
      {
        /* adjust expcnt for digit in front of decimal */
        for (expcnt = -1;; --expcnt)
        {
          fract = modf(fract * 10, &tmp);
          if (tmp) break;
        }
        *t++ = OFstatic_cast(char, FTOA_TOCHAR(OFstatic_cast(int, tmp)));
        if (prec || flags&FTOA_ALTERNATE_FORM) *t++ = '.';
      }
      else
      {
        *t++ = '0';
        if (prec || flags&FTOA_ALTERNATE_FORM) *t++ = '.';
      }
      /* if requires more precision and some fraction left */
      if (fract)
      {
        if (prec) do
        {
          fract = modf(fract * 10, &tmp);
          *t++ = OFstatic_cast(char, FTOA_TOCHAR(OFstatic_cast(int, tmp)));
        } while (--prec && fract);
        if (fract)
        {
          startp = ftoa_round(fract, &expcnt, startp, t - 1,
            OFstatic_cast(char, 0), signp);
        }
      }
      /* if requires more precision */
      for (; prec--; *t++ = '0');

      /* unless alternate flag, trim any g/G format trailing 0's */
      if (gformat && !(flags&FTOA_ALTERNATE_FORM))
      {
        while (t > startp && *--t == '0') /* nothing */;
        if (*t == '.') --t;
        ++t;
      }
      t = ftoa_exponent(t, expcnt, fmtch);
      break;

    case 'g':
    case 'G':
      /* a precision of 0 is treated as a precision of 1. */
      if (!prec) ++prec;
      /*
       * ``The style used depends on the value converted; style e
       * will be used only if the exponent resulting from the
       * conversion is less than -4 or greater than the precision.''
       *      -- ANSI X3J11
       */
      if (expcnt > prec || (!expcnt && fract && fract < .0001))
      {
        /*
         * g/G format counts "significant digits, not digits of
         * precision; for the e/E format, this just causes an
         * off-by-one problem, i.e. g/G considers the digit
         * before the decimal point significant and e/E doesn't
         * count it as precision.
         */
        --prec;
        fmtch = OFstatic_cast(char, fmtch - 2); /* G->E, g->e */
        gformat = 1;
        goto eformat;
      }
      /*
       * reverse integer into beginning of buffer,
       * note, decrement precision
       */
      if (expcnt)
      {
        for (; ++p < endp; *t++ = *p, --prec);
      }
      else *t++ = '0';
      /*
       * if precision required or alternate flag set, add in a
       * decimal point.  If no digits yet, add in leading 0.
       */
      if (prec || flags&FTOA_ALTERNATE_FORM)
      {
        dotrim = 1;
        *t++ = '.';
      }
      else dotrim = 0;
      /* if requires more precision and some fraction left */
      if (fract)
      {
        if (prec)
        {
          /* skip leading fractional zeroes (they are not significant) */
          do
          {
            fract = modf(fract * 10, &tmp);
            *t++ = OFstatic_cast(char, FTOA_TOCHAR(OFstatic_cast(int, tmp)));
          } while(!tmp);
          while (--prec && fract)
          {
            fract = modf(fract * 10, &tmp);
            *t++ = OFstatic_cast(char, FTOA_TOCHAR(OFstatic_cast(int, tmp)));
          }
        }
        if (fract)
        {
          startp = ftoa_round(fract, OFstatic_cast(int *, NULL), startp, t - 1,
            OFstatic_cast(char, 0), signp);
        }
      }
      /* alternate format, adds 0's for precision, else trim 0's */
      if (flags&FTOA_ALTERNATE_FORM)
        for (; prec--; *t++ = '0') /* nothing */;
      else if (dotrim)
      {
        while (t > startp && *--t == '0') /* nothing */;
        if (*t != '.') ++t;
      }
  } /* end switch */

  return OFstatic_cast(int, t - startp);
}


void OFStandard::ftoa(
  char *dst,
  size_t siz,
  double val,
  unsigned int flags,
  int width,
  int prec)
{
  // if target string is NULL or zero bytes long, bail out.
if (!dst || !siz) return;

  // check if val is NAN
  if (OFMath::isnan(val))
  {
    OFStandard::strlcpy(dst, "nan", siz);
    return;
  }

  // check if val is infinity
  if (OFMath::isinf(val))
  {
    if (val < 0)
      OFStandard::strlcpy(dst, "-inf", siz);
    else
      OFStandard::strlcpy(dst, "inf", siz);
    return;
  }

  int fpprec = 0;     /* `extra' floating precision in [eEfgG] */
  char softsign = 0;  /* temporary negative sign for floats */
  char buf[FTOA_BUFSIZE]; /* space for %c, %[diouxX], %[eEfgG] */
  char sign = '\0';   /* sign prefix (' ', '+', '-', or \0) */
  int n;
  unsigned char fmtch = 'G';
  FTOAStringBuffer sb(FTOA_BUFSIZE+1);

  // determine format character
  if (flags & FTOA_FORMAT_UPPERCASE)
  {
    if ((flags & FTOA_FORMAT_MASK) == FTOA_FORMAT_E)
      fmtch = 'E';
    else if ((flags & FTOA_FORMAT_MASK) == FTOA_FORMAT_F)
      fmtch = 'f'; // there is no uppercase for 'f'
    else
      fmtch = 'G';
  }
  else
  {
    if ((flags & FTOA_FORMAT_MASK) == FTOA_FORMAT_E)
      fmtch = 'e';
    else if ((flags & FTOA_FORMAT_MASK) == FTOA_FORMAT_F)
      fmtch = 'f';
    else
      fmtch = 'g';
  }

  // don't do unrealistic precision; just pad it with zeroes later,
  // so buffer size stays rational.
  if (prec > FTOA_MAXFRACT)
  {
    if ((fmtch != 'g' && fmtch != 'G') || (flags&FTOA_ALTERNATE_FORM)) fpprec = prec - FTOA_MAXFRACT;
    prec = FTOA_MAXFRACT;
  }
  else if (prec == -1)
    prec = FTOA_DEFPREC;

  /*
   * softsign avoids negative 0 if val is < 0 and
   * no significant digits will be shown
   */
  if (val < 0)
  {
    softsign = '-';
    val = -val;
  }
  else softsign = 0;

  /*
   * ftoa_convert may have to round up past the "start" of the
   * buffer, i.e. ``intf("%.2f", (double)9.999);'';
   * if the first char isn't \0, it did.
   */
  *buf = 0;
  int size = ftoa_convert(val, prec, flags, &softsign, fmtch, buf, buf + sizeof(buf));
  if (softsign) sign = '-';
  char *t = *buf ? buf : buf + 1;

  /* At this point, `t' points to a string which (if not flags&FTOA_LEFT_ADJUSTMENT)
   * should be padded out to `width' places.  If flags&FTOA_ZEROPAD, it should
   * first be prefixed by any sign or other prefix; otherwise, it should be
   * blank padded before the prefix is emitted.  After any left-hand
   * padding, print the string proper, then emit zeroes required by any
   * leftover floating precision; finally, if FTOA_LEFT_ADJUSTMENT, pad with blanks.
   *
   * compute actual size, so we know how much to pad
   */
  int fieldsz = size + fpprec;
  if (sign) fieldsz++;

  /* right-adjusting blank padding */
  if ((flags & (FTOA_LEFT_ADJUSTMENT|FTOA_ZEROPAD)) == 0 && width)
  {
    for (n = fieldsz; n < width; n++) sb.put(' ');
  }

  /* prefix */
  if (sign) sb.put(sign);

  /* right-adjusting zero padding */
  if ((flags & (FTOA_LEFT_ADJUSTMENT|FTOA_ZEROPAD)) == FTOA_ZEROPAD)
    for (n = fieldsz; n < width; n++) sb.put('0');

  /* the string or number proper */
  n = size;
  while (--n >= 0) sb.put(*t++);

  /* trailing f.p. zeroes */
  while (--fpprec >= 0) sb.put('0');

  /* left-adjusting padding (always blank) */
  if (flags & FTOA_LEFT_ADJUSTMENT)
    for (n = fieldsz; n < width; n++) sb.put(' ');

  /* zero-terminate string */
  sb.put(0);

  /* copy result from char buffer to output array */
  const char *c = sb.getBuffer();
  if (c) OFStandard::strlcpy(dst, c, siz);
  else *dst = 0;
}

#endif /* DISABLE_OFSTD_FTOA */


/* Sleep for the given number of seconds.
 * Uses whichever platform facility is available (Sleep/sleep/usleep).
 * Returns the remaining time (only meaningful for the sleep() branch). */
unsigned int OFStandard::my_sleep(unsigned int seconds)
{
#ifdef HAVE_WINDOWS_H
  // on Win32 we use the Sleep() system call which expects milliseconds
  Sleep(1000*seconds);
  return 0;
#elif defined(HAVE_SLEEP)
  // just use the original sleep() system call
  return sleep(seconds);
#elif defined(HAVE_USLEEP)
  // usleep() expects microseconds
  (void) usleep(OFstatic_cast(unsigned long, seconds)*1000000UL);
  return 0;
#else
  // don't know how to sleep
  return 0;
#endif
}

/* Sleep for the given number of milliseconds.
 * Falls back to select() with a timeout when neither Sleep() nor usleep()
 * is available. */
void OFStandard::milliSleep(unsigned int millisecs)
{
#ifdef HAVE_WINDOWS_H
    // on Win32 we use the Sleep() system call which expects milliseconds
    Sleep(millisecs);
#elif defined(HAVE_USLEEP)
    // usleep() expects microseconds
    (void) usleep(OFstatic_cast(useconds_t, millisecs * 1000UL));
#else
    // select() with no file descriptors acts as a portable sub-second sleep
    struct timeval t;
    t.tv_sec = millisecs / 1000;
    t.tv_usec = (millisecs % 1000) * 1000;
    select(0, NULL, NULL, NULL, &t);
#endif
}

/* Return the ID of the calling process (0 if the platform offers no way). */
long OFStandard::getProcessID()
{
#ifdef _WIN32
  return _getpid();
#elif defined(HAVE_GETPID)
  return getpid();
#else
  return 0; // Workaround for MAC
#endif
}

/* Largest value OFrand_r() can return (2^31 - 1). */
const unsigned int OFrandr_max = 0x7fffffff;

/* Reentrant pseudo-random number generator (linear congruential,
 * same constants as the classic C library rand()).
 * @param seed caller-owned state, updated on each call
 * @return pseudo-random value in [0, OFrandr_max]
 */
int OFrand_r(unsigned int &seed)
{
  unsigned long val = OFstatic_cast(unsigned long, seed);
  val = val * 1103515245 + 12345;
  seed = OFstatic_cast(unsigned int, val %(OFstatic_cast(unsigned long, 0x80000000)));
  return OFstatic_cast(int, seed);
}

/* upper bound for the scratch buffers used by the *_r lookup wrappers below */
#define MAX_NAME 65536

/* Thread-safe(r) wrapper around gethostbyname(): uses gethostbyname_r()
 * where available, growing the scratch buffer until the result fits
 * (up to MAX_NAME bytes). The result is copied into an OFHostent value. */
OFStandard::OFHostent OFStandard::getHostByName( const char* name )
{
#ifdef HAVE_GETHOSTBYNAME_R
  unsigned int size = 128;
  char* tmp = new char[size];
  hostent* res = NULL;
  hostent buf;
  int err = 0;
  while( gethostbyname_r( name, &buf, tmp, size, &res, &err ) == ERANGE )
  {
    delete[] tmp;
    if( size >= MAX_NAME )
      return NULL;
    tmp = new char[size*=2];
  }
  OFHostent h( res );
  delete[] tmp;
  return h;
#else
  return OFHostent( gethostbyname( name ) );
#endif
}

/* Thread-safe(r) wrapper around gethostbyaddr(), analogous to getHostByName(). */
OFStandard::OFHostent OFStandard::getHostByAddr( const char* addr, int len, int type )
{
#ifdef HAVE_GETHOSTBYADDR_R
  unsigned size = 32;
  char* tmp = new char[size];
  hostent* res = NULL;
  hostent buf;
  int err = 0;
  while( gethostbyaddr_r( addr, len, type, &buf, tmp, size, &res, &err ) == ERANGE )
  {
    delete[] tmp;
    if( size >= MAX_NAME )
      return NULL;
    tmp = new char[size*=2];
  }
  OFHostent h( res );
  delete[] tmp;
  return h;
#else
  return OFHostent( gethostbyaddr( addr, len, type ) );
#endif
}

#ifdef HAVE_GRP_H
/* Thread-safe(r) wrapper around getgrnam(), analogous to getHostByName(). */
OFStandard::OFGroup OFStandard::getGrNam( const char* name )
{
#ifdef HAVE_GETGRNAM_R
  unsigned size = 32;
  char* tmp = new char[size];
  group* res = NULL;
  group buf;
  while( getgrnam_r( name, &buf, tmp, size, &res ) == ERANGE )
  {
    delete[] tmp;
    if( size >= MAX_NAME )
      return NULL;
    tmp = new char[size*=2];
  }
  OFGroup g( res );
  delete[] tmp;
  return g;
#elif defined HAVE_GETGRNAM
  return OFGroup( getgrnam( name ) );
#else
  return OFGroup( NULL );
#endif
}
#endif // HAVE_GRP_H

#ifdef HAVE_PWD_H
/* Thread-safe(r) wrapper around getpwnam(): uses getpwnam_r() where available,
 * growing the scratch buffer until the result fits (up to MAX_NAME bytes). */
OFStandard::OFPasswd OFStandard::getPwNam( const char* name )
{
#ifdef HAVE_GETPWNAM_R
  unsigned size = 32;
  char* tmp = new char[size];
  passwd* res = NULL;
  passwd buf;
  while( getpwnam_r( name, &buf, tmp, size, &res ) == ERANGE )
  {
    delete[] tmp;
    if( size >= MAX_NAME )
      return NULL;
    tmp = new char[size*=2];
  }
  OFPasswd p( res );
  delete[] tmp;
  return p;
#elif defined HAVE_GETPWNAM
  return OFPasswd( getpwnam( name ) );
#else
  return OFPasswd( NULL );
#endif
}
#endif // HAVE_PWD_H

/* default constructor: creates an "invalid" host entry (ok == OFFalse) */
OFStandard::OFHostent::OFHostent()
: h_name()
, h_aliases()
, h_addr_list()
, h_addrtype()
, h_length()
, ok( OFFalse )
{

}

/* copies the contents of a struct hostent into self-contained OFString
 * members, so the result remains valid after the C library's static or
 * scratch buffer is reused; a NULL argument yields an invalid entry */
OFStandard::OFHostent::OFHostent( hostent* const h )
: h_name()
, h_aliases()
, h_addr_list()
, h_addrtype()
, h_length()
, ok(h != NULL)
{
  if( ok )
  {
    h_name = h->h_name;
    h_addrtype = h->h_addrtype;
    h_length = h->h_length;
    for( char** a = h->h_aliases; *a; ++a )
      h_aliases.push_back( *a );
    /* addresses are binary data of h_length bytes, not NUL-terminated */
    for( char** b = h->h_addr_list; *b; ++b )
      h_addr_list.push_back( OFString( *b, h_length ) );
  }
}

/* returns OFTrue if the entry is invalid */
OFBool OFStandard::OFHostent::operator!() const
{
  return !ok;
}

/* returns OFTrue if the entry is valid */
OFStandard::OFHostent::operator OFBool() const
{
  return ok;
}

#ifdef HAVE_GRP_H
/* default constructor: creates an "invalid" group entry (ok == OFFalse) */
OFStandard::OFGroup::OFGroup()
: gr_name()
, gr_passwd()
, gr_mem()
, gr_gid()
, ok( OFFalse )
{

}

/* deep-copies a struct group; a NULL argument yields an invalid entry */
OFStandard::OFGroup::OFGroup( group* const g )
: gr_name()
, gr_passwd()
, gr_mem()
, gr_gid()
, ok( g != NULL )
{
  if( ok )
  {
    gr_name = g->gr_name;
    gr_passwd = g->gr_passwd;
    gr_gid = g->gr_gid;
    for( char** m = g->gr_mem; *m; ++m )
      gr_mem.push_back( *m );
  }
}

/* returns OFTrue if the entry is invalid */
OFBool OFStandard::OFGroup::operator!() const
{
  return !ok;
}

/* returns OFTrue if the entry is valid */
OFStandard::OFGroup::operator OFBool() const
{
  return ok;
}
#endif // #ifdef HAVE_GRP_H

#ifdef HAVE_PWD_H
/* default constructor: creates an "invalid" passwd entry (ok == OFFalse) */
OFStandard::OFPasswd::OFPasswd()
: pw_name()
, pw_passwd()
, pw_gecos()
, pw_dir()
, pw_shell()
, pw_uid()
, pw_gid()
, ok( OFFalse )
{

}

/* deep-copies a struct passwd; a NULL argument yields an invalid entry */
OFStandard::OFPasswd::OFPasswd( passwd* const p )
: pw_name()
, pw_passwd()
, pw_gecos()
, pw_dir()
, pw_shell()
, pw_uid()
, pw_gid()
, ok( p != NULL )
{
  if( ok )
  {
    pw_name = p->pw_name;
    pw_passwd = p->pw_passwd;
    pw_uid = p->pw_uid;
    pw_gid = p->pw_gid;
#ifdef HAVE_PASSWD_GECOS
    /* the gecos field is not mandated by POSIX, hence the feature test */
    pw_gecos = p->pw_gecos;
#endif
    pw_dir = p->pw_dir;
    pw_shell = p->pw_shell;
  }
}

/* returns OFTrue if the entry is invalid */
OFBool OFStandard::OFPasswd::operator!() const
{
  return !ok;
}

/* returns OFTrue if the entry is valid */
OFStandard::OFPasswd::operator OFBool() const
{
  return ok;
}
#endif // HAVE_PWD_H

/* Permanently drop root privileges by resetting the effective to the real
 * user ID. A no-op on platforms without setuid/getuid.
 * @return EC_Normal on success (or when not running setuid root),
 *   EC_setuidFailed when setuid() fails for a reason other than EPERM */
OFCondition OFStandard::dropPrivileges()
{
#if defined(HAVE_SETUID) && defined(HAVE_GETUID)
    if ((setuid(getuid()) != 0) && (errno != EPERM))
    {
      /* setuid returning nonzero means that the setuid() operation has failed.
       * An errno code of EPERM means that the application was never running with root
       * privileges, i.e. was not installed with setuid root, which is safe and harmless.
       * Other error codes (in particular EAGAIN) signal a problem. Most likely the
       * calling user has already reached the maximum number of permitted processes.
       * In this case the application should rather terminate than continue with
       * full root privileges.
       */
      return EC_setuidFailed;
    }
#endif
    return EC_Normal;
}

#ifndef DCMTK_USE_CXX11_STL
/* pre-C++11 shim objects: definitions for the nullptr/nullopt/ignore
 * placeholders declared in the ofstd headers */
DCMTK_OFSTD_EXPORT OFnullptr_t OFnullptr;
DCMTK_OFSTD_EXPORT OFnullopt_t OFnullopt;
static const OFignore_t OFignore_value;
DCMTK_OFSTD_EXPORT const OFignore_t& OFignore( OFignore_value );
OFtuple<> OFmake_tuple() { return OFtuple<>(); }
OFtuple<> OFtie() { return OFtuple<>(); }
#endif

/* Determine the login name of the current user, using the best facility
 * the platform offers; returns a placeholder string when none is available. */
OFString OFStandard::getUserName()
{
#ifdef _WIN32
    WKSTA_USER_INFO_0 *userinfo;
    if( NetWkstaUserGetInfo( OFnullptr, 0, OFreinterpret_cast( LPBYTE*, &userinfo ) ) != NERR_Success )
        return "<no-user-information-available>";
    // Convert the Unicode full name to ANSI.
    const WCHAR* const name = OFstatic_cast( WCHAR*, userinfo->wkui0_username );
    OFVector<char> buf( wcslen( name ) * 2 );
    WideCharToMultiByte
    (
        CP_ACP, 0, name, -1,
        &*buf.begin(), buf.size(),
        OFnullptr, OFnullptr
    );
    return &*buf.begin();
#elif defined(HAVE_CUSERID)
    char buf[L_cuserid];
    return cuserid( buf );
#elif defined(HAVE_GETLOGIN)
#if defined(_REENTRANT) && !defined(_WIN32) && !defined(__CYGWIN__)
    // use getlogin_r instead of getlogin
    char buf[513];
    if( getlogin_r( buf, 512 ) != 0 )
        return "<no-utmp-entry>";
    buf[512] = 0;
    return buf;
#else // thread unsafe
    if( const char* s = getlogin() )
        return s;
    return "<no-utmp-entry>";
#endif
#else
    return "<unknown-user>";
#endif
}

/* Determine the (unqualified) name of the local host; falls back to
 * "localhost" when the platform offers no lookup facility. */
OFString OFStandard::getHostName()
{
#ifdef HAVE_UNAME
    struct utsname n;
    uname( &n );
    return n.nodename;
#elif defined(HAVE_GETHOSTNAME)
    char buf[513];
    gethostname( buf, 512 );
    buf[512] = 0;
    return buf;
#else
    return "localhost";
#endif
}

/* Tag-dispatch helper for the OFoptional in-place construction API.
 * NOTE(review): dereferences a null pointer to fabricate a reference to the
 * empty tag type — never meant to be evaluated; this matches the upstream
 * declaration-only trick but is formally undefined behavior. */
DCMTK_OFSTD_EXPORT OFin_place_tag OFin_place()
{
    return *static_cast<OFin_place_tag*>(OFnullptr);
}
// Copyright 2010-2021, Google Inc. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #include "spelling/spellchecker_service.h" #include <fstream> #include <string> #include <utility> #include "base/file_util.h" #include "data_manager/data_manager.h" #include "protocol/commands.pb.h" #include "testing/base/public/gmock.h" #include "testing/base/public/googletest.h" #include "testing/base/public/gunit.h" #include "absl/status/status.h" #include "absl/strings/str_cat.h"
/****************************************************************************
** Meta object code from reading C++ file 'richtextmessagewidget.h'
**
** Created by: The Qt Meta Object Compiler version 67 (Qt 5.12.3)
**
** WARNING! All changes made in this file will be lost!
** (Generated file: do not hand-edit; re-run moc instead.)
*****************************************************************************/

#include "../../mainwindow/chat/bubble/richtextmessagewidget.h"
#include <QtCore/qbytearray.h>
#include <QtCore/qmetatype.h>
#if !defined(Q_MOC_OUTPUT_REVISION)
#error "The header file 'richtextmessagewidget.h' doesn't include <QObject>."
#elif Q_MOC_OUTPUT_REVISION != 67
#error "This file was generated using the moc from 5.12.3. It"
#error "cannot be used with the include files from this version of Qt."
#error "(The moc has changed too much.)"
#endif

QT_BEGIN_MOC_NAMESPACE
QT_WARNING_PUSH
QT_WARNING_DISABLE_DEPRECATED

// Packed string table holding the class, signal, slot and parameter names.
struct qt_meta_stringdata_RichTextMessageWidget_t {
    QByteArrayData data[8];
    char stringdata0[103];
};
#define QT_MOC_LITERAL(idx, ofs, len) \
    Q_STATIC_BYTE_ARRAY_DATA_HEADER_INITIALIZER_WITH_OFFSET(len, \
    qptrdiff(offsetof(qt_meta_stringdata_RichTextMessageWidget_t, stringdata0) + ofs \
        - idx * sizeof(QByteArrayData)) \
    )
static const qt_meta_stringdata_RichTextMessageWidget_t qt_meta_stringdata_RichTextMessageWidget = {
    {
QT_MOC_LITERAL(0, 0, 21), // "RichTextMessageWidget"
QT_MOC_LITERAL(1, 22, 20), // "noticeInputEditFocus"
QT_MOC_LITERAL(2, 43, 0), // ""
QT_MOC_LITERAL(3, 44, 19), // "signalSerRecallEdit"
QT_MOC_LITERAL(4, 64, 8), // "QString&"
QT_MOC_LITERAL(5, 73, 3), // "txt"
QT_MOC_LITERAL(6, 77, 9), // "onCopyMsg"
QT_MOC_LITERAL(7, 87, 15) // "onSerRecallEdit"
    },
    "RichTextMessageWidget\0noticeInputEditFocus\0"
    "\0signalSerRecallEdit\0QString&\0txt\0"
    "onCopyMsg\0onSerRecallEdit"
};
#undef QT_MOC_LITERAL

// Meta-method table: 2 signals + 2 protected slots, offsets into the
// string table above.
static const uint qt_meta_data_RichTextMessageWidget[] = {

 // content:
       8,       // revision
       0,       // classname
       0,    0, // classinfo
       4,   14, // methods
       0,    0, // properties
       0,    0, // enums/sets
       0,    0, // constructors
       0,       // flags
       2,       // signalCount

 // signals: name, argc, parameters, tag, flags
       1,    0,   34,    2, 0x06 /* Public */,
       3,    1,   35,    2, 0x06 /* Public */,

 // slots: name, argc, parameters, tag, flags
       6,    0,   38,    2, 0x09 /* Protected */,
       7,    0,   39,    2, 0x09 /* Protected */,

 // signals: parameters
    QMetaType::Void,
    QMetaType::Void, 0x80000000 | 4,    5,

 // slots: parameters
    QMetaType::Void,
    QMetaType::Void,

       0        // eod
};

// Static trampoline invoked by Qt's meta system to call signals/slots by
// index and to map member-function pointers back to their indices.
void RichTextMessageWidget::qt_static_metacall(QObject *_o, QMetaObject::Call _c, int _id, void **_a)
{
    if (_c == QMetaObject::InvokeMetaMethod) {
        auto *_t = static_cast<RichTextMessageWidget *>(_o);
        Q_UNUSED(_t)
        switch (_id) {
        case 0: _t->noticeInputEditFocus(); break;
        case 1: _t->signalSerRecallEdit((*reinterpret_cast< QString(*)>(_a[1]))); break;
        case 2: _t->onCopyMsg(); break;
        case 3: _t->onSerRecallEdit(); break;
        default: ;
        }
    } else if (_c == QMetaObject::IndexOfMethod) {
        int *result = reinterpret_cast<int *>(_a[0]);
        {
            using _t = void (RichTextMessageWidget::*)();
            if (*reinterpret_cast<_t *>(_a[1]) == static_cast<_t>(&RichTextMessageWidget::noticeInputEditFocus)) {
                *result = 0;
                return;
            }
        }
        {
            using _t = void (RichTextMessageWidget::*)(QString & );
            if (*reinterpret_cast<_t *>(_a[1]) == static_cast<_t>(&RichTextMessageWidget::signalSerRecallEdit)) {
                *result = 1;
                return;
            }
        }
    }
}

QT_INIT_METAOBJECT const QMetaObject RichTextMessageWidget::staticMetaObject = { {
    &AbstractMessageWidget::staticMetaObject,
    qt_meta_stringdata_RichTextMessageWidget.data,
    qt_meta_data_RichTextMessageWidget,
    qt_static_metacall,
    nullptr,
    nullptr
} };

const QMetaObject *RichTextMessageWidget::metaObject() const
{
    return QObject::d_ptr->metaObject ? QObject::d_ptr->dynamicMetaObject() : &staticMetaObject;
}

void *RichTextMessageWidget::qt_metacast(const char *_clname)
{
    if (!_clname) return nullptr;
    if (!strcmp(_clname, qt_meta_stringdata_RichTextMessageWidget.stringdata0))
        return static_cast<void*>(this);
    return AbstractMessageWidget::qt_metacast(_clname);
}

int RichTextMessageWidget::qt_metacall(QMetaObject::Call _c, int _id, void **_a)
{
    _id = AbstractMessageWidget::qt_metacall(_c, _id, _a);
    if (_id < 0)
        return _id;
    if (_c == QMetaObject::InvokeMetaMethod) {
        if (_id < 4)
            qt_static_metacall(this, _c, _id, _a);
        _id -= 4;
    } else if (_c == QMetaObject::RegisterMethodArgumentMetaType) {
        if (_id < 4)
            *reinterpret_cast<int*>(_a[0]) = -1;
        _id -= 4;
    }
    return _id;
}

// SIGNAL 0
void RichTextMessageWidget::noticeInputEditFocus()
{
    QMetaObject::activate(this, &staticMetaObject, 0, nullptr);
}

// SIGNAL 1
void RichTextMessageWidget::signalSerRecallEdit(QString & _t1)
{
    void *_a[] = { nullptr, const_cast<void*>(reinterpret_cast<const void*>(&_t1)) };
    QMetaObject::activate(this, &staticMetaObject, 1, _a);
}
QT_WARNING_POP
QT_END_MOC_NAMESPACE
#include "mediapipe_api/framework/formats/image_frame.h"

// C-ABI wrappers around mediapipe::ImageFrame for use from managed code.
// TRY/CATCH_EXCEPTION and TRY_ALL/CATCH_ALL are project macros that
// translate C++ exceptions into MpReturnCode values.

// Default-construct an empty ImageFrame; caller owns *image_frame_out.
MpReturnCode mp_ImageFrame__(mediapipe::ImageFrame** image_frame_out) {
  TRY {
    *image_frame_out = new mediapipe::ImageFrame();
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_EXCEPTION
}

// Construct an ImageFrame that allocates its own pixel buffer.
MpReturnCode mp_ImageFrame__ui_i_i_ui(mediapipe::ImageFormat::Format format, int width, int height,
                                      uint32 alignment_boundary, mediapipe::ImageFrame** image_frame_out) {
  TRY_ALL {
    *image_frame_out = new mediapipe::ImageFrame { format, width, height, alignment_boundary };
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_ALL
}

// Construct an ImageFrame adopting an existing pixel buffer; `deleter`
// is invoked to release `pixel_data` when the frame is destroyed.
MpReturnCode mp_ImageFrame__ui_i_i_i_Pui8_PF(mediapipe::ImageFormat::Format format, int width, int height,
                                             int width_step, uint8* pixel_data, Deleter* deleter,
                                             mediapipe::ImageFrame** image_frame_out) {
  TRY_ALL {
    *image_frame_out = new mediapipe::ImageFrame { format, width, height, width_step, pixel_data, deleter };
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_ALL
}

void mp_ImageFrame__delete(mediapipe::ImageFrame* image_frame) {
  delete image_frame;
}

bool mp_ImageFrame__IsEmpty(mediapipe::ImageFrame* image_frame) {
  return image_frame->IsEmpty();
}

MpReturnCode mp_ImageFrame__SetToZero(mediapipe::ImageFrame* image_frame) {
  TRY {
    image_frame->SetToZero();
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_EXCEPTION
}

MpReturnCode mp_ImageFrame__SetAlignmentPaddingAreas(mediapipe::ImageFrame* image_frame) {
  TRY {
    image_frame->SetAlignmentPaddingAreas();
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_EXCEPTION
}

bool mp_ImageFrame__IsContiguous(mediapipe::ImageFrame* image_frame) {
  return image_frame->IsContiguous();
}

MpReturnCode mp_ImageFrame__IsAligned__ui(mediapipe::ImageFrame* image_frame, uint32 alignment_boundary,
                                          bool* value_out) {
  TRY_ALL {
    *value_out = image_frame->IsAligned(alignment_boundary);
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_ALL
}

// ---- Simple property accessors (no exception translation needed) ----

mediapipe::ImageFormat::Format mp_ImageFrame__Format(mediapipe::ImageFrame* image_frame) {
  return image_frame->Format();
}

int mp_ImageFrame__Width(mediapipe::ImageFrame* image_frame) {
  return image_frame->Width();
}

int mp_ImageFrame__Height(mediapipe::ImageFrame* image_frame) {
  return image_frame->Height();
}

MpReturnCode mp_ImageFrame__ChannelSize(mediapipe::ImageFrame* image_frame, int* value_out) {
  TRY_ALL {
    *value_out = image_frame->ChannelSize();
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_ALL
}

MpReturnCode mp_ImageFrame__NumberOfChannels(mediapipe::ImageFrame* image_frame, int* value_out) {
  TRY_ALL {
    *value_out = image_frame->NumberOfChannels();
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_ALL
}

MpReturnCode mp_ImageFrame__ByteDepth(mediapipe::ImageFrame* image_frame, int* value_out) {
  TRY_ALL {
    *value_out = image_frame->ByteDepth();
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_ALL
}

int mp_ImageFrame__WidthStep(mediapipe::ImageFrame* image_frame) {
  return image_frame->WidthStep();
}

uint8* mp_ImageFrame__MutablePixelData(mediapipe::ImageFrame* image_frame) {
  return image_frame->MutablePixelData();
}

int mp_ImageFrame__PixelDataSize(mediapipe::ImageFrame* image_frame) {
  return image_frame->PixelDataSize();
}

MpReturnCode mp_ImageFrame__PixelDataSizeStoredContiguously(mediapipe::ImageFrame* image_frame, int* value_out) {
  TRY_ALL {
    *value_out = image_frame->PixelDataSizeStoredContiguously();
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_ALL
}

// ---- Copy-out helpers for each supported element type ----

MpReturnCode mp_ImageFrame__CopyToBuffer__Pui8_i(mediapipe::ImageFrame* image_frame, uint8* buffer,
                                                 int buffer_size) {
  TRY_ALL {
    image_frame->CopyToBuffer(buffer, buffer_size);
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_ALL
}

MpReturnCode mp_ImageFrame__CopyToBuffer__Pui16_i(mediapipe::ImageFrame* image_frame, uint16* buffer,
                                                  int buffer_size) {
  TRY_ALL {
    image_frame->CopyToBuffer(buffer, buffer_size);
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_ALL
}

MpReturnCode mp_ImageFrame__CopyToBuffer__Pf_i(mediapipe::ImageFrame* image_frame, float* buffer,
                                               int buffer_size) {
  TRY_ALL {
    image_frame->CopyToBuffer(buffer, buffer_size);
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_ALL
}

// StatusOr API

void mp_StatusOrImageFrame__delete(StatusOrImageFrame* status_or_image_frame) {
  delete status_or_image_frame;
}

bool mp_StatusOrImageFrame__ok(StatusOrImageFrame* status_or_image_frame) {
  return mp_StatusOr__ok(status_or_image_frame);
}

MpReturnCode mp_StatusOrImageFrame__status(StatusOrImageFrame* status_or_image_frame,
                                           mediapipe::Status** status_out) {
  return mp_StatusOr__status(status_or_image_frame, status_out);
}

MpReturnCode mp_StatusOrImageFrame__ConsumeValueOrDie(StatusOrImageFrame* status_or_image_frame,
                                                      mediapipe::ImageFrame** value_out) {
  return mp_StatusOr__ConsumeValueOrDie(status_or_image_frame, value_out);
}

// Packet API

// Moves *image_frame into a new Packet; the source frame is left moved-from.
MpReturnCode mp__MakeImageFramePacket__Pif(mediapipe::ImageFrame* image_frame, mediapipe::Packet** packet_out) {
  TRY {
    *packet_out = new mediapipe::Packet { mediapipe::MakePacket<mediapipe::ImageFrame>(std::move(*image_frame)) };
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_EXCEPTION
}

// Same as above but stamps the packet with *timestamp.
MpReturnCode mp__MakeImageFramePacket_At__Pif_Rtimestamp(mediapipe::ImageFrame* image_frame,
                                                         mediapipe::Timestamp* timestamp,
                                                         mediapipe::Packet** packet_out) {
  TRY {
    *packet_out = new mediapipe::Packet { mediapipe::MakePacket<mediapipe::ImageFrame>(std::move(*image_frame)).At(*timestamp) };
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_EXCEPTION
}

MpReturnCode mp_Packet__ConsumeImageFrame(mediapipe::Packet* packet, StatusOrImageFrame** status_or_value_out) {
  return mp_Packet__Consume(packet, status_or_value_out);
}

MpReturnCode mp_Packet__GetImageFrame(mediapipe::Packet* packet, const mediapipe::ImageFrame** value_out) {
  return mp_Packet__Get(packet, value_out);
}

MpReturnCode mp_Packet__ValidateAsImageFrame(mediapipe::Packet* packet, mediapipe::Status** status_out) {
  TRY {
    *status_out = new mediapipe::Status { packet->ValidateAsType<mediapipe::ImageFrame>() };
    RETURN_CODE(MpReturnCode::Success);
  } CATCH_EXCEPTION
}
// // Copyright 2016 Pixar // // Licensed under the Apache License, Version 2.0 (the "Apache License") // with the following modification; you may not use this file except in // compliance with the Apache License and the following modification to it: // Section 6. Trademarks. is deleted and replaced with: // // 6. Trademarks. This License does not grant permission to use the trade // names, trademarks, service marks, or product names of the Licensor // and its affiliates, except as required to comply with Section 4(c) of // the License and to reproduce the content of the NOTICE file. // // You may obtain a copy of the Apache License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the Apache License with the above modification is // distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the Apache License for the specific // language governing permissions and limitations under the Apache License. 
// // // FileIO_Common.cpp #include "pxr/usd/sdf/fileIO_Common.h" #include "pxr/usd/sdf/reference.h" #include <boost/assign.hpp> #include <fstream> #include <sstream> #include <cctype> using std::map; using std::ostream; using std::string; using std::vector; static const char *_IndentString = " "; // basically duplicate Vt::Array::_StreamRecursive() but specialize for // string arrays by putting in quotes template <class T> static void _StringFromVtStringArray( string *valueStr, const VtArray<T> &valArray) { valueStr->append("["); if (typename VtArray<T>::const_pointer d = valArray.cdata()) { if (const size_t n = valArray.size()) { valueStr->append(Sdf_FileIOUtility::Quote(d[0])); for (size_t i = 1; i != n; ++i) { valueStr->append(", "); valueStr->append(Sdf_FileIOUtility::Quote(d[i])); } } } valueStr->append("]"); } // Helper to created a quoted string if the given value is holding a // string-valued type (specified by T). template <class T> static bool _StringFromVtStringValue(string* valueStr, const VtValue& value) { if (value.IsHolding<T>()) { *valueStr = Sdf_FileIOUtility::Quote( value.UncheckedGet<T>() ); return true; } else if (value.IsHolding<VtArray<T> >()) { const VtArray<T>& valArray = value.UncheckedGet<VtArray<T> >(); _StringFromVtStringArray(valueStr,valArray); return true; } return false; } // ------------------------------------------------------------ // Helpers functions for writing SdfListOp<T>. Consumers can // specialize the _ListOpWriter struct for custom behavior based // on the element type of the list op. 
namespace {

// Generic writer: stringify the item; always bracket single items.
template <class T>
struct _ListOpWriter
{
    static constexpr bool ItemPerLine = false;
    static constexpr bool SingleItemRequiresBrackets(const T& item)
    {
        return true;
    }
    static void Write(ostream& out, size_t indent, const T& item)
    {
        Sdf_FileIOUtility::Write(out, indent, TfStringify(item).c_str());
    }
};

// Strings are written quoted.
template <>
struct _ListOpWriter<string>
{
    static constexpr bool ItemPerLine = false;
    static constexpr bool SingleItemRequiresBrackets(const string& s)
    {
        return true;
    }
    static void Write(ostream& out, size_t indent, const string& s)
    {
        Sdf_FileIOUtility::WriteQuotedString(out, indent, s);
    }
};

// Tokens are written quoted, like strings.
template <>
struct _ListOpWriter<TfToken>
{
    static constexpr bool ItemPerLine = false;
    static constexpr bool SingleItemRequiresBrackets(const TfToken& s)
    {
        return true;
    }
    static void Write(ostream& out, size_t indent, const TfToken& s)
    {
        Sdf_FileIOUtility::WriteQuotedString(out, indent, s.GetString());
    }
};

// Paths get one item per line and never need brackets for a single item.
template <>
struct _ListOpWriter<SdfPath>
{
    static constexpr bool ItemPerLine = true;
    static constexpr bool SingleItemRequiresBrackets(const SdfPath& path)
    {
        return false;
    }
    static void Write(ostream& out, size_t indent, const SdfPath& path)
    {
        Sdf_FileIOUtility::WriteSdfPath(out, indent, path);
    }
};

// References carry optional layer offsets and custom data; brackets (and a
// multi-line metadata block) are only required when custom data is present.
template <>
struct _ListOpWriter<SdfReference>
{
    static constexpr bool ItemPerLine = true;
    static bool SingleItemRequiresBrackets(const SdfReference& ref)
    {
        return not ref.GetCustomData().empty();
    }
    static void Write(ostream& out, size_t indent, const SdfReference& ref)
    {
        bool multiLineRefMetaData = not ref.GetCustomData().empty();

        Sdf_FileIOUtility::Write(out, indent, "");

        if (not ref.GetAssetPath().empty()) {
            Sdf_FileIOUtility::WriteAssetPath(out, 0, ref.GetAssetPath());
            if (not ref.GetPrimPath().IsEmpty())
                Sdf_FileIOUtility::WriteSdfPath(out, 0, ref.GetPrimPath());
        }
        else {
            // If this is an internal reference, we always have to write
            // out a path, even if it's empty since that encodes a reference
            // to the default prim.
            Sdf_FileIOUtility::WriteSdfPath(out, 0, ref.GetPrimPath());
        }

        if (multiLineRefMetaData) {
            Sdf_FileIOUtility::Puts(out, 0, " (\n");
        }
        Sdf_FileIOUtility::WriteLayerOffset(
            out, indent+1, multiLineRefMetaData, ref.GetLayerOffset());
        if (not ref.GetCustomData().empty()) {
            Sdf_FileIOUtility::Puts(out, indent+1, "customData = ");
            Sdf_FileIOUtility::WriteDictionary(
                out, indent+1, /* multiline = */ true, ref.GetCustomData());
        }
        if (multiLineRefMetaData) {
            Sdf_FileIOUtility::Puts(out, indent, ")");
        }
    }
};

// Write one operation list ("delete ...", "add ...", etc.) of a list op.
// Emits `None` for an empty list, a bare item when brackets aren't
// required, or a bracketed (possibly one-item-per-line) list otherwise.
template <class ListOpList>
void
_WriteListOpList(
    ostream& out, size_t indent,
    const string& name, const ListOpList& listOpList,
    const string& op = string())
{
    typedef _ListOpWriter<typename ListOpList::value_type> _Writer;

    Sdf_FileIOUtility::Write(out, indent, "%s%s%s = ",
                             op.c_str(), op.empty() ? "" : " ", name.c_str());

    if (listOpList.empty()) {
        Sdf_FileIOUtility::Puts(out, 0, "None\n");
    }
    else if (listOpList.size() == 1 and
             not _Writer::SingleItemRequiresBrackets(listOpList.front())) {
        _Writer::Write(out, 0, listOpList.front());
        Sdf_FileIOUtility::Puts(out, 0, "\n");
    }
    else {
        const bool itemPerLine = _Writer::ItemPerLine;

        Sdf_FileIOUtility::Puts(out, 0, itemPerLine ? "[\n" : "[");
        TF_FOR_ALL(it, listOpList) {
            _Writer::Write(out, itemPerLine ? indent + 1 : 0, *it);
            if (it.GetNext()) {
                Sdf_FileIOUtility::Puts(out, 0, itemPerLine ? ",\n" : ", ");
            }
            else {
                Sdf_FileIOUtility::Puts(out, 0, itemPerLine ? "\n" : "");
            }
        }
        Sdf_FileIOUtility::Puts(out, itemPerLine ? indent : 0, "]\n");
    }
}

// Write a complete SdfListOp: either its explicit list, or each of its
// non-empty delete/add/reorder lists with the corresponding keyword.
template <class ListOp>
void
_WriteListOp(
    ostream &out, size_t indent,
    const std::string& name, const ListOp& listOp)
{
    if (listOp.IsExplicit()) {
        _WriteListOpList(out, indent, name, listOp.GetExplicitItems());
    }
    else {
        if (not listOp.GetDeletedItems().empty()) {
            _WriteListOpList(out, indent, name,
                             listOp.GetDeletedItems(), "delete");
        }
        if (not listOp.GetAddedItems().empty()) {
            _WriteListOpList(out, indent, name,
                             listOp.GetAddedItems(), "add");
        }
        if (not listOp.GetOrderedItems().empty()) {
            _WriteListOpList(out, indent, name,
                             listOp.GetOrderedItems(), "reorder");
        }
    }
}

} // end anonymous namespace

// ------------------------------------------------------------

// Emit `indent` levels of indentation followed by `str`, verbatim.
void
Sdf_FileIOUtility::Puts(ostream &out, size_t indent, const std::string &str)
{
    for (size_t i=0; i < indent; ++i)
        out << _IndentString;

    out << str;
}

// Emit `indent` levels of indentation followed by printf-style output.
void
Sdf_FileIOUtility::Write(ostream &out, size_t indent, const char *fmt, ...)
{
    for (size_t i=0; i < indent; ++i)
        out << _IndentString;

    va_list ap;
    va_start(ap, fmt);
    out << TfVStringPrintf(fmt, ap);
    va_end(ap);
}

// Open a metadata paren block on first use, or emit the `;` separator on
// subsequent single-line use.  Always returns true so callers can track
// the "did parens" state with the return value.
bool
Sdf_FileIOUtility::OpenParensIfNeeded(ostream &out, bool didParens, bool multiLine)
{
    if (!didParens) {
        Puts(out, 0, multiLine ? " (\n" : " (");
    } else if (!multiLine) {
        Puts(out, 0, "; ");
    }
    return true;
}

// Close the metadata paren block previously opened by OpenParensIfNeeded.
void
Sdf_FileIOUtility::CloseParensIfNeeded(ostream &out, size_t indent, bool didParens, bool multiLine)
{
    if (didParens) {
        Puts(out, multiLine ? indent : 0, ")");
    }
}

void
Sdf_FileIOUtility::WriteQuotedString(ostream &out, size_t indent, const string &str)
{
    Puts(out, indent, Quote(str));
}

// Asset paths are delimited with '@' in the text format.
void
Sdf_FileIOUtility::WriteAssetPath(ostream &out, size_t indent, const string &str)
{
    Write(out, indent, "@%s@", str.c_str());
}

// Write " = <value>" for an attribute's default value.
void
Sdf_FileIOUtility::WriteDefaultValue(std::ostream &out, size_t indent, VtValue value)
{
    // ---
    // Special case for SdfPath value types
    // ---
    if (value.IsHolding<SdfPath>()) {
        WriteSdfPath(out, indent, value.Get<SdfPath>() );
        return;
    }

    // ---
    // General case value to string conversion and write-out.
    // ---
    std::string valueString = Sdf_FileIOUtility::StringFromVtValue(value);
    Sdf_FileIOUtility::Write(out, 0, " = %s", valueString.c_str());
}

// Write a path as <path>, optionally followed by a marker.
void
Sdf_FileIOUtility::WriteSdfPath(ostream &out, size_t indent, const SdfPath &path, const string &markerName)
{
    if (ARCH_LIKELY(markerName.empty())) {
        Write(out, indent, "<%s>", path.GetString().c_str());
    } else {
        // Unexpected! That used to mean, an explicitly authored current marker.
        if (markerName == "None") {
            TF_RUNTIME_ERROR(
                "Encountered 'None' marker, this should not happen.");
            WriteSdfPath(out, indent, path, "current");
        } else if (markerName == "authored") {
            TF_RUNTIME_ERROR("Authored markers can't be authored in menva as "
                             "by object modelling.");
            WriteSdfPath(out, indent, path);
        } else {
            // Built-in markers are written bare; others are path-delimited.
            const char *fmt = SdfPath::IsBuiltInMarker(markerName) ?
                "<%s> @ %s" : "<%s> @ <%s>";
            Write(out, indent, fmt, path.GetText(), markerName.c_str());
        }
    }
}

// Shared implementation for the two WriteNameVector overloads below:
// one quoted name, or a bracketed comma-separated list of quoted names.
// Note: `indent` is currently unused (everything is written at column 0).
template <class StrType>
static bool
_WriteNameVector(ostream &out, size_t indent, const vector<StrType> &vec)
{
    size_t i, c = vec.size();
    if (c>1) {
        Sdf_FileIOUtility::Puts(out, 0, "[");
    }
    for (i=0; i<c; i++) {
        if (i > 0) {
            Sdf_FileIOUtility::Puts(out, 0, ", ");
        }
        Sdf_FileIOUtility::WriteQuotedString(out, 0, vec[i]);
    }
    if (c>1) {
        Sdf_FileIOUtility::Puts(out, 0, "]");
    }
    return true;
}

bool
Sdf_FileIOUtility::WriteNameVector(ostream &out, size_t indent, const vector<string> &vec)
{
    return _WriteNameVector(out, indent, vec);
}

bool
Sdf_FileIOUtility::WriteNameVector(ostream &out, size_t indent, const vector<TfToken> &vec)
{
    return _WriteNameVector(out, indent, vec);
}

// Write "time: value," lines for each entry of a time sample map.
bool
Sdf_FileIOUtility::WriteTimeSamples(ostream &out, size_t indent, const SdfTimeSampleMap & samples)
{
    TF_FOR_ALL(i, samples) {
        Write(out, indent+1, "%g: ", i->first);
        if (i->second.IsHolding<SdfPath>()) {
            WriteSdfPath(out, 0, i->second.Get<SdfPath>() );
        } else {
            Puts(out, 0, StringFromVtValue( i->second ));
        }
        out << ",\n";
    }
    return true;
}

// Write a "relocates = { <src>: <dst>, ... }" block.
bool
Sdf_FileIOUtility::WriteRelocates(ostream &out, size_t indent, bool multiLine,
                                  const SdfRelocatesMap &reloMap)
{
    Write(out, indent, "relocates = %s", multiLine ? "{\n" : "{ ");
    size_t itemCount = reloMap.size();
    TF_FOR_ALL(it, reloMap) {
        WriteSdfPath(out, indent+1, it->first);
        Puts(out, 0, ": ");
        WriteSdfPath(out, 0, it->second);
        if (--itemCount > 0) {
            Puts(out, 0, ", ");
        }
        if (multiLine) {
            Puts(out, 0, "\n");
        }
    }
    if (multiLine) {
        Puts(out, indent, "}\n");
    }
    else {
        Puts(out, 0, " }");
    }
    return true;
}

// Write a (pointer-keyed, pre-ordered) dictionary.  When stringValuesOnly
// is true, only string-valued entries are emitted; otherwise each entry is
// written with its serialization type name, recursing into nested
// VtDictionary values.
void
Sdf_FileIOUtility::_WriteDictionary(ostream &out, size_t indent, bool multiLine,
                                    Sdf_FileIOUtility::_OrderedDictionary &dictionary,
                                    bool stringValuesOnly)
{
    Puts(out, 0, multiLine ? "{\n" : "{ ");
    size_t counter = dictionary.size();
    TF_FOR_ALL(i, dictionary) {
        counter--;
        const VtValue &value = *i->second;
        if (stringValuesOnly) {
            if (value.IsHolding<std::string>()) {
                WriteQuotedString(out, multiLine ? indent+1 : 0, *(i->first));
                Write(out, 0, ": ");
                WriteQuotedString(out, 0, value.Get<string>());
                if (counter > 0) {
                    Puts(out, 0, ", ");
                }
                if (multiLine) {
                    Puts(out, 0, "\n");
                }
            } else {
                // CODE_COVERAGE_OFF
                // This is not possible to hit with the current public API.
                TF_RUNTIME_ERROR("Dictionary has a non-string value under key "
                                 "\"%s\"; skipping",
                                 i->first->c_str());
                // CODE_COVERAGE_ON
            }
        } else {
            // Put quotes around the keyName if it is not a valid identifier
            string keyName = *(i->first);
            if (not TfIsValidIdentifier(keyName)) {
                keyName = "\"" + keyName + "\"";
            }
            if (value.IsHolding<VtDictionary>()) {
                Write(out, multiLine ? indent+1 : 0, "dictionary %s = ",
                      keyName.c_str());
                const VtDictionary &nestedDictionary =
                    value.Get<VtDictionary>();
                Sdf_FileIOUtility::_OrderedDictionary newDictionary;
                TF_FOR_ALL(it, nestedDictionary) {
                    newDictionary[&it->first] = &it->second;
                }
                _WriteDictionary(out, indent+1, multiLine, newDictionary,
                                 /* stringValuesOnly = */ false );
            } else {
                const TfToken& typeName =
                    SdfValueTypeNames->GetSerializationName(value);
                Write(out, multiLine ? indent+1 : 0, "%s %s = ",
                      typeName.GetText(), keyName.c_str());

                // XXX: The logic here is very similar to that in
                //      WriteDefaultValue. WBN to refactor.
                string str;
                if (_StringFromVtStringValue<string>(&str, value) or
                    _StringFromVtStringValue<TfToken>(&str, value)) {
                    Puts(out, 0, str);
                } else {
                    Puts(out, 0, TfStringify(value));
                }
                if (multiLine) {
                    Puts(out, 0, "\n");
                }
            }
        }
        if (not multiLine and counter > 0) {
            // CODE_COVERAGE_OFF
            // See multiLine comment below.
            Puts(out, 0, "; ");
            // CODE_COVERAGE_ON
        }
    }
    if (multiLine) {
        Puts(out, indent, "}\n");
    } else {
        // CODE_COVERAGE_OFF
        // Not currently hittable from public API.
        Puts(out, 0, " }");
        // CODE_COVERAGE_ON
    }
}

// Public entry point: sorts the dictionary keys (by pointer-keyed map of
// the original keys) before delegating to _WriteDictionary.
void
Sdf_FileIOUtility::WriteDictionary(ostream &out, size_t indent, bool multiLine,
                                   const VtDictionary &dictionary, bool stringValuesOnly)
{
    // Make sure the dictionary keys are written out in order.
    _OrderedDictionary newDictionary;
    TF_FOR_ALL(it, dictionary) {
        newDictionary[&it->first] = &it->second;
    }
    _WriteDictionary(out, indent, multiLine, newDictionary, stringValuesOnly);
}

template <class T>
void
Sdf_FileIOUtility::WriteListOp(std::ostream &out, size_t indent,
                               const TfToken& fieldName, const SdfListOp<T>& listOp)
{
    _WriteListOp(out, indent, fieldName, listOp);
}

// Explicit instantiations for every list-op type serialized by the
// text file format.
template void Sdf_FileIOUtility::WriteListOp(std::ostream &, size_t, const TfToken&, const SdfPathListOp&);
template void Sdf_FileIOUtility::WriteListOp(std::ostream &, size_t, const TfToken&, const SdfReferenceListOp&);
template void Sdf_FileIOUtility::WriteListOp(std::ostream &, size_t, const TfToken&, const SdfIntListOp&);
template void Sdf_FileIOUtility::WriteListOp(std::ostream &, size_t, const TfToken&, const SdfInt64ListOp&);
template void Sdf_FileIOUtility::WriteListOp(std::ostream &, size_t, const TfToken&, const SdfUIntListOp&);
template void Sdf_FileIOUtility::WriteListOp(std::ostream &, size_t, const TfToken&, const SdfUInt64ListOp&);
template void Sdf_FileIOUtility::WriteListOp(std::ostream &, size_t, const TfToken&, const SdfStringListOp&);
template void Sdf_FileIOUtility::WriteListOp(std::ostream &, size_t, const TfToken&, const SdfTokenListOp&);
template void Sdf_FileIOUtility::WriteListOp(std::ostream &, size_t, const TfToken&, const SdfUnregisteredValueListOp&);

void
Sdf_FileIOUtility::WriteLayerOffset(ostream &out, size_t indent, bool multiLine,
                                    const SdfLayerOffset& layerOffset)
{
    // If there's anything interesting to write, write it.
if (layerOffset != SdfLayerOffset()) { if (not multiLine) { Write(out, 0, " ("); } double offset = layerOffset.GetOffset(); double scale = layerOffset.GetScale(); if (offset != 0.0) { Write(out, multiLine ? indent : 0, "offset = %s%s", TfStringify(offset).c_str(), multiLine ? "\n" : ""); } if (scale != 1.0) { if (not multiLine and offset != 0) { Write(out, 0, "; "); } Write(out, multiLine ? indent : 0, "scale = %s%s", TfStringify(scale).c_str(), multiLine ? "\n" : ""); } if (not multiLine) { Write(out, 0, ")"); } } } string Sdf_FileIOUtility::Quote(const string &str) { static const char* hexdigit = "0123456789abcedf"; static const bool allowTripleQuotes = true; string result; // Choose quotes, double quote preferred. char quote = '"'; if (str.find('"') != string::npos and str.find('\'') == string::npos) { quote = '\''; } // Open quote. Choose single or triple quotes. bool tripleQuotes = false; if (allowTripleQuotes) { if (str.find('\n') != string::npos) { tripleQuotes = true; result += quote; result += quote; } } result += quote; // Escape string. TF_FOR_ALL(i, str) { switch (*i) { case '\n': // Pass newline as-is if using triple quotes, otherwise escape. if (tripleQuotes) { result += *i; } else { result += "\\n"; } break; case '\r': result += "\\r"; break; case '\t': result += "\\t"; break; case '\\': result += "\\\\"; break; default: if (*i == quote) { // Always escape the character we're using for quoting. result += '\\'; result += quote; } else if (not std::isprint(*i)) { // Non-printable; use two digit hex form. result += "\\x"; result += hexdigit[(*i >> 4) & 15]; result += hexdigit[*i & 15]; } else { // Printable, non-special. result += *i; } break; } } // End quote. 
result += quote; if (tripleQuotes) { result += quote; result += quote; } return result; } string Sdf_FileIOUtility::Quote(const TfToken &token) { return Quote(token.GetString()); } string Sdf_FileIOUtility::StringFromVtValue(const VtValue &value) { string s; if (_StringFromVtStringValue<string>(&s, value) or _StringFromVtStringValue<TfToken>(&s, value)) { return s; } if (value.IsHolding<char>()) { return TfStringify(static_cast<int>(value.UncheckedGet<char>())); } else if (value.IsHolding<unsigned char>()) { return TfStringify( static_cast<unsigned int>(value.UncheckedGet<unsigned char>())); } else if (value.IsHolding<signed char>()) { return TfStringify( static_cast<int>(value.UncheckedGet<signed char>())); } return TfStringify(value); } const char* Sdf_FileIOUtility::Stringify( SdfPermission val ) { switch(val) { case SdfPermissionPublic: return "public"; case SdfPermissionPrivate: return "private"; default: TF_CODING_ERROR("unknown value"); return ""; } } const char* Sdf_FileIOUtility::Stringify( SdfSpecifier val ) { switch(val) { case SdfSpecifierDef: return "def"; case SdfSpecifierOver: return "over"; case SdfSpecifierClass: return "class"; default: TF_CODING_ERROR("unknown value"); return ""; } } const char* Sdf_FileIOUtility::Stringify( SdfVariability val ) { switch(val) { case SdfVariabilityVarying: // Empty string implies SdfVariabilityVarying return ""; case SdfVariabilityUniform: return "uniform"; case SdfVariabilityConfig: return "config"; default: TF_CODING_ERROR("unknown value"); return ""; } }
/*
 * Copyright (c) Facebook, Inc. and its affiliates.
 * All rights reserved.
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "CuptiMetricInterface.h"

#include <chrono>

#include "Logger.h"
#include "cupti_call.h"

using namespace std::chrono;
using std::vector;

namespace KINETO_NAMESPACE {

// Resolve a CUPTI metric name to its numeric id on device_.
// Returns ~0u (and logs a warning) when the name is not a valid metric.
CUpti_MetricID CuptiMetricInterface::idFromName(const std::string& name) {
  CUpti_MetricID metric_id{~0u};
  CUptiResult res =
      CUPTI_CALL(cuptiMetricGetIdFromName(device_, name.c_str(), &metric_id));
  if (res == CUPTI_ERROR_INVALID_METRIC_NAME) {
    LOG(WARNING) << "Invalid metric name: " << name;
  }
  return metric_id;
}

// Return a map of event IDs and names for a given metric id.
// Note that many events don't have a name. In that case the name will
// be set to the empty string.
std::map<CUpti_EventID, std::string> CuptiMetricInterface::events(
    CUpti_MetricID metric_id) {
  uint32_t num_events;
  CUPTI_CALL(cuptiMetricGetNumEvents(metric_id, &num_events));
  vector<CUpti_EventID> ids(num_events);
  size_t array_size = num_events * sizeof(CUpti_EventID);
  CUPTI_CALL(cuptiMetricEnumEvents(metric_id, &array_size, ids.data()));
  std::map<CUpti_EventID, std::string> res;
  for (CUpti_EventID id : ids) {
    // Attempt to lookup name from CUPTI
    constexpr size_t kMaxEventNameLength = 64;
    char cupti_name[kMaxEventNameLength];
    size_t size = kMaxEventNameLength;
    CUPTI_CALL(
        cuptiEventGetAttribute(id, CUPTI_EVENT_ATTR_NAME, &size, cupti_name));
    // Defensive NUL-termination in case the name filled the buffer.
    cupti_name[kMaxEventNameLength - 1] = 0;
    // CUPTI "helpfully" returns "event_name" when the event is unnamed.
    if (size > 0 && strcmp(cupti_name, "event_name") != 0) {
      res.emplace(id, cupti_name);
    } else {
      res.emplace(id, "");
    }
  }
  return res;
}

// Query the value kind (double, uint64, percent, ...) of a metric.
CUpti_MetricValueKind CuptiMetricInterface::valueKind(CUpti_MetricID metric) {
  CUpti_MetricValueKind res{CUPTI_METRIC_VALUE_KIND_FORCE_INT};
  size_t value_kind_size = sizeof(res);
  CUPTI_CALL(cuptiMetricGetAttribute(
      metric, CUPTI_METRIC_ATTR_VALUE_KIND, &value_kind_size, &res));
  return res;
}

// Query whether the metric is evaluated per-instance or aggregated.
CUpti_MetricEvaluationMode CuptiMetricInterface::evaluationMode(
    CUpti_MetricID metric) {
  CUpti_MetricEvaluationMode eval_mode{
      CUPTI_METRIC_EVALUATION_MODE_PER_INSTANCE};
  size_t eval_mode_size = sizeof(eval_mode);
  CUPTI_CALL(cuptiMetricGetAttribute(
      metric, CUPTI_METRIC_ATTR_EVALUATION_MODE, &eval_mode_size, &eval_mode));
  return eval_mode;
}

// FIXME: Consider caching value kind here
// Compute a metric value from the raw event counter values collected over
// `duration`, and wrap it in a SampleValue of the type indicated by `kind`.
SampleValue CuptiMetricInterface::calculate(
    CUpti_MetricID metric,
    CUpti_MetricValueKind kind,
    vector<CUpti_EventID>& events,
    vector<int64_t>& values,
    int64_t duration) {
  CUpti_MetricValue metric_value;
  CUPTI_CALL(cuptiMetricGetValue(
      device_,
      metric,
      events.size() * sizeof(CUpti_EventID),
      events.data(),
      values.size() * sizeof(int64_t),
      reinterpret_cast<uint64_t*>(values.data()),
      duration,
      &metric_value));
  switch (kind) {
    case CUPTI_METRIC_VALUE_KIND_DOUBLE:
    case CUPTI_METRIC_VALUE_KIND_PERCENT:
      return SampleValue(metric_value.metricValueDouble);
    case CUPTI_METRIC_VALUE_KIND_UINT64:
    case CUPTI_METRIC_VALUE_KIND_INT64:
    case CUPTI_METRIC_VALUE_KIND_THROUGHPUT:
      return SampleValue(metric_value.metricValueUint64);
    case CUPTI_METRIC_VALUE_KIND_UTILIZATION_LEVEL:
      return SampleValue((int)metric_value.metricValueUtilizationLevel);
    default:
      assert(false);
  }
  return SampleValue(-1);
}

} // namespace KINETO_NAMESPACE
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #include <fstream> #include <iostream> #include <memory> #include <string> #include <utility> #include <vector> #include <gmock/gmock.h> #include <gtest/gtest.h> #include "absl/algorithm/algorithm.h" #include "absl/memory/memory.h" #include "absl/strings/str_format.h" #include "tensorflow/lite/c/c_api_types.h" #include "tensorflow/lite/c/common.h" #include "tensorflow/lite/interpreter.h" #include "tensorflow/lite/string_util.h" #include "tensorflow/lite/testing/util.h" #include "tensorflow/lite/tools/benchmark/benchmark_performance_options.h" #include "tensorflow/lite/tools/benchmark/benchmark_tflite_model.h" #include "tensorflow/lite/tools/command_line_flags.h" #include "tensorflow/lite/tools/delegates/delegate_provider.h" #include "tensorflow/lite/tools/logging.h" namespace { const std::string* g_fp32_model_path = nullptr; const std::string* g_int8_model_path = nullptr; const std::string* g_string_model_path = nullptr; } // namespace namespace tflite { namespace benchmark { namespace { enum class ModelGraphType { FP32, INT8, STRING }; BenchmarkParams CreateParams(int32_t num_runs, float min_secs, float max_secs, ModelGraphType graph_type = ModelGraphType::FP32) { BenchmarkParams params = BenchmarkTfLiteModel::DefaultParams(); params.Set<int32_t>("num_runs", num_runs); params.Set<float>("min_secs", min_secs); 
params.Set<float>("max_secs", max_secs); if (graph_type == ModelGraphType::INT8) { params.Set<std::string>("graph", *g_int8_model_path); } else if (graph_type == ModelGraphType::STRING) { params.Set<std::string>("graph", *g_string_model_path); } else { // by default, simply use the fp32 one. params.Set<std::string>("graph", *g_fp32_model_path); } return params; } BenchmarkParams CreateParams() { return CreateParams(2, 1.0f, 150.0f); } BenchmarkParams CreateFp32Params() { return CreateParams(2, 1.0f, 150.0f, ModelGraphType::FP32); } BenchmarkParams CreateInt8Params() { return CreateParams(2, 1.0f, 150.0f, ModelGraphType::INT8); } BenchmarkParams CreateStringParams() { return CreateParams(2, 1.0f, 150.0f, ModelGraphType::STRING); } std::string CreateFilePath(const std::string& file_name) { const char* tmp_dir = getenv("TEST_TMPDIR"); return std::string(tmp_dir ? tmp_dir : "./") + file_name; } void WriteInputLayerValueFile(const std::string& file_path, ModelGraphType graph_type, int num_elements, char file_value = 'a') { std::ofstream file(file_path); int bytes = 0; switch (graph_type) { case ModelGraphType::FP32: bytes = 4 * num_elements; break; case ModelGraphType::INT8: bytes = num_elements; break; default: LOG(WARNING) << absl::StrFormat( "ModelGraphType(enum_value:%d) is not known.", graph_type); LOG(WARNING) << "The size of the ModelGraphType will be 1 byte in tests."; bytes = num_elements; break; } std::vector<char> buffer(bytes, file_value); file.write(buffer.data(), bytes); } void CheckInputTensorValue(const TfLiteTensor* input_tensor, char expected_value) { ASSERT_THAT(input_tensor, testing::NotNull()); EXPECT_TRUE(std::all_of( input_tensor->data.raw, input_tensor->data.raw + input_tensor->bytes, [expected_value](char c) { return c == expected_value; })); } void CheckInputTensorValue(const TfLiteTensor* input_tensor, int tensor_dim_index, const std::string& expected_value) { StringRef tensor_value = GetString(input_tensor, tensor_dim_index); 
EXPECT_TRUE(absl::equal(tensor_value.str, tensor_value.str + tensor_value.len, expected_value.c_str(), expected_value.c_str() + expected_value.length())); } class TestBenchmark : public BenchmarkTfLiteModel { public: explicit TestBenchmark(BenchmarkParams params) : BenchmarkTfLiteModel(std::move(params)) {} const tflite::Interpreter* GetInterpreter() { return interpreter_.get(); } void Prepare() { PrepareInputData(); ResetInputsAndOutputs(); } const TfLiteTensor* GetInputTensor(int index) { return index >= interpreter_->inputs().size() ? nullptr : interpreter_->input_tensor(index); } }; TEST(BenchmarkTest, DoesntCrashFp32Model) { ASSERT_THAT(g_fp32_model_path, testing::NotNull()); TestBenchmark benchmark(CreateFp32Params()); benchmark.Run(); } TEST(BenchmarkTest, DoesntCrashInt8Model) { ASSERT_THAT(g_int8_model_path, testing::NotNull()); TestBenchmark benchmark(CreateInt8Params()); benchmark.Run(); } TEST(BenchmarkTest, DoesntCrashStringModel) { ASSERT_THAT(g_int8_model_path, testing::NotNull()); TestBenchmark benchmark(CreateStringParams()); benchmark.Run(); } TEST(BenchmarkTest, SplitInputLayerNameAndValueFile) { std::vector<std::string> input_layer_value_files = { "input:/tmp/input", "input\\:0:/tmp/input", "input\\\\0:/tmp/input", "input\\\\:0:/tmp/input", "input\\:0:\\tmp\\input", }; std::vector<std::pair<std::string, std::string>> expected = { {"input", "/tmp/input"}, {"input:0", "/tmp/input"}, {"input\\\\0", "/tmp/input"}, {"input\\:0", "/tmp/input"}, {"input:0", "\\tmp\\input"}, }; std::pair<std::string, std::string> name_file_pair; for (int i = 0; i < input_layer_value_files.size(); ++i) { SplitInputLayerNameAndValueFile(input_layer_value_files[i], name_file_pair); EXPECT_EQ(name_file_pair.first, expected[i].first); EXPECT_EQ(name_file_pair.second, expected[i].second); } EXPECT_EQ(SplitInputLayerNameAndValueFile("a:b:c", name_file_pair), kTfLiteError); EXPECT_EQ(SplitInputLayerNameAndValueFile("abc", name_file_pair), kTfLiteError); } class 
TestMultiRunStatsRecorder : public MultiRunStatsRecorder { public: void OutputStats() override { MultiRunStatsRecorder::OutputStats(); // Check results have been sorted according to avg. latency in increasing // order, and the incomplete runs are at the back of the results. double pre_avg_latency = -1e6; bool has_incomplete = false; // ensure complete/incomplete are not mixed. for (const auto& result : results_) { const auto current_avg_latency = result.metrics.inference_time_us().avg(); if (result.completed) { EXPECT_GE(current_avg_latency, pre_avg_latency); EXPECT_FALSE(has_incomplete); } else { EXPECT_EQ(0, result.metrics.inference_time_us().count()); has_incomplete = true; } pre_avg_latency = current_avg_latency; } } }; TEST(BenchmarkTest, DoesntCrashMultiPerfOptions) { ASSERT_THAT(g_fp32_model_path, testing::NotNull()); TestBenchmark benchmark(CreateFp32Params()); BenchmarkPerformanceOptions all_options_benchmark( &benchmark, std::make_unique<TestMultiRunStatsRecorder>()); all_options_benchmark.Run(); } TEST(BenchmarkTest, DoesntCrashMultiPerfOptionsWithProfiling) { ASSERT_THAT(g_fp32_model_path, testing::NotNull()); BenchmarkParams params = CreateFp32Params(); params.Set<bool>("enable_op_profiling", true); TestBenchmark benchmark(std::move(params)); BenchmarkPerformanceOptions all_options_benchmark(&benchmark); all_options_benchmark.Run(); } TEST(BenchmarkTest, DoesntCrashWithExplicitInputFp32Model) { ASSERT_THAT(g_fp32_model_path, testing::NotNull()); // Note: the following input-related params are *specific* to model // 'g_fp32_model_path' which is specified as 'lite:testdata/multi_add.bin for // the test. 
BenchmarkParams params = CreateFp32Params(); params.Set<std::string>("input_layer", "a,b,c,d"); params.Set<std::string>("input_layer_shape", "1,8,8,3:1,8,8,3:1,8,8,3:1,8,8,3"); params.Set<std::string>("input_layer_value_range", "d,1,10:b,0,100"); TestBenchmark benchmark(std::move(params)); benchmark.Run(); } TEST(BenchmarkTest, DoesntCrashWithExplicitInputInt8Model) { ASSERT_THAT(g_int8_model_path, testing::NotNull()); // Note: the following input-related params are *specific* to model // 'g_int8_model_path' which is specified as // 'lite:testdata/add_quantized_int8.bin for the test. int a_min = 1; int a_max = 10; BenchmarkParams params = CreateInt8Params(); params.Set<std::string>("input_layer", "a"); params.Set<std::string>("input_layer_shape", "1,8,8,3"); params.Set<std::string>("input_layer_value_range", absl::StrFormat("a,%d,%d", a_min, a_max)); TestBenchmark benchmark(std::move(params)); benchmark.Run(); auto input_tensor = benchmark.GetInputTensor(0); ASSERT_THAT(input_tensor, testing::NotNull()); EXPECT_TRUE(std::all_of( input_tensor->data.raw, input_tensor->data.raw + input_tensor->bytes, [a_min, a_max](int i) { return a_min <= i && i <= a_max; })); } TEST(BenchmarkTest, DoesntCrashWithExplicitInputValueFilesFp32Model) { ASSERT_THAT(g_fp32_model_path, testing::NotNull()); char file_value_b = 'b'; const std::string file_path_b = CreateFilePath("fp32_binary_b"); WriteInputLayerValueFile(file_path_b, ModelGraphType::FP32, 192, file_value_b); char file_value_d = 'd'; const std::string file_path_d = CreateFilePath("fp32_binary_d"); WriteInputLayerValueFile(file_path_d, ModelGraphType::FP32, 192, file_value_d); // Note: the following input-related params are *specific* to model // 'g_fp32_model_path' which is specified as 'lite:testdata/multi_add.bin for // the test. 
BenchmarkParams params = CreateFp32Params(); params.Set<std::string>("input_layer", "a,b,c,d"); params.Set<std::string>("input_layer_shape", "1,8,8,3:1,8,8,3:1,8,8,3:1,8,8,3"); params.Set<std::string>("input_layer_value_files", "d:" + file_path_d + ",b:" + file_path_b); TestBenchmark benchmark(std::move(params)); benchmark.Run(); CheckInputTensorValue(benchmark.GetInputTensor(1), file_value_b); CheckInputTensorValue(benchmark.GetInputTensor(3), file_value_d); } TEST(BenchmarkTest, DoesntCrashWithExplicitInputValueFilesInt8Model) { ASSERT_THAT(g_int8_model_path, testing::NotNull()); const std::string file_path = CreateFilePath("int8_binary"); char file_value = 'a'; WriteInputLayerValueFile(file_path, ModelGraphType::INT8, 192, file_value); // Note: the following input-related params are *specific* to model // 'g_int8_model_path' which is specified as // 'lite:testdata/add_quantized_int8.bin for the test. BenchmarkParams params = CreateInt8Params(); params.Set<std::string>("input_layer", "a"); params.Set<std::string>("input_layer_shape", "1,8,8,3"); params.Set<std::string>("input_layer_value_files", "a:" + file_path); TestBenchmark benchmark(std::move(params)); benchmark.Run(); CheckInputTensorValue(benchmark.GetInputTensor(0), file_value); } TEST(BenchmarkTest, DoesntCrashWithExplicitInputValueFilesStringModel) { ASSERT_THAT(g_string_model_path, testing::NotNull()); const std::string file_path = CreateFilePath("string_binary"); const std::string string_value_0 = "abcd"; const std::string string_value_1 = "12345"; const std::string string_value_2 = "a1b2c3d4e5"; std::ofstream file(file_path); // Store the terminating null-character ('\0') at the end of the returned // value by std::string::c_str(). 
file.write(string_value_0.c_str(), string_value_0.length() + 1); file.write(string_value_1.c_str(), string_value_1.length() + 1); file.write(string_value_2.c_str(), string_value_2.length() + 1); file.close(); // Note: the following input-related params are *specific* to model // 'g_string_model_path' which is specified as // 'lite:testdata/string_input_model.bin for the test. BenchmarkParams params = CreateStringParams(); params.Set<std::string>("input_layer", "a"); params.Set<std::string>("input_layer_shape", "1,3"); params.Set<std::string>("input_layer_value_files", "a:" + file_path); TestBenchmark benchmark(std::move(params)); benchmark.Run(); auto input_tensor = benchmark.GetInputTensor(0); ASSERT_THAT(input_tensor, testing::NotNull()); EXPECT_EQ(GetStringCount(input_tensor), 3); CheckInputTensorValue(input_tensor, 0, string_value_0); CheckInputTensorValue(input_tensor, 1, string_value_1); CheckInputTensorValue(input_tensor, 2, string_value_2); } class ScopedCommandlineArgs { public: explicit ScopedCommandlineArgs(const std::vector<std::string>& actual_args) { argc_ = actual_args.size() + 1; argv_ = new char*[argc_]; const std::string program_name = "benchmark_model"; int buffer_size = program_name.length() + 1; for (const auto& arg : actual_args) buffer_size += arg.length() + 1; buffer_ = new char[buffer_size]; auto next_start = program_name.copy(buffer_, program_name.length()); buffer_[next_start++] = '\0'; argv_[0] = buffer_; for (int i = 0; i < actual_args.size(); ++i) { const auto& arg = actual_args[i]; argv_[i + 1] = buffer_ + next_start; next_start += arg.copy(argv_[i + 1], arg.length()); buffer_[next_start++] = '\0'; } } ~ScopedCommandlineArgs() { delete[] argv_; delete[] buffer_; } int argc() const { return argc_; } char** argv() const { return argv_; } private: char* buffer_; // the buffer for all arguments. int argc_; char** argv_; // Each char* element points to each argument. 
}; TEST(BenchmarkTest, RunWithCorrectFlags) { ASSERT_THAT(g_fp32_model_path, testing::NotNull()); TestBenchmark benchmark(CreateFp32Params()); ScopedCommandlineArgs scoped_argv({"--num_threads=4"}); auto status = benchmark.Run(scoped_argv.argc(), scoped_argv.argv()); EXPECT_EQ(kTfLiteOk, status); } TEST(BenchmarkTest, RunWithWrongFlags) { ASSERT_THAT(g_fp32_model_path, testing::NotNull()); TestBenchmark benchmark(CreateFp32Params()); ScopedCommandlineArgs scoped_argv({"--num_threads=str"}); auto status = benchmark.Run(scoped_argv.argc(), scoped_argv.argv()); EXPECT_EQ(kTfLiteError, status); } TEST(BenchmarkTest, RunWithUseCaching) { ASSERT_THAT(g_fp32_model_path, testing::NotNull()); TestBenchmark benchmark(CreateFp32Params()); ScopedCommandlineArgs scoped_argv({"--use_caching=false"}); auto status = benchmark.Run(scoped_argv.argc(), scoped_argv.argv()); EXPECT_EQ(kTfLiteOk, status); } class MaxDurationWorksTestListener : public BenchmarkListener { void OnBenchmarkEnd(const BenchmarkResults& results) override { const int64_t num_actual_runs = results.inference_time_us().count(); TFLITE_LOG(INFO) << "number of actual runs: " << num_actual_runs; EXPECT_GE(num_actual_runs, 1); EXPECT_LT(num_actual_runs, 100000000); } }; TEST(BenchmarkTest, MaxDurationWorks) { ASSERT_THAT(g_fp32_model_path, testing::NotNull()); TestBenchmark benchmark(CreateParams(100000000 /* num_runs */, 1000000.0f /* min_secs */, 0.001f /* max_secs */)); MaxDurationWorksTestListener listener; benchmark.AddListener(&listener); benchmark.Run(); } TEST(BenchmarkTest, ParametersArePopulatedWhenInputShapeIsNotSpecified) { ASSERT_THAT(g_fp32_model_path, testing::NotNull()); TestBenchmark benchmark(CreateParams()); benchmark.Init(); benchmark.Prepare(); auto interpreter = benchmark.GetInterpreter(); auto inputs = interpreter->inputs(); ASSERT_GE(inputs.size(), 1); auto input_tensor = interpreter->tensor(inputs[0]); // Copy input tensor to a vector std::vector<char> input_bytes(input_tensor->data.raw, 
input_tensor->data.raw + input_tensor->bytes); benchmark.Prepare(); // Expect data is not the same. EXPECT_EQ(input_bytes.size(), input_tensor->bytes); EXPECT_FALSE(absl::equal(input_bytes.begin(), input_bytes.end(), input_tensor->data.raw, input_tensor->data.raw + input_tensor->bytes)); } } // namespace } // namespace benchmark } // namespace tflite int main(int argc, char** argv) { std::string fp32_model_path, int8_model_path, string_model_path; std::vector<tflite::Flag> flags = { tflite::Flag::CreateFlag("fp32_graph", &fp32_model_path, "Path to a fp32 model file."), tflite::Flag::CreateFlag("int8_graph", &int8_model_path, "Path to a int8 model file."), tflite::Flag::CreateFlag("string_graph", &string_model_path, "Path to a string model file."), }; g_fp32_model_path = &fp32_model_path; g_int8_model_path = &int8_model_path; g_string_model_path = &string_model_path; const bool parse_result = tflite::Flags::Parse(&argc, const_cast<const char**>(argv), flags); if (!parse_result) { std::cerr << tflite::Flags::Usage(argv[0], flags); return 1; } ::tflite::LogToStderr(); ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
/*******************************************************************************

Licensed to the OpenCOR team under one or more contributor license agreements.
See the NOTICE.txt file distributed with this work for additional information
regarding copyright ownership. The OpenCOR team licenses this file to you under
the Apache License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed
under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

*******************************************************************************/

//==============================================================================
// CellMLModelRepositoryWindow plugin
//==============================================================================

#include "cellmlmodelrepositorywindowplugin.h"
#include "cellmlmodelrepositorywindowwindow.h"
#include "coreguiutils.h"

//==============================================================================

#include <Qt>

//==============================================================================

#include <QMainWindow>
#include <QSettings>

//==============================================================================

namespace OpenCOR {
namespace CellMLModelRepositoryWindow {

//==============================================================================

// Describe the plugin (English and French descriptions) and declare its
// category, loadability and dependencies for the OpenCOR plugin manager.

PLUGININFO_FUNC CellMLModelRepositoryWindowPluginInfo()
{
    Descriptions descriptions;

    descriptions.insert("en", QString::fromUtf8("a plugin to access the <a href=\"http://models.physiomeproject.org/cellml/\">CellML Model Repository</a>."));
    descriptions.insert("fr", QString::fromUtf8("une extension pour accéder au <a href=\"http://models.physiomeproject.org/cellml/\">Répertoire de Modèles CellML</a>."));

    return new PluginInfo(PluginInfo::Organisation, true, false,
                          QStringList() << "Core",
                          descriptions);
}

//==============================================================================
// I18n interface
//==============================================================================

void CellMLModelRepositoryWindowPlugin::retranslateUi()
{
    // Retranslate our CellML Model Repository window action

    retranslateAction(mCellmlModelRepositoryWindowAction,
                      tr("CellML Model Repository"),
                      tr("Show/hide the CellML Model Repository window"));

    // Retranslate our CellML Model Repository window

    mCellmlModelRepositoryWindowWindow->retranslateUi();
}

//==============================================================================
// Plugin interface
//==============================================================================

void CellMLModelRepositoryWindowPlugin::initializePlugin(QMainWindow *pMainWindow)
{
    // Create an action to show/hide our CellML Model Repository window

    mCellmlModelRepositoryWindowAction = Core::newAction(true, pMainWindow);

    // Create our CellML Model Repository window

    mCellmlModelRepositoryWindowWindow = new CellmlModelRepositoryWindowWindow(pMainWindow);
}

//==============================================================================

void CellMLModelRepositoryWindowPlugin::finalizePlugin()
{
    // We don't handle this interface...
}

//==============================================================================

void CellMLModelRepositoryWindowPlugin::pluginsInitialized(const Plugins &pLoadedPlugins)
{
    Q_UNUSED(pLoadedPlugins);

    // We don't handle this interface...
}

//==============================================================================

void CellMLModelRepositoryWindowPlugin::loadSettings(QSettings *pSettings)
{
    // Retrieve our CellML Model Repository window settings
    // Note: settings are scoped to the window's object name so they don't
    //       clash with other windows' settings...

    pSettings->beginGroup(mCellmlModelRepositoryWindowWindow->objectName());
        mCellmlModelRepositoryWindowWindow->loadSettings(pSettings);
    pSettings->endGroup();
}

//==============================================================================

void CellMLModelRepositoryWindowPlugin::saveSettings(QSettings *pSettings) const
{
    // Keep track of our CellML Model Repository window settings

    pSettings->beginGroup(mCellmlModelRepositoryWindowWindow->objectName());
        mCellmlModelRepositoryWindowWindow->saveSettings(pSettings);
    pSettings->endGroup();
}

//==============================================================================

void CellMLModelRepositoryWindowPlugin::handleAction(const QUrl &pUrl)
{
    Q_UNUSED(pUrl);

    // We don't handle this interface...
}

//==============================================================================
// Window interface
//==============================================================================

Qt::DockWidgetArea CellMLModelRepositoryWindowPlugin::windowDefaultDockArea() const
{
    // Return our default dock area

    return Qt::LeftDockWidgetArea;
}

//==============================================================================

QAction * CellMLModelRepositoryWindowPlugin::windowAction() const
{
    // Return our window action

    return mCellmlModelRepositoryWindowAction;
}

//==============================================================================

QDockWidget * CellMLModelRepositoryWindowPlugin::windowWidget() const
{
    // Return our window widget

    return mCellmlModelRepositoryWindowWindow;
}

//==============================================================================

}   // namespace CellMLModelRepositoryWindow
}   // namespace OpenCOR

//==============================================================================
// End of file
//==============================================================================
#include <iostream> #include "BoardExtra.h" #include <cstdlib> using namespace std; //Default constructor Boarde::Boarde() { board_size = 0; board = NULL; copy = NULL; } //Destructor Boarde::~Boarde() { for (int i = 0; i < board_size; i++) { delete []board[i]; delete []copy[i]; } delete []board; delete []copy; } //Set board size void Boarde::set_board_size() { char level; do { cout << "Enter the level of difficulty: \n"; cout << "e for easy \n"; cout << "m for medium \n"; cout << "h for hard \n"; cin >> level; } while (level != 'e' && level != 'm' && level != 'h'); if (level == 'e') { cout << "Easy it is! \n"; board_size = 5; } else if (level == 'm') { cout << "Medium it is! \n"; board_size = 4; } else { cout << "Hard it is! \n"; board_size = 3; } } //Get board size int Boarde::get_board_size() { return board_size; } //Restart game void Boarde::restart() { char temp; cout << "Are you sure you want to restart? (y/n) \n"; cin >> temp; if (temp == 'y') { fill(); cout << "Restarting Game.... \n\n"; } } //2D array allocation void Boarde::allocate() { board = new int* [board_size]; copy = new int* [board_size]; for (int i = 0; i < board_size; ++i) { board[i] = new int[board_size]; copy[i] = new int[board_size]; } } //Filling array void Boarde::fill() { //Fill with zeros for (int i = 0; i < board_size; ++i) for (int k = 0; k < board_size; ++k) { board[i][k] = 0; copy[i][k] = 0; } //Starting tile positions int row1 = board_size - 1; int col1 = 1; int row2 = board_size - 2; int col2 = board_size - 1; board[row1][col1] = 2; board[row2][col2] = 2; copy[row1][col1] = 2; copy[row2][col2] = 2; } //Sets up board. 
That is pulls together allocation, board size and fill void Boarde::setup() { set_board_size(); allocate(); fill(); } //Print array with necessary spacings void Boarde::print() { for (int i = 0; i < board_size; ++i) { for (int k = 0; k < board_size; ++k) { if (board[i][k] == 0) cout << empty; else cout << board[i][k]; cout << "\t"; } cout << "\n\n"; } } //Duplicate array to use for undo void Boarde::duplicate() { for (int i = 0; i < board_size; i++) { for (int j = 0; j < board_size; j++) { copy[i][j] = board[i][j]; } } } //Undo void Boarde::undo() { for (int i = 0; i < board_size; i++) { for (int j = 0; j < board_size; j++) { board[i][j] = copy[i][j]; } } } //Introduce a new tile void Boarde::new_tile() { int row, col; bool cond = true; while(cond) { //Finding index position. Random and on edges int temp = rand() % 2; if (temp) { row = rand() % board_size; int t = rand() % 2; if (t) col = 0; else col = board_size - 1; } else { col = rand() % board_size; int t = rand() % 2; if (t) row = 0; else row = board_size - 1; } //If index position not filled if (board[row][col] == 0) { int range = (rand() % 10) + 1; if (range < 2) board[row][col] = 4; else board[row][col] = 2; cond = false; } } } //Moves elements up int Boarde::up() { //Variable that sees if new tile needs to be created after movement. //If for example no movement occurs, no new tile has to be created. //Same goes for down left and right. int t = 0; for (int i = 0; i < board_size; i++) { for (int j = 0; j < board_size; j++) { if (board[i][j] == 0) { //Empty point so start checking down column for (int k = i+1; k < board_size; k++) { //Found non empty. 
Move it up if (board[k][j] != 0) { board[i][j] = board[k][j]; board[k][j] = 0; t += 1; break; } } } } } return t; } //Sums elements that can be summed upwards int Boarde::sum_up() { int sum = 0; for (int i = 0; i < board_size; i++) { for (int j = 0; j < board_size - 1; j++) { //Summation is possible if (board[j][i] != 0 && (board[j][i] == board[j+1][i])) { board[j][i] += board[j+1][i]; board[j+1][i] = 0; sum += board[j][i]; } } } return sum; } //Moves elements down int Boarde::down() { int t = 0; //Start from bottom to top for (int i = board_size - 1; i >= 0; i--) { for (int j = 0; j < board_size; j++) { if (board[i][j] == 0) { //Empty point so start checking up column for (int k = i - 1; k >= 0; k--) { //Found non empty, move it down if (board[k][j] != 0) { board[i][j] = board[k][j]; board[k][j] = 0; t+=1; break; } } } } } return t; } //Downward summation int Boarde::sum_down() { int sum = 0; for (int i = 0; i < board_size; i++) { //From bottom to top for (int j = board_size - 1; j > 0; j--) { //Equal and nonzero, summation possible if (board[j][i] != 0 && board[j][i] == board[j-1][i]) { board[j][i] += board[j-1][i]; board[j-1][i] = 0; sum += board[j][i]; } } } return sum; } //Moves elements left int Boarde::left() { int t = 0; //Left to right checking for (int i = 0; i < board_size; i++) { for (int j = 0; j < board_size; j++) { //Found empty position, start checking to the right if (board[i][j] == 0) { for (int k = j + 1; k < board_size; k++) { //Found non zero point. Move it leftwards if (board[i][k] != 0) { board[i][j] = board[i][k]; board[i][k] = 0; t+=1; break; } } } } } return t; } //Leftward sum int Boarde::sum_left() { int sum = 0; //Checking from left to right for (int i = 0; i < board_size; i++) { for (int j = 0; j < board_size - 1; j++) { //Equal non zero points found. 
Sum them if (board[i][j] != 0 && (board[i][j] == board[i][j+1])) { board[i][j] += board[i][j+1]; board[i][j+1] = 0; sum += board[i][j]; } } } return sum; } //Move elements right int Boarde::right() { int t = 0; //Checking board right to left for (int i = 0; i < board_size; i++) { for (int j = board_size - 1; j >= 0; j--) { //Found zero element, start checking leftwards if (board[i][j] == 0) { for (int k = j -1; k >= 0; k--) { //Found non zero element, move it to the right if (board[i][k] != 0) { board[i][j] = board[i][k]; board[i][k] = 0; t = 1; break; } } } } } return t; } //Rightward sum int Boarde::sum_right() { int sum = 0; //Checking from right to left for (int i = 0; i < board_size; i++) { for (int j = board_size - 1; j > 0; j--) { //Equal and non zero elements found. Sum them if (board[i][j] != 0 && (board[i][j] == board[i][j-1])) { board[i][j] += board[i][j-1]; board[i][j-1] = 0; sum += board[i][j]; } } } return sum; } //Checks board to see if game is over bool Boarde::game_over() { //First check for zeros for (int i = 0; i < board_size; i++) { for (int j = 0; j < board_size; j++) { //Zero so game not over if (board[i][j] == 0) return false; } } //Now check for equal numbers //Same in parallel for (int i = 0; i < board_size; i++) { for (int j = 0; j < board_size - 1; j++) { //Equal so game not over if (board[i][j] == board[i][j+1]) return false; } } //Same perpendicular for (int i = 0; i < board_size - 1; i++) { for (int j = 0; j < board_size; j++) { //Equal so game not over if (board[i][j] == board[i+1][j]) return false; } } cout << " \n GAME OVER \n"; return true; }
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "cc/test/test_layer_tree_frame_sink.h"

#include <stdint.h>

#include <memory>
#include <utility>

#include "base/bind.h"
#include "base/single_thread_task_runner.h"
#include "cc/trees/layer_tree_frame_sink_client.h"
#include "components/viz/common/frame_sinks/begin_frame_args.h"
#include "components/viz/common/resources/bitmap_allocation.h"
#include "components/viz/service/display/direct_renderer.h"
#include "components/viz/service/display/output_surface.h"
#include "components/viz/service/display/overlay_processor_stub.h"
#include "components/viz/service/display/skia_output_surface.h"
#include "components/viz/service/frame_sinks/compositor_frame_sink_support.h"
#include "mojo/public/cpp/system/platform_handle.h"

namespace cc {

// Fixed FrameSinkId used by every TestLayerTreeFrameSink; tests only ever
// create one root sink, so a constant id is sufficient.
static constexpr viz::FrameSinkId kLayerTreeFrameSinkId(1, 1);

// Test frame sink that hosts its own viz::Display so tests can run a full
// submit/draw/swap pipeline in-process, either scheduler-driven or
// synchronously (see |synchronous_composite|).
TestLayerTreeFrameSink::TestLayerTreeFrameSink(
    scoped_refptr<viz::ContextProvider> compositor_context_provider,
    scoped_refptr<viz::RasterContextProvider> worker_context_provider,
    gpu::GpuMemoryBufferManager* gpu_memory_buffer_manager,
    const viz::RendererSettings& renderer_settings,
    scoped_refptr<base::SingleThreadTaskRunner> compositor_task_runner,
    bool synchronous_composite,
    bool disable_display_vsync,
    double refresh_rate,
    viz::BeginFrameSource* begin_frame_source)
    : LayerTreeFrameSink(std::move(compositor_context_provider),
                         std::move(worker_context_provider),
                         std::move(compositor_task_runner),
                         gpu_memory_buffer_manager),
      synchronous_composite_(synchronous_composite),
      disable_display_vsync_(disable_display_vsync),
      renderer_settings_(renderer_settings),
      refresh_rate_(refresh_rate),
      frame_sink_id_(kLayerTreeFrameSinkId),
      parent_local_surface_id_allocator_(
          new viz::ParentLocalSurfaceIdAllocator),
      client_provided_begin_frame_source_(begin_frame_source),
      external_begin_frame_source_(this) {
  // Allocate the first LocalSurfaceId up front so a valid id exists before
  // the first frame is submitted.
  parent_local_surface_id_allocator_->GenerateId();
}

TestLayerTreeFrameSink::~TestLayerTreeFrameSink() = default;

void TestLayerTreeFrameSink::SetDisplayColorSpace(
    const gfx::ColorSpace& display_color_space) {
  display_color_spaces_ = gfx::DisplayColorSpaces(display_color_space);
  // |display_| only exists after BindToClient(); before that the value is
  // cached and applied during initialization below.
  if (display_)
    display_->SetDisplayColorSpaces(display_color_spaces_);
}

// Builds the whole display pipeline: output surface, BeginFrameSource,
// scheduler (unless compositing synchronously), viz::Display, and the
// CompositorFrameSinkSupport that receives frames from the client.
// Returns false if the base class fails to bind.
bool TestLayerTreeFrameSink::BindToClient(LayerTreeFrameSinkClient* client) {
  if (!LayerTreeFrameSink::BindToClient(client))
    return false;

  shared_bitmap_manager_ = std::make_unique<viz::TestSharedBitmapManager>();
  frame_sink_manager_ =
      std::make_unique<viz::FrameSinkManagerImpl>(shared_bitmap_manager_.get());

  std::unique_ptr<viz::OutputSurface> display_output_surface;
  if (renderer_settings_.use_skia_renderer) {
    auto output_surface = test_client_->CreateDisplaySkiaOutputSurface();
    display_output_surface = std::move(output_surface);
  } else {
    display_output_surface =
        test_client_->CreateDisplayOutputSurface(context_provider());
  }

  std::unique_ptr<viz::DisplayScheduler> scheduler;
  if (!synchronous_composite_) {
    // Choose a BeginFrameSource: caller-provided wins, then an unthrottled
    // back-to-back source when vsync is disabled, else a delay-based source
    // ticking at |refresh_rate_|.
    if (client_provided_begin_frame_source_) {
      display_begin_frame_source_ = client_provided_begin_frame_source_;
    } else if (disable_display_vsync_) {
      begin_frame_source_ = std::make_unique<viz::BackToBackBeginFrameSource>(
          std::make_unique<viz::DelayBasedTimeSource>(
              compositor_task_runner_.get()));
      display_begin_frame_source_ = begin_frame_source_.get();
    } else {
      begin_frame_source_ = std::make_unique<viz::DelayBasedBeginFrameSource>(
          std::make_unique<viz::DelayBasedTimeSource>(
              compositor_task_runner_.get()),
          viz::BeginFrameSource::kNotRestartableId);
      begin_frame_source_->OnUpdateVSyncParameters(
          base::TimeTicks::Now(),
          base::TimeDelta::FromMilliseconds(1000.f / refresh_rate_));
      display_begin_frame_source_ = begin_frame_source_.get();
    }
    scheduler = std::make_unique<viz::DisplayScheduler>(
        display_begin_frame_source_, compositor_task_runner_.get(),
        display_output_surface->capabilities().max_frames_pending);
  }

  // Tests do not exercise overlays, so a stub overlay processor is enough.
  auto overlay_processor = std::make_unique<viz::OverlayProcessorStub>();
  display_ = std::make_unique<viz::Display>(
      shared_bitmap_manager_.get(), renderer_settings_, frame_sink_id_,
      std::move(display_output_surface), std::move(overlay_processor),
      std::move(scheduler), compositor_task_runner_);

  constexpr bool is_root = true;
  support_ = std::make_unique<viz::CompositorFrameSinkSupport>(
      this, frame_sink_manager_.get(), frame_sink_id_, is_root);
  support_->SetWantsAnimateOnlyBeginFrames();
  client_->SetBeginFrameSource(&external_begin_frame_source_);
  if (display_begin_frame_source_) {
    frame_sink_manager_->RegisterBeginFrameSource(display_begin_frame_source_,
                                                  frame_sink_id_);
  }
  display_->Initialize(this, frame_sink_manager_->surface_manager());
  display_->renderer_for_testing()->SetEnlargePassTextureAmountForTesting(
      enlarge_pass_texture_amount_);
  display_->SetDisplayColorSpaces(display_color_spaces_);
  display_->SetVisible(true);
  return true;
}

void TestLayerTreeFrameSink::DetachFromClient() {
  // This acts like the |shared_bitmap_manager_| is a global object, while
  // in fact it is tied to the lifetime of this class and is destroyed below:
  // The shared_bitmap_manager_ has ownership of shared memory for each
  // SharedBitmapId that has been reported from the client. Since the client is
  // gone that memory can be freed. If we don't then it would leak.
  for (const auto& id : owned_bitmaps_)
    shared_bitmap_manager_->ChildDeletedSharedBitmap(id);
  owned_bitmaps_.clear();

  // Teardown mirrors BindToClient() in reverse order: unregister the begin
  // frame source before destroying the objects that reference it.
  if (display_begin_frame_source_) {
    frame_sink_manager_->UnregisterBeginFrameSource(
        display_begin_frame_source_);
    display_begin_frame_source_ = nullptr;
  }
  client_->SetBeginFrameSource(nullptr);
  support_ = nullptr;
  display_ = nullptr;
  begin_frame_source_ = nullptr;
  parent_local_surface_id_allocator_ = nullptr;
  frame_sink_manager_ = nullptr;
  shared_bitmap_manager_ = nullptr;
  test_client_ = nullptr;
  LayerTreeFrameSink::DetachFromClient();
}

void TestLayerTreeFrameSink::SetLocalSurfaceId(
    const viz::LocalSurfaceId& local_surface_id) {
  // Forwarded to the test client purely for observation/verification.
  test_client_->DisplayReceivedLocalSurfaceId(local_surface_id);
}

// Submits |frame| to the display, allocating a new LocalSurfaceId and
// resizing the display whenever the frame size or scale factor changed.
// In synchronous mode (no scheduler) this also draws immediately and posts
// the frame ack back to the client.
void TestLayerTreeFrameSink::SubmitCompositorFrame(viz::CompositorFrame frame,
                                                   bool hit_test_data_changed,
                                                   bool show_hit_test_borders) {
  DCHECK(frame.metadata.begin_frame_ack.has_damage);
  DCHECK(frame.metadata.begin_frame_ack.frame_id.IsSequenceValid());

  test_client_->DisplayReceivedCompositorFrame(frame);

  gfx::Size frame_size = frame.size_in_pixels();
  float device_scale_factor = frame.device_scale_factor();
  viz::LocalSurfaceId local_surface_id =
      parent_local_surface_id_allocator_->GetCurrentLocalSurfaceIdAllocation()
          .local_surface_id();
  if (frame_size != display_size_ ||
      device_scale_factor != device_scale_factor_) {
    // Size/scale changed: a new surface identity is required before the
    // display can accept the frame.
    parent_local_surface_id_allocator_->GenerateId();
    local_surface_id =
        parent_local_surface_id_allocator_->GetCurrentLocalSurfaceIdAllocation()
            .local_surface_id();
    display_->SetLocalSurfaceId(local_surface_id, device_scale_factor);
    display_->Resize(frame_size);
    display_size_ = frame_size;
    device_scale_factor_ = device_scale_factor;
  }

  support_->SubmitCompositorFrame(local_surface_id, std::move(frame));

  if (!display_->has_scheduler()) {
    display_->DrawAndSwap(base::TimeTicks::Now());
    // Post this to get a new stack frame so that we exit this function before
    // calling the client to tell it that it is done.
    compositor_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(&TestLayerTreeFrameSink::SendCompositorFrameAckToClient,
                       weak_ptr_factory_.GetWeakPtr()));
  }
}

void TestLayerTreeFrameSink::DidNotProduceFrame(const viz::BeginFrameAck& ack) {
  DCHECK(!ack.has_damage);
  DCHECK(ack.frame_id.IsSequenceValid());
  support_->DidNotProduceFrame(ack);
}

// Registers a client-allocated shared bitmap and remembers its id so it can
// be released in DetachFromClient().
void TestLayerTreeFrameSink::DidAllocateSharedBitmap(
    base::ReadOnlySharedMemoryRegion region,
    const viz::SharedBitmapId& id) {
  bool ok =
      shared_bitmap_manager_->ChildAllocatedSharedBitmap(region.Map(), id);
  DCHECK(ok);
  owned_bitmaps_.insert(id);
}

void TestLayerTreeFrameSink::DidDeleteSharedBitmap(
    const viz::SharedBitmapId& id) {
  shared_bitmap_manager_->ChildDeletedSharedBitmap(id);
  owned_bitmaps_.erase(id);
}

void TestLayerTreeFrameSink::DidReceiveCompositorFrameAck(
    const std::vector<viz::ReturnedResource>& resources) {
  ReclaimResources(resources);
  // In synchronous mode, we manually send acks and this method should not be
  // used.
  if (!display_->has_scheduler())
    return;
  client_->DidReceiveCompositorFrameAck();
}

void TestLayerTreeFrameSink::OnBeginFrame(
    const viz::BeginFrameArgs& args,
    const viz::FrameTimingDetailsMap& timing_details) {
  // Deliver any pending presentation feedback before forwarding the tick.
  for (const auto& pair : timing_details)
    client_->DidPresentCompositorFrame(pair.first, pair.second);
  external_begin_frame_source_.OnBeginFrame(args);
}

void TestLayerTreeFrameSink::ReclaimResources(
    const std::vector<viz::ReturnedResource>& resources) {
  client_->ReclaimResources(resources);
}

void TestLayerTreeFrameSink::OnBeginFramePausedChanged(bool paused) {}

void TestLayerTreeFrameSink::DisplayOutputSurfaceLost() {
  client_->DidLoseLayerTreeFrameSink();
}

void TestLayerTreeFrameSink::DisplayWillDrawAndSwap(
    bool will_draw_and_swap,
    viz::RenderPassList* render_passes) {
  test_client_->DisplayWillDrawAndSwap(will_draw_and_swap, render_passes);
}

void TestLayerTreeFrameSink::DisplayDidDrawAndSwap() {
  test_client_->DisplayDidDrawAndSwap();
}

void TestLayerTreeFrameSink::DisplayDidReceiveCALayerParams(
    const gfx::CALayerParams& ca_layer_params) {}

void TestLayerTreeFrameSink::DisplayDidCompleteSwapWithSize(
    const gfx::Size& pixel_Size) {}

void TestLayerTreeFrameSink::OnNeedsBeginFrames(bool needs_begin_frames) {
  support_->SetNeedsBeginFrame(needs_begin_frames);
}

void TestLayerTreeFrameSink::SendCompositorFrameAckToClient() {
  client_->DidReceiveCompositorFrameAck();
}

base::TimeDelta TestLayerTreeFrameSink::GetPreferredFrameIntervalForFrameSinkId(
    const viz::FrameSinkId& id) {
  return viz::BeginFrameArgs::MinInterval();
}

}  // namespace cc
//
// Copyright 2016 Pixar
//
// Licensed under the Apache License, Version 2.0 (the "Apache License")
// with the following modification; you may not use this file except in
// compliance with the Apache License and the following modification to it:
// Section 6. Trademarks. is deleted and replaced with:
//
// 6. Trademarks. This License does not grant permission to use the trade
//    names, trademarks, service marks, or product names of the Licensor
//    and its affiliates, except as required to comply with Section 4(c) of
//    the License and to reproduce the content of the NOTICE file.
//
// You may obtain a copy of the Apache License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the Apache License with the above modification is
// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the Apache License for the specific
// language governing permissions and limitations under the Apache License.
//
#include "pxr/pxr.h"
#include "pxr/usd/pcp/mapExpression.h"
#include "pxr/usd/pcp/mapFunction.h"
#include "pxr/usd/pcp/layerStack.h"
#include "pxr/base/tracelite/trace.h"

#include <tbb/concurrent_hash_map.h>

PXR_NAMESPACE_OPEN_SCOPE

struct Pcp_VariableImpl;

// Add a mapping from </> to </> if the function does not already have one.
static PcpMapFunction
_AddRootIdentity(const PcpMapFunction &value)
{
    static const SdfPath absoluteRoot = SdfPath::AbsoluteRootPath();
    if (value.MapSourceToTarget(absoluteRoot) == absoluteRoot) {
        // This function already maps </> to </>; use it as-is.
        return value;
    }
    // Re-create the function with an added root identity mapping.
    PcpMapFunction::PathMap sourceToTargetMap = value.GetSourceToTargetMap();
    sourceToTargetMap[absoluteRoot] = absoluteRoot;
    return PcpMapFunction::Create(sourceToTargetMap, value.GetTimeOffset());
}

////////////////////////////////////////////////////////////////////////

// Default-constructed expression is null (no node).
PcpMapExpression::PcpMapExpression()
{
}

bool
PcpMapExpression::IsNull() const
{
    return !_node;
}

void
PcpMapExpression::Swap(PcpMapExpression &other)
{
    _node.swap(other._node);
}

// Evaluates this expression tree, caching results in the nodes.
// A null expression evaluates to a default-constructed Value.
const PcpMapExpression::Value &
PcpMapExpression::Evaluate() const
{
    static PcpMapExpression::Value defaultValue;
    return _node ? _node->EvaluateAndCache() : defaultValue;
}

PcpMapExpression
PcpMapExpression::Identity()
{
    static const PcpMapExpression val = Constant(PcpMapFunction::Identity());
    return val;
}

PcpMapExpression
PcpMapExpression::Constant( const Value & value )
{
    return PcpMapExpression(
        _Node::New(_OpConstant, _NodeRefPtr(), _NodeRefPtr(), value) );
}

// Builds a composition expression f(this(x)); constant operands are folded
// eagerly instead of deferring to evaluation time.
PcpMapExpression
PcpMapExpression::Compose(const PcpMapExpression &f) const
{
    // Fast path short-circuits for identities
    if (IsConstantIdentity()) {
        return f;
    }
    if (f.IsConstantIdentity()) {
        return *this;
    }
    if (_node->key.op == _OpConstant && f._node->key.op == _OpConstant) {
        // Apply constant folding
        return Constant( Evaluate().Compose( f.Evaluate() ) );
    }
    return PcpMapExpression( _Node::New(_OpCompose, _node, f._node) );
}

PcpMapExpression
PcpMapExpression::Inverse() const
{
    // Fast path short-circuits for identities
    if (IsConstantIdentity()) {
        return *this;
    }
    if (_node->key.op == _OpConstant) {
        // Apply constant folding
        return Constant( Evaluate().GetInverse() );
    }
    return PcpMapExpression( _Node::New(_OpInverse, _node) );
}

// Wraps the expression so its result always contains the </> -> </> mapping.
// Skips the wrapper when the subtree is statically known to carry one.
PcpMapExpression
PcpMapExpression::AddRootIdentity() const
{
    // Fast path short-circuits for identities
    if (IsConstantIdentity()) {
        return *this;
    }
    if (_node->key.op == _OpConstant) {
        // Apply constant folding
        return Constant( _AddRootIdentity(Evaluate()) );
    }
    if (_node->expressionTreeAlwaysHasIdentity) {
        return PcpMapExpression(_node);
    }
    return PcpMapExpression( _Node::New(_OpAddRootIdentity, _node) );
}

////////////////////////////////////////////////////////////////////////
// Variable implementation

PcpMapExpression::Variable::~Variable()
{
    // Do nothing
}

// Private implementation for Variable.
struct Pcp_VariableImpl : PcpMapExpression::Variable
{
    virtual ~Pcp_VariableImpl() {}

    Pcp_VariableImpl(const PcpMapExpression::_NodeRefPtr &node) : _node(node) {}

    virtual const PcpMapExpression::Value & GetValue() const {
        return _node->GetValueForVariable();
    }

    virtual void SetValue(const PcpMapExpression::Value & value) {
        // Invalidates cached values of dependent expressions.
        _node->SetValueForVariable(value);
    }

    virtual PcpMapExpression GetExpression() const {
        return PcpMapExpression(_node);
    }

    const PcpMapExpression::_NodeRefPtr _node;
};

PcpMapExpression::VariableRefPtr
PcpMapExpression::NewVariable( const Value & initialValue )
{
    Pcp_VariableImpl *var = new Pcp_VariableImpl( _Node::New(_OpVariable) );
    var->SetValue(initialValue);
    return VariableRefPtr(var);
}

////////////////////////////////////////////////////////////////////////
// Node

namespace {

// Hash/equality adapter so _Node::Key can be used with
// tbb::concurrent_hash_map.
template <class Key>
struct _KeyHashEq
{
    inline bool equal(const Key &l, const Key &r) const {
        return l == r;
    }
    inline size_t hash(const Key &k) const {
        return k.GetHash();
    }
};

} // anon

// Shared registry mapping Key -> live node, used to deduplicate
// structurally-identical (non-variable) expression nodes.
struct PcpMapExpression::_Node::_NodeMap
{
    typedef PcpMapExpression::_Node::Key Key;
    typedef tbb::concurrent_hash_map<
        Key, PcpMapExpression::_Node *, _KeyHashEq<Key> > MapType;
    typedef MapType::accessor accessor;
    MapType map;
};

TfStaticData<PcpMapExpression::_Node::_NodeMap>
PcpMapExpression::_Node::_nodeRegistry;

// Statically determines whether every evaluation of a tree rooted at a node
// with this key is guaranteed to contain the </> -> </> identity mapping.
bool
PcpMapExpression::_Node::_ExpressionTreeAlwaysHasIdentity(const Key& key)
{
    switch (key.op) {
    case _OpAddRootIdentity:
        return true;

    case _OpVariable:
        return false;

    case _OpConstant:
        {
            // Check if this maps </> back to </> -- in which case this
            // has a root identity mapping.
            SdfPath absRoot = SdfPath::AbsoluteRootPath();
            return key.valueForConstant.MapSourceToTarget(absRoot) == absRoot;
        }

    case _OpCompose:
        // Composing two map expressions may cause the identity
        // mapping to be removed; consider the case where we compose
        // {</>:</>, </A>:</B>} and {</B>:</C>}. The expected result
        // is {</A>:</C>}.
        //
        // In this case, the expression tree will only have an identity
        // mapping if *both* subtrees being composed have an identity.
        return (key.arg1 && key.arg1->expressionTreeAlwaysHasIdentity &&
                key.arg2 && key.arg2->expressionTreeAlwaysHasIdentity);

    default:
        // For any other operation, if either of the subtrees has an
        // identity mapping, so does this tree.
        return (key.arg1 && key.arg1->expressionTreeAlwaysHasIdentity) ||
               (key.arg2 && key.arg2->expressionTreeAlwaysHasIdentity);
    }
}

// Creates (or reuses) the node for (op, args, value). Non-variable nodes are
// deduplicated through _nodeRegistry; variable nodes are always unique.
PcpMapExpression::_NodeRefPtr
PcpMapExpression::_Node::New( _Op op_,
                              const _NodeRefPtr & arg1_,
                              const _NodeRefPtr & arg2_,
                              const Value & valueForConstant_ )
{
    TfAutoMallocTag2 tag("Pcp", "PcpMapExpresion");
    const Key key(op_, arg1_, arg2_, valueForConstant_);

    if (key.op != _OpVariable) {
        // Check for existing instance to re-use
        _NodeMap::accessor accessor;
        if (_nodeRegistry->map.insert(accessor, key) ||
            accessor->second->_refCount.fetch_and_increment() == 0) {
            // Either there was no node in the table, or there was but it had
            // begun dying (another client dropped its refcount to 0). We have
            // to create a new node in the table. When the client that is
            // killing the other node it looks for itself in the table, it will
            // either not find itself or will find a different node and so won't
            // remove it.
            _NodeRefPtr newNode(new _Node(key));
            accessor->second = newNode.get();
            return newNode;
        }
        // fetch_and_increment above already took our reference, so do not
        // add another one here.
        return _NodeRefPtr(accessor->second, /*add_ref =*/ false);
    }
    return _NodeRefPtr(new _Node(key));
}

// Registers this node as a dependent of its argument nodes so that
// invalidation can propagate from variables up through the tree.
PcpMapExpression::_Node::_Node( const Key & key_ )
    : key(key_)
    , expressionTreeAlwaysHasIdentity(_ExpressionTreeAlwaysHasIdentity(key))
{
    _refCount = 0;
    if (key.arg1) {
        tbb::spin_mutex::scoped_lock lock(key.arg1->_mutex);
        key.arg1->_dependentExpressions.insert(this);
    }
    if (key.arg2) {
        tbb::spin_mutex::scoped_lock lock(key.arg2->_mutex);
        key.arg2->_dependentExpressions.insert(this);
    }
}

PcpMapExpression::_Node::~_Node()
{
    if (key.arg1) {
        tbb::spin_mutex::scoped_lock lock(key.arg1->_mutex);
        key.arg1->_dependentExpressions.erase(this);
    }
    if (key.arg2) {
        tbb::spin_mutex::scoped_lock lock(key.arg2->_mutex);
        key.arg2->_dependentExpressions.erase(this);
    }

    if (key.op != _OpVariable) {
        // Remove from node map if present.
        // (A different node may occupy this key if New() replaced us while we
        // were dying -- see New(); in that case leave the entry alone.)
        _NodeMap::accessor accessor;
        if (_nodeRegistry->map.find(accessor, key) &&
            accessor->second == this) {
            _nodeRegistry->map.erase(accessor);
        }
    }
}

// Returns the (memoized) evaluated value for this node.
const PcpMapExpression::Value &
PcpMapExpression::_Node::EvaluateAndCache() const
{
    if (!_cachedValue) {
        TRACE_SCOPE("PcpMapExpression::_Node::EvaluateAndCache - cache miss");
        _cachedValue.reset(_EvaluateUncached());
    }
    return _cachedValue.get();
}

PcpMapExpression::Value
PcpMapExpression::_Node::_EvaluateUncached() const
{
    switch(key.op) {
    case _OpConstant:
        return key.valueForConstant;
    case _OpVariable:
        return _valueForVariable;
    case _OpInverse:
        return key.arg1->EvaluateAndCache().GetInverse();
    case _OpCompose:
        return key.arg1->EvaluateAndCache()
            .Compose(key.arg2->EvaluateAndCache());
    case _OpAddRootIdentity:
        return _AddRootIdentity(key.arg1->EvaluateAndCache());
    default:
        TF_VERIFY(false, "unhandled case");
        return PcpMapFunction();
    }
}

// Drops this node's cached value and recursively invalidates dependents.
// Stops at already-invalid nodes, whose dependents must be invalid too.
void
PcpMapExpression::_Node::_Invalidate()
{
    if (_cachedValue) {
        _cachedValue.reset();
        TF_FOR_ALL(dep, _dependentExpressions) {
            (*dep)->_Invalidate();
        }
    } else {
        // This node is already invalid so dependent nodes are
        // already invalid.
    }
}

void
PcpMapExpression::_Node::SetValueForVariable(const Value & value)
{
    if (key.op != _OpVariable) {
        TF_CODING_ERROR("Cannot set value for non-variable");
        return;
    }
    if (_valueForVariable != value) {
        _valueForVariable = value;
        _Invalidate();
    }
}

inline size_t
PcpMapExpression::_Node::Key::GetHash() const
{
    size_t hash = op;
    boost::hash_combine(hash, boost::get_pointer(arg1));
    boost::hash_combine(hash, boost::get_pointer(arg2));
    boost::hash_combine(hash, valueForConstant);
    return hash;
}

bool
PcpMapExpression::_Node::Key::operator==(const Key &key) const
{
    return op == key.op
        && arg1 == key.arg1
        && arg2 == key.arg2
        && valueForConstant == key.valueForConstant;
}

// boost::intrusive_ptr refcount hooks for _Node.
void
intrusive_ptr_add_ref(PcpMapExpression::_Node* p)
{
    ++p->_refCount;
}

void
intrusive_ptr_release(PcpMapExpression::_Node* p)
{
    if (p->_refCount.fetch_and_decrement() == 1)
        delete p;
}

PXR_NAMESPACE_CLOSE_SCOPE
//
//  Copyright (C) 2014 Novartis Institutes for BioMedical Research
//
//   @@ All Rights Reserved @@
//  This file is part of the RDKit.
//  The contents are covered by the terms of the BSD license
//  which is included in the file license.txt, found at the root
//  of the RDKit source tree.
//
#include <list>
#include <algorithm>
#include <math.h>

#include <RDGeneral/BoostStartInclude.h>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <RDGeneral/BoostEndInclude.h>

#include <iostream>
#include <sstream>
#include "SubstructMatchCustom.h"
#include "MaximumCommonSubgraph.h"
#include <GraphMol/QueryOps.h>

namespace RDKit {

// Populates |params| from a JSON string; any key absent from the JSON keeps
// the value already present in |params|. Silently does nothing when |json|
// is null/empty or |params| is null. Malformed JSON propagates the
// boost::property_tree parse exception to the caller.
void parseMCSParametersJSON(const char* json, MCSParameters* params) {
  if (params && json && 0 != strlen(json)) {
    std::istringstream ss;
    ss.str(json);
    boost::property_tree::ptree pt;
    boost::property_tree::read_json(ss, pt);
    RDKit::MCSParameters& p = *params;
    p.MaximizeBonds = pt.get<bool>("MaximizeBonds", p.MaximizeBonds);
    p.Threshold = pt.get<double>("Threshold", p.Threshold);
    p.Timeout = pt.get<unsigned>("Timeout", p.Timeout);
    p.AtomCompareParameters.MatchValences =
        pt.get<bool>("MatchValences", p.AtomCompareParameters.MatchValences);
    p.AtomCompareParameters.MatchChiralTag =
        pt.get<bool>("MatchChiralTag", p.AtomCompareParameters.MatchChiralTag);
    p.AtomCompareParameters.MatchFormalCharge = pt.get<bool>(
        "MatchFormalCharge", p.AtomCompareParameters.MatchFormalCharge);
    // NOTE: the single "RingMatchesRingOnly" key intentionally feeds both the
    // atom- and bond-compare parameter structs.
    p.AtomCompareParameters.RingMatchesRingOnly = pt.get<bool>(
        "RingMatchesRingOnly", p.AtomCompareParameters.RingMatchesRingOnly);
    p.BondCompareParameters.RingMatchesRingOnly = pt.get<bool>(
        "RingMatchesRingOnly", p.BondCompareParameters.RingMatchesRingOnly);
    p.BondCompareParameters.CompleteRingsOnly = pt.get<bool>(
        "CompleteRingsOnly", p.BondCompareParameters.CompleteRingsOnly);
    p.BondCompareParameters.MatchFusedRings = pt.get<bool>(
        "MatchFusedRings", p.BondCompareParameters.MatchFusedRings);
    p.BondCompareParameters.MatchFusedRingsStrict = pt.get<bool>(
        "MatchFusedRingsStrict", p.BondCompareParameters.MatchFusedRingsStrict);
    p.BondCompareParameters.MatchStereo =
        pt.get<bool>("MatchStereo", p.BondCompareParameters.MatchStereo);
    // Map the symbolic comparator names to the predefined functors;
    // unrecognized names (including "def") leave the current typer in place.
    std::string s = pt.get<std::string>("AtomCompare", "def");
    if (0 == strcmp("Any", s.c_str()))
      p.AtomTyper = MCSAtomCompareAny;
    else if (0 == strcmp("Elements", s.c_str()))
      p.AtomTyper = MCSAtomCompareElements;
    else if (0 == strcmp("Isotopes", s.c_str()))
      p.AtomTyper = MCSAtomCompareIsotopes;
    else if (0 == strcmp("AnyHeavy", s.c_str()))
      p.AtomTyper = MCSAtomCompareAnyHeavyAtom;
    s = pt.get<std::string>("BondCompare", "def");
    if (0 == strcmp("Any", s.c_str()))
      p.BondTyper = MCSBondCompareAny;
    else if (0 == strcmp("Order", s.c_str()))
      p.BondTyper = MCSBondCompareOrder;
    else if (0 == strcmp("OrderExact", s.c_str()))
      p.BondTyper = MCSBondCompareOrderExact;
    p.InitialSeed = pt.get<std::string>("InitialSeed", "");
  }
}

// Runs the maximum-common-subgraph search over |mols| with the given
// parameters (defaults are used when |params| is null).
MCSResult findMCS(const std::vector<ROMOL_SPTR>& mols,
                  const MCSParameters* params) {
  MCSParameters p;
  if (nullptr == params) params = &p;
  RDKit::FMCS::MaximumCommonSubgraph fmcs(params);
  return fmcs.find(mols);
}

// Convenience wrapper: parse a JSON parameter string, then run findMCS.
MCSResult findMCS_P(const std::vector<ROMOL_SPTR>& mols,
                    const char* params_json) {
  MCSParameters p;
  parseMCSParametersJSON(params_json, &p);
  return findMCS(mols, &p);
}

// Backwards-compatible overload without a RingComparator; defaults to
// IgnoreRingFusion.
MCSResult findMCS(const std::vector<ROMOL_SPTR>& mols, bool maximizeBonds,
                  double threshold, unsigned timeout, bool verbose,
                  bool matchValences, bool ringMatchesRingOnly,
                  bool completeRingsOnly, bool matchChiralTag,
                  AtomComparator atomComp, BondComparator bondComp) {
  return findMCS(mols, maximizeBonds, threshold, timeout, verbose,
                 matchValences, ringMatchesRingOnly, completeRingsOnly,
                 matchChiralTag, atomComp, bondComp, IgnoreRingFusion);
}

MCSResult findMCS(const std::vector<ROMOL_SPTR>& mols, bool maximizeBonds,
                  double threshold, unsigned timeout, bool verbose,
                  bool matchValences, bool ringMatchesRingOnly,
                  bool completeRingsOnly, bool matchChiralTag,
                  AtomComparator atomComp, BondComparator
bondComp, RingComparator ringComp) { auto* ps = new MCSParameters(); ps->MaximizeBonds = maximizeBonds; ps->Threshold = threshold; ps->Timeout = timeout; ps->Verbose = verbose; ps->AtomCompareParameters.MatchValences = matchValences; ps->AtomCompareParameters.MatchChiralTag = matchChiralTag; switch (atomComp) { case AtomCompareAny: ps->AtomTyper = MCSAtomCompareAny; break; case AtomCompareElements: ps->AtomTyper = MCSAtomCompareElements; break; case AtomCompareIsotopes: ps->AtomTyper = MCSAtomCompareIsotopes; break; case AtomCompareAnyHeavyAtom: ps->AtomTyper = MCSAtomCompareAnyHeavyAtom; break; } ps->AtomCompareParameters.RingMatchesRingOnly = ringMatchesRingOnly; switch (bondComp) { case BondCompareAny: ps->BondTyper = MCSBondCompareAny; break; case BondCompareOrder: ps->BondTyper = MCSBondCompareOrder; break; case BondCompareOrderExact: ps->BondTyper = MCSBondCompareOrderExact; break; } ps->BondCompareParameters.RingMatchesRingOnly = ringMatchesRingOnly; ps->BondCompareParameters.CompleteRingsOnly = completeRingsOnly; ps->BondCompareParameters.MatchFusedRings = (ringComp != IgnoreRingFusion); ps->BondCompareParameters.MatchFusedRingsStrict = (ringComp == StrictRingFusion); MCSResult res = findMCS(mols, ps); delete ps; return res; } bool MCSProgressCallbackTimeout(const MCSProgressData& stat, const MCSParameters& params, void* userData) { RDUNUSED_PARAM(stat); unsigned long long* t0 = (unsigned long long*)userData; unsigned long long t = nanoClock(); return t - *t0 <= params.Timeout * 1000000ULL; } // PREDEFINED FUNCTORS: //=== ATOM COMPARE ======================================================== static bool checkRingMatch(const MCSAtomCompareParameters& p, const ROMol& mol1, unsigned int atom1, const ROMol& mol2, unsigned int atom2) { if (p.RingMatchesRingOnly) { bool atom1inRing = queryIsAtomInRing(mol1.getAtomWithIdx(atom1)); bool atom2inRing = queryIsAtomInRing(mol2.getAtomWithIdx(atom2)); return atom1inRing == atom2inRing; } else { return true; } } static 
// Atoms match only if their formal charges are equal.
bool checkAtomCharge(const MCSAtomCompareParameters& p, const ROMol& mol1,
                     unsigned int atom1, const ROMol& mol2,
                     unsigned int atom2) {
  RDUNUSED_PARAM(p);
  const Atom& a1 = *mol1.getAtomWithIdx(atom1);
  const Atom& a2 = *mol2.getAtomWithIdx(atom2);
  return a1.getFormalCharge() == a2.getFormalCharge();
}

// Chirality check: if the first atom is a tetrahedral stereocenter, the
// second must be one too (CW/CCW direction is not compared here).
// Non-stereocenters always pass.
static bool checkAtomChirality(const MCSAtomCompareParameters& p,
                               const ROMol& mol1, unsigned int atom1,
                               const ROMol& mol2, unsigned int atom2) {
  RDUNUSED_PARAM(p);
  const Atom& a1 = *mol1.getAtomWithIdx(atom1);
  const Atom& a2 = *mol2.getAtomWithIdx(atom2);
  Atom::ChiralType ac1 = a1.getChiralTag();
  Atom::ChiralType ac2 = a2.getChiralTag();
  if (ac1 == Atom::CHI_TETRAHEDRAL_CW || ac1 == Atom::CHI_TETRAHEDRAL_CCW) {
    return (ac2 == Atom::CHI_TETRAHEDRAL_CW ||
            ac2 == Atom::CHI_TETRAHEDRAL_CCW);
  }
  return true;
}

// Atom comparator: any two atoms match, subject to the optional chirality,
// formal-charge, and ring-membership constraints in |p|.
bool MCSAtomCompareAny(const MCSAtomCompareParameters& p, const ROMol& mol1,
                       unsigned int atom1, const ROMol& mol2,
                       unsigned int atom2, void*) {
  if (p.MatchChiralTag && !checkAtomChirality(p, mol1, atom1, mol2, atom2))
    return false;
  if (p.MatchFormalCharge && !checkAtomCharge(p, mol1, atom1, mol2, atom2))
    return false;
  if (p.RingMatchesRingOnly) return checkRingMatch(p, mol1, atom1, mol2, atom2);
  return true;
}

// Atom comparator: atoms must share the same element (and optionally the
// same total valence), plus the common optional constraints.
bool MCSAtomCompareElements(const MCSAtomCompareParameters& p,
                            const ROMol& mol1, unsigned int atom1,
                            const ROMol& mol2, unsigned int atom2, void*) {
  const Atom& a1 = *mol1.getAtomWithIdx(atom1);
  const Atom& a2 = *mol2.getAtomWithIdx(atom2);
  if (a1.getAtomicNum() != a2.getAtomicNum()) return false;
  if (p.MatchValences && a1.getTotalValence() != a2.getTotalValence())
    return false;
  if (p.MatchChiralTag && !checkAtomChirality(p, mol1, atom1, mol2, atom2))
    return false;
  if (p.MatchFormalCharge && !checkAtomCharge(p, mol1, atom1, mol2, atom2))
    return false;
  if (p.RingMatchesRingOnly) return checkRingMatch(p, mol1, atom1, mol2, atom2);
  return true;
}

// Atom comparator: atoms must share the same isotope (element is NOT
// compared -- see the commented-out call below), plus the common optional
// constraints.
bool MCSAtomCompareIsotopes(const MCSAtomCompareParameters& p,
                            const ROMol& mol1, unsigned int atom1,
                            const ROMol& mol2, unsigned int atom2, void* ud) {
  RDUNUSED_PARAM(ud);
  // ignore everything except isotope information:
  // if( ! MCSAtomCompareElements (p, mol1, atom1, mol2, atom2, ud))
  //     return false;
  const Atom& a1 = *mol1.getAtomWithIdx(atom1);
  const Atom& a2 = *mol2.getAtomWithIdx(atom2);
  if (a1.getIsotope() != a2.getIsotope()) return false;
  if (p.MatchChiralTag && !checkAtomChirality(p, mol1, atom1, mol2, atom2))
    return false;
  if (p.MatchFormalCharge && !checkAtomCharge(p, mol1, atom1, mol2, atom2))
    return false;
  if (p.RingMatchesRingOnly) return checkRingMatch(p, mol1, atom1, mol2, atom2);
  return true;
}

// Atom comparator: two heavy atoms (Z > 1) always match each other;
// otherwise atoms must be of the same element. Delegates the optional
// constraint checks to MCSAtomCompareAny.
bool MCSAtomCompareAnyHeavyAtom(const MCSAtomCompareParameters& p,
                                const ROMol& mol1, unsigned int atom1,
                                const ROMol& mol2, unsigned int atom2, void*) {
  const Atom& a1 = *mol1.getAtomWithIdx(atom1);
  const Atom& a2 = *mol2.getAtomWithIdx(atom2);
  // Any atom, including H, matches another atom of the same type,
  // according to the other flags
  if (a1.getAtomicNum() == a2.getAtomicNum() ||
      (a1.getAtomicNum() > 1 && a2.getAtomicNum() > 1)) {
    return MCSAtomCompareAny(p, mol1, atom1, mol2, atom2, nullptr);
  }
  return false;
}

//=== BOND COMPARE ========================================================

// Symmetric lookup table of bond-type compatibility. UNSPECIFIED and ZERO
// match everything; with |ignoreAromatization| the aromatic / "N-and-a-half"
// orders are additionally treated as equivalent to the corresponding
// integral order.
class BondMatchOrderMatrix {
  bool MatchMatrix[Bond::ZERO + 1][Bond::ZERO + 1];

 public:
  BondMatchOrderMatrix(bool ignoreAromatization) {
    memset(MatchMatrix, 0, sizeof(MatchMatrix));
    for (size_t i = 0; i <= Bond::ZERO;
         i++) {  // fill cells of the same and unspecified type
      MatchMatrix[i][i] = true;
      MatchMatrix[Bond::UNSPECIFIED][i] = MatchMatrix[i][Bond::UNSPECIFIED] =
          true;
      MatchMatrix[Bond::ZERO][i] = MatchMatrix[i][Bond::ZERO] = true;
    }
    if (ignoreAromatization) {
      MatchMatrix[Bond::SINGLE][Bond::AROMATIC] =
          MatchMatrix[Bond::AROMATIC][Bond::SINGLE] = true;
      MatchMatrix[Bond::SINGLE][Bond::ONEANDAHALF] =
          MatchMatrix[Bond::ONEANDAHALF][Bond::SINGLE] = true;
      MatchMatrix[Bond::DOUBLE][Bond::TWOANDAHALF] =
          MatchMatrix[Bond::TWOANDAHALF][Bond::DOUBLE] = true;
      MatchMatrix[Bond::TRIPLE][Bond::THREEANDAHALF] =
          MatchMatrix[Bond::THREEANDAHALF][Bond::TRIPLE] = true;
      MatchMatrix[Bond::QUADRUPLE][Bond::FOURANDAHALF] =
          MatchMatrix[Bond::FOURANDAHALF][Bond::QUADRUPLE] = true;
      MatchMatrix[Bond::QUINTUPLE][Bond::FIVEANDAHALF] =
          MatchMatrix[Bond::FIVEANDAHALF][Bond::QUINTUPLE] = true;
    }
  }
  inline bool isEqual(unsigned i, unsigned j) const {
    return MatchMatrix[i][j];
  }
};

// For two double bonds: if the first carries defined stereochemistry
// (beyond STEREOANY), the second must too. All other bond-type pairs pass.
static bool checkBondStereo(const MCSBondCompareParameters& p,
                            const ROMol& mol1, unsigned int bond1,
                            const ROMol& mol2, unsigned int bond2) {
  RDUNUSED_PARAM(p);
  const Bond* b1 = mol1.getBondWithIdx(bond1);
  const Bond* b2 = mol2.getBondWithIdx(bond2);
  Bond::BondStereo bs1 = b1->getStereo();
  Bond::BondStereo bs2 = b2->getStereo();
  if (b1->getBondType() == Bond::DOUBLE && b2->getBondType() == Bond::DOUBLE) {
    if (bs1 > Bond::STEREOANY && !(bs2 > Bond::STEREOANY)) return false;
  }
  return true;
}

// Bonds match only if both are in a ring or both are not, using the
// precomputed ring-membership tables passed through |v_ringMatchMatrixSet|.
static bool checkRingMatch(const MCSBondCompareParameters&, const ROMol&,
                           unsigned int bond1, const ROMol& mol2,
                           unsigned int bond2, void* v_ringMatchMatrixSet) {
  if (!v_ringMatchMatrixSet) throw "v_ringMatchMatrixSet is NULL";  // never
  FMCS::RingMatchTableSet* ringMatchMatrixSet =
      static_cast<FMCS::RingMatchTableSet*>(v_ringMatchMatrixSet);

  const std::vector<size_t>& ringsIdx1 =
      ringMatchMatrixSet->getQueryBondRings(bond1);  // indices of rings
  const std::vector<size_t>& ringsIdx2 =
      ringMatchMatrixSet->getTargetBondRings(&mol2, bond2);  // indices of rings
  bool bond1inRing = !ringsIdx1.empty();
  bool bond2inRing = !ringsIdx2.empty();
  // bond are both either in a ring or not
  return (bond1inRing == bond2inRing);
}

// Bond comparator: any two bonds match, subject to the optional stereo and
// ring-membership constraints in |p|.
bool MCSBondCompareAny(const MCSBondCompareParameters& p, const ROMol& mol1,
                       unsigned int bond1, const ROMol& mol2,
                       unsigned int bond2, void* ud) {
  if (p.MatchStereo && !checkBondStereo(p, mol1, bond1, mol2, bond2))
    return false;
  if (p.RingMatchesRingOnly)
    return checkRingMatch(p, mol1, bond1, mol2, bond2, ud);
  return true;
}

bool MCSBondCompareOrder(const
                         MCSBondCompareParameters& p, const ROMol& mol1,
                         unsigned int bond1, const ROMol& mol2,
                         unsigned int bond2, void* ud) {
  // Bond comparator: bond orders must be compatible, treating aromatic and
  // Kekulized forms as equivalent (see BondMatchOrderMatrix).
  static const BondMatchOrderMatrix match(true);  // ignore Aromatization
  const Bond* b1 = mol1.getBondWithIdx(bond1);
  const Bond* b2 = mol2.getBondWithIdx(bond2);
  Bond::BondType t1 = b1->getBondType();
  Bond::BondType t2 = b2->getBondType();
  if (match.isEqual(t1, t2)) {
    if (p.MatchStereo && !checkBondStereo(p, mol1, bond1, mol2, bond2))
      return false;
    if (p.RingMatchesRingOnly)
      return checkRingMatch(p, mol1, bond1, mol2, bond2, ud);
    return true;
  }
  return false;
}

// Bond comparator: bond orders must match exactly (aromatic != single).
bool MCSBondCompareOrderExact(const MCSBondCompareParameters& p,
                              const ROMol& mol1, unsigned int bond1,
                              const ROMol& mol2, unsigned int bond2, void* ud) {
  static const BondMatchOrderMatrix match(false);  // AROMATIC != SINGLE
  const Bond* b1 = mol1.getBondWithIdx(bond1);
  const Bond* b2 = mol2.getBondWithIdx(bond2);
  Bond::BondType t1 = b1->getBondType();
  Bond::BondType t2 = b2->getBondType();
  if (match.isEqual(t1, t2)) {
    if (p.MatchStereo && !checkBondStereo(p, mol1, bond1, mol2, bond2))
      return false;
    if (p.RingMatchesRingOnly)
      return checkRingMatch(p, mol1, bond1, mol2, bond2, ud);
    return true;
  }
  return false;
}

//=== RING COMPARE ========================================================

// Checks whether the candidate MCS (the mapping c1/c2 of the |query| graph
// into mol1/mol2) leaves a ring of |mol2| only partially covered in a way
// that violates the ring-fusion matching mode in |p|.
// Returns true when the match must be rejected.
inline static bool ringFusionCheck(const short unsigned c1[],
                                   const short unsigned c2[],
                                   const ROMol& mol1, const FMCS::Graph& query,
                                   const ROMol& mol2, const FMCS::Graph& target,
                                   const MCSParameters* p) {
  const RingInfo *ri2 = mol2.getRingInfo();
  const VECT_INT_VECT &br2 = ri2->bondRings();
  std::vector<size_t> nonFusedBonds(br2.size(), 0);
  std::vector<size_t> numMcsBondRings(br2.size(), 0);
  RDKit::FMCS::Graph::BOND_ITER_PAIR bpIter = boost::edges(query);
  size_t i = 0;
  // numMcsBondRings stores the number of bonds which
  // are part of the MCS for each ring
  for (auto it = bpIter.first; it != bpIter.second; ++it) {
    const Bond *b = mol2.getBondBetweenAtoms(
        target[c2[boost::source(*it, query)]],
        target[c2[boost::target(*it, query)]]);
    if (!b) continue;
    unsigned int bi = b->getIdx();
    if (!ri2->numBondRings(bi)) continue;
    for (i = 0; i < br2.size(); ++i) {
      if (std::find(br2[i].begin(), br2[i].end(), bi) != br2[i].end())
        ++numMcsBondRings[i];
    }
  }
  // nonFusedBonds stores the number of non-fused bonds
  // (i.e., which belong to a single ring) for each ring
  for (i = 0; i < br2.size(); ++i) {
    for (auto bi: br2[i]) {
      if (ri2->numBondRings(bi) == 1)
        ++nonFusedBonds[i];
    }
  }
  /* if a ring has at least one bond which is part of the MCS,
     we need to check how many bonds are actually part of the MCS:
     if they are greater than the number of fused bonds but lower
     than the number of non-fused bonds, then the ring is not complete
     and we should return true straightaway
     We need to check that the number of bonds in a ring is greater
     than the number of fused bonds because that guarantees that this
     ring is actually involved in the MCS and not just adjacent to
     another ring which is indeed part of the MCS. */
  bool missingFusedBond = false;
  for (i = 0; i < br2.size(); ++i) {
    if (numMcsBondRings[i] &&
        numMcsBondRings[i] > (br2[i].size() - nonFusedBonds[i]) &&
        numMcsBondRings[i] < nonFusedBonds[i])
      break;
    if (numMcsBondRings[i] == nonFusedBonds[i] &&
        numMcsBondRings[i] < br2[i].size())
      missingFusedBond = true;
  }
  // Early break above means an incomplete ring was found: reject.
  if (i < br2.size())
    return true;
  /* If we found that the MCS is missing a fused bond, we may need to
     check against the smaller molecule as well.
     Consider the MCS between 2-methylbicyclo[4.3.0]nonane and
     1-methylbicyclo[3.1.0]hexane.
     In permissive mode, we are happy for methylcyclohexane to be the MCS.
     In strict mode, we don't want methylcyclohexane to be the MCS.
     When methylcyclohexane is checked against
     2-methylbicyclo[4.3.0]nonane there is no missing fused bond.
     This is OK for permissive mode. In strict mode, we also need to
     check against 1-methylbicyclo[3.1.0]hexane, where there is indeed
     a missing fused bond. */
  bool missingFusedBond2 = false;
  if (missingFusedBond ^ p->BondCompareParameters.MatchFusedRingsStrict) {
    // if we are in permissive mode we allow one of the molecules to miss
    // fused bonds, but not both. In strict mode we allow neither.
    if (!p->BondCompareParameters.MatchFusedRingsStrict)
      missingFusedBond = false;
    const RingInfo *ri1 = mol1.getRingInfo();
    const VECT_INT_VECT &br1 = ri1->bondRings();
    std::set<unsigned int> mcsRingBondIdxSet;
    // put all MCS bond indices which belong to one or more rings in a set
    for (auto it = bpIter.first; it != bpIter.second; ++it) {
      const Bond *b = mol1.getBondBetweenAtoms(
          query[c1[boost::source(*it, query)]],
          query[c1[boost::target(*it, query)]]);
      if (b && ri1->numBondRings(b->getIdx()))
        mcsRingBondIdxSet.insert(b->getIdx());
    }
    for (i = 0; i < br1.size(); ++i) {
      size_t foundBonds = 0;
      size_t fusedBonds = 0;
      // for each ring, check how many bonds belong to MCS (foundBonds)
      // and how many belong to multiple rings (fused bonds).
      for (auto bi: br1[i]) {
        if (std::find(mcsRingBondIdxSet.begin(),
                      mcsRingBondIdxSet.end(), bi) != mcsRingBondIdxSet.end()) {
          ++foundBonds;
          if (ri1->numBondRings(bi) > 1)
            ++fusedBonds;
        }
      }
      // if the ring is part of the MCS, and we found more bonds than the
      // fused ones (i.e., the ring is not simply adjacent to an MCS ring)
      // but less than the bonds which are part of this ring, then some
      // fused bond is missing.
if (foundBonds && foundBonds > fusedBonds && foundBonds < br1[i].size()) { missingFusedBond2 = true; break; } } } // if both missingFusedBond and missingFusedBond2 are false, return true return (!missingFusedBond && !missingFusedBond2); } bool FinalMatchCheckFunction(const short unsigned c1[], const short unsigned c2[], const ROMol& mol1, const FMCS::Graph& query, const ROMol& mol2, const FMCS::Graph& target, const MCSParameters *p) { if ((p->BondCompareParameters.MatchFusedRings || p->BondCompareParameters.MatchFusedRingsStrict) && !ringFusionCheck(c1, c2, mol1, query, mol2, target, p)) return false; if (p->AtomCompareParameters.MatchChiralTag && !FinalChiralityCheckFunction(c1, c2, mol1, query, mol2, target, p)) return false; return true; } bool FinalChiralityCheckFunction(const short unsigned c1[], const short unsigned c2[], const ROMol& mol1, const FMCS::Graph& query, const ROMol& mol2, const FMCS::Graph& target, const MCSParameters* /*unused*/) { const unsigned int qna = boost::num_vertices(query); // getNumAtoms() // check chiral atoms only: for (unsigned int i = 0; i < qna; ++i) { const Atom& a1 = *mol1.getAtomWithIdx(query[c1[i]]); Atom::ChiralType ac1 = a1.getChiralTag(); const Atom& a2 = *mol2.getAtomWithIdx(target[c2[i]]); Atom::ChiralType ac2 = a2.getChiralTag(); ///*------------------ OLD Code : // ???: non chiral query atoms ARE ALLOWED TO MATCH to Chiral target atoms // (see test for issue 481) if (a1.getDegree() < 3 || //#688: doesn't deal with "explicit" Hs properly !(ac1 == Atom::CHI_TETRAHEDRAL_CW || ac1 == Atom::CHI_TETRAHEDRAL_CCW)) continue; // skip non chiral center QUERY atoms if (!(ac2 == Atom::CHI_TETRAHEDRAL_CW || ac2 == Atom::CHI_TETRAHEDRAL_CCW)) return false; //-------------------- /* More accurate check: if( !(ac1 == Atom::CHI_TETRAHEDRAL_CW || ac1 == Atom::CHI_TETRAHEDRAL_CCW) && !(ac2 == Atom::CHI_TETRAHEDRAL_CW || ac2 == Atom::CHI_TETRAHEDRAL_CCW)) continue; // skip check if both atoms are non chiral center if(!( (ac1 == 
Atom::CHI_TETRAHEDRAL_CW || ac1 == Atom::CHI_TETRAHEDRAL_CCW) && (ac2 == Atom::CHI_TETRAHEDRAL_CW || ac2 == Atom::CHI_TETRAHEDRAL_CCW)))//ac2 != ac1) return false; // both atoms must be chiral or not without a query priority */ const unsigned a1Degree = boost::out_degree(c1[i], query); // a1.getDegree(); // number of all connected atoms in a seed if (a1Degree > a2.getDegree()) { //#688 was != . // FIX issue 631 // printf("atoms Degree (%u, %u) %u [%u], %u\n", query[c1[i]], // target[c2[i]], a1Degree, a1.getDegree(), a2.getDegree()); if (1 == a1Degree && a1.getDegree() == a2.getDegree()) continue; // continue to grow the seed else return false; } INT_LIST qOrder; for (unsigned int j = 0; j < qna && qOrder.size() != a1Degree; ++j) { const Bond* qB = mol1.getBondBetweenAtoms(query[c1[i]], query[c1[j]]); if (qB) qOrder.push_back(qB->getIdx()); } //#688 INT_LIST qmoOrder; { ROMol::OEDGE_ITER dbeg, dend; boost::tie(dbeg, dend) = mol1.getAtomBonds(&a1); for (; dbeg != dend; dbeg++) { int dbidx = mol1[*dbeg]->getIdx(); if (std::find(qOrder.begin(), qOrder.end(), dbidx) != qOrder.end()) qmoOrder.push_back(dbidx); // else // qmoOrder.push_back(-1); } } int qPermCount = // was: a1.getPerturbationOrder(qOrder); static_cast<int>(countSwapsToInterconvert(qmoOrder, qOrder)); INT_LIST mOrder; for (unsigned int j = 0; j < qna && mOrder.size() != a2.getDegree(); ++j) { const Bond* mB = mol2.getBondBetweenAtoms(target[c2[i]], target[c2[j]]); if (mB) mOrder.push_back(mB->getIdx()); } //#688 while (mOrder.size() < a2.getDegree()) { mOrder.push_back(-1); } INT_LIST moOrder; ROMol::OEDGE_ITER dbeg, dend; boost::tie(dbeg, dend) = mol2.getAtomBonds(&a2); for (; dbeg != dend; dbeg++) { int dbidx = mol2[*dbeg]->getIdx(); if (std::find(mOrder.begin(), mOrder.end(), dbidx) != mOrder.end()) moOrder.push_back(dbidx); else moOrder.push_back(-1); } int mPermCount = // was: a2.getPerturbationOrder(mOrder); static_cast<int>(countSwapsToInterconvert(moOrder, mOrder)); //---- if ((qPermCount % 2 == 
mPermCount % 2 && a1.getChiralTag() != a2.getChiralTag()) || (qPermCount % 2 != mPermCount % 2 && a1.getChiralTag() == a2.getChiralTag())) return false; } // check double bonds ONLY (why ???) const unsigned int qnb = boost::num_edges(query); std::map<unsigned int, unsigned int> qMap; for (unsigned int j = 0; j < qna; ++j) qMap[query[c1[j]]] = j; RDKit::FMCS::Graph::BOND_ITER_PAIR bpIter = boost::edges(query); RDKit::FMCS::Graph::EDGE_ITER bIter = bpIter.first; for (unsigned int i = 0; i < qnb; i++, ++bIter) { const Bond* qBnd = mol1.getBondWithIdx(query[*bIter]); if (qBnd->getBondType() != Bond::DOUBLE || qBnd->getStereo() <= Bond::STEREOANY) continue; // don't think this can actually happen, but check to be sure: if (qBnd->getStereoAtoms().size() != 2) // MUST check it in the seed, not // in full query molecule, but // never happens !!! continue; const Bond* mBnd = mol2.getBondBetweenAtoms(target[c2[qMap[qBnd->getBeginAtomIdx()]]], target[c2[qMap[qBnd->getEndAtomIdx()]]]); CHECK_INVARIANT(mBnd, "Matching bond not found"); if (mBnd->getBondType() != Bond::DOUBLE || mBnd->getStereo() <= Bond::STEREOANY) continue; // don't think this can actually happen, but check to be sure: if (mBnd->getStereoAtoms().size() != 2) continue; unsigned int end1Matches = 0; unsigned int end2Matches = 0; if (target[c2[qMap[qBnd->getBeginAtomIdx()]]] == rdcast<unsigned int>(mBnd->getBeginAtomIdx())) { // query Begin == mol Begin if (target[c2[qMap[qBnd->getStereoAtoms()[0]]]] == rdcast<unsigned int>(mBnd->getStereoAtoms()[0])) end1Matches = 1; if (target[c2[qMap[qBnd->getStereoAtoms()[1]]]] == rdcast<unsigned int>(mBnd->getStereoAtoms()[1])) end2Matches = 1; } else { // query End == mol Begin if (target[c2[qMap[qBnd->getStereoAtoms()[0]]]] == rdcast<unsigned int>(mBnd->getStereoAtoms()[1])) end1Matches = 1; if (target[c2[qMap[qBnd->getStereoAtoms()[1]]]] == rdcast<unsigned int>(mBnd->getStereoAtoms()[0])) end2Matches = 1; } // std::cerr<<" bnd: "<<qBnd->getIdx()<<":"<<qBnd->getStereo()<<" 
- // "<<mBnd->getIdx()<<":"<<mBnd->getStereo()<<" -- "<<end1Matches<<" // "<<end2Matches<<std::endl; if (mBnd->getStereo() == qBnd->getStereo() && (end1Matches + end2Matches) == 1) return false; if (mBnd->getStereo() != qBnd->getStereo() && (end1Matches + end2Matches) != 1) return false; } return true; } bool FinalChiralityCheckFunction_1(const short unsigned c1[], const short unsigned c2[], const ROMol& mol1, const FMCS::Graph& query, const ROMol& mol2, const FMCS::Graph& target, const MCSParameters* p) { RDUNUSED_PARAM(p); const unsigned int qna = boost::num_vertices(query); // getNumAtoms() // check chiral atoms: for (unsigned int i = 0; i < qna; ++i) { const Atom& a1 = *mol1.getAtomWithIdx(query[c1[i]]); Atom::ChiralType ac1 = a1.getChiralTag(); if (!(ac1 == Atom::CHI_TETRAHEDRAL_CW || ac1 == Atom::CHI_TETRAHEDRAL_CCW)) continue; // skip non chiral center query atoms const Atom& a2 = *mol2.getAtomWithIdx(target[c2[i]]); Atom::ChiralType ac2 = a2.getChiralTag(); if (!(ac2 == Atom::CHI_TETRAHEDRAL_CW || ac2 == Atom::CHI_TETRAHEDRAL_CCW)) continue; // skip non chiral center TARGET atoms even if query atom is // chiral //// return false; // both atoms are chiral: const unsigned a1Degree = boost::out_degree(c1[i], query); // a1.getDegree(); if (a1Degree != a2.getDegree()) // number of all connected atoms in seed return false; // ??? 
INT_LIST qOrder; for (unsigned int j = 0; j < qna && qOrder.size() != a1Degree; ++j) { const Bond* qB = mol1.getBondBetweenAtoms(query[c1[i]], query[c1[j]]); if (qB) qOrder.push_back(qB->getIdx()); } int qPermCount = a1.getPerturbationOrder(qOrder); INT_LIST mOrder; for (unsigned int j = 0; j < qna && mOrder.size() != a2.getDegree(); ++j) { const Bond* mB = mol2.getBondBetweenAtoms(target[c2[i]], target[c2[j]]); if (mB) mOrder.push_back(mB->getIdx()); } int mPermCount = a2.getPerturbationOrder(mOrder); if ((qPermCount % 2 == mPermCount % 2 && a1.getChiralTag() != a2.getChiralTag()) || (qPermCount % 2 != mPermCount % 2 && a1.getChiralTag() == a2.getChiralTag())) return false; } // check double bonds ONLY (why ???) const unsigned int qnb = boost::num_edges(query); std::map<unsigned int, unsigned int> qMap; for (unsigned int j = 0; j < qna; ++j) qMap[query[c1[j]]] = j; RDKit::FMCS::Graph::BOND_ITER_PAIR bpIter = boost::edges(query); RDKit::FMCS::Graph::EDGE_ITER bIter = bpIter.first; for (unsigned int i = 0; i < qnb; i++, ++bIter) { const Bond* qBnd = mol1.getBondWithIdx(query[*bIter]); if (qBnd->getBondType() != Bond::DOUBLE || qBnd->getStereo() <= Bond::STEREOANY) continue; // don't think this can actually happen, but check to be sure: if (qBnd->getStereoAtoms().size() != 2) // MUST check it in the seed, not // in full query molecule, but // never happens !!! 
continue; const Bond* mBnd = mol2.getBondBetweenAtoms(target[c2[qMap[qBnd->getBeginAtomIdx()]]], target[c2[qMap[qBnd->getEndAtomIdx()]]]); CHECK_INVARIANT(mBnd, "Matching bond not found"); if (mBnd->getBondType() != Bond::DOUBLE || mBnd->getStereo() <= Bond::STEREOANY) continue; // don't think this can actually happen, but check to be sure: if (mBnd->getStereoAtoms().size() != 2) continue; unsigned int end1Matches = 0; unsigned int end2Matches = 0; if (target[c2[qMap[qBnd->getBeginAtomIdx()]]] == mBnd->getBeginAtomIdx()) { // query Begin == mol Begin if (target[c2[qMap[qBnd->getStereoAtoms()[0]]]] == rdcast<unsigned int>(mBnd->getStereoAtoms()[0])) end1Matches = 1; if (target[c2[qMap[qBnd->getStereoAtoms()[1]]]] == rdcast<unsigned int>(mBnd->getStereoAtoms()[1])) end2Matches = 1; } else { // query End == mol Begin if (target[c2[qMap[qBnd->getStereoAtoms()[0]]]] == rdcast<unsigned int>(mBnd->getStereoAtoms()[1])) end1Matches = 1; if (target[c2[qMap[qBnd->getStereoAtoms()[1]]]] == rdcast<unsigned int>(mBnd->getStereoAtoms()[0])) end2Matches = 1; } // std::cerr<<" bnd: "<<qBnd->getIdx()<<":"<<qBnd->getStereo()<<" - // "<<mBnd->getIdx()<<":"<<mBnd->getStereo()<<" -- "<<end1Matches<<" // "<<end2Matches<<std::endl; if (mBnd->getStereo() == qBnd->getStereo() && (end1Matches + end2Matches) == 1) return false; if (mBnd->getStereo() != qBnd->getStereo() && (end1Matches + end2Matches) != 1) return false; } return true; } } // namespace RDKit
// Copyright Carl Philipp Reh 2006 - 2019. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <sge/bvh/dummy_node.hpp> #include <sge/bvh/object_impl.hpp> #include <sge/bvh/tree_traits.hpp> #include <fcppt/args_char.hpp> #include <fcppt/args_from_second.hpp> #include <fcppt/declare_strong_typedef.hpp> #include <fcppt/exception.hpp> #include <fcppt/main.hpp> #include <fcppt/make_int_range_count.hpp> #include <fcppt/make_ref.hpp> #include <fcppt/strong_typedef.hpp> #include <fcppt/strong_typedef_input.hpp> #include <fcppt/strong_typedef_output.hpp> #include <fcppt/text.hpp> #include <fcppt/algorithm/map.hpp> #include <fcppt/container/tree/depth.hpp> #include <fcppt/either/match.hpp> #include <fcppt/io/cerr.hpp> #include <fcppt/io/cout.hpp> #include <fcppt/math/box/rect.hpp> #include <fcppt/math/dim/arithmetic.hpp> #include <fcppt/options/argument.hpp> #include <fcppt/options/error.hpp> #include <fcppt/options/error_output.hpp> #include <fcppt/options/long_name.hpp> #include <fcppt/options/optional_help_text.hpp> #include <fcppt/options/parse.hpp> #include <fcppt/options/result_of.hpp> #include <fcppt/random/variate.hpp> #include <fcppt/random/distribution/basic.hpp> #include <fcppt/random/distribution/parameters/uniform_real.hpp> #include <fcppt/random/generator/minstd_rand.hpp> #include <fcppt/random/generator/seed_from_chrono.hpp> #include <fcppt/record/get.hpp> #include <fcppt/record/make_label.hpp> #include <fcppt/config/external_begin.hpp> #include <chrono> #include <cstddef> #include <exception> #include <iostream> #include <ostream> #include <fcppt/config/external_end.hpp> namespace { FCPPT_DECLARE_STRONG_TYPEDEF(unsigned, rectangle_count); void example_main(rectangle_count const _rectangle_count) { using bvh_box = fcppt::math::box::rect<float>; using bvh_tree_traits = sge::bvh::tree_traits<sge::bvh::dummy_node, bvh_box, bvh_box>; bvh_box::dim const 
total_bounding_box{ 1024.0F, // NOLINT(cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers) 1024.0F // NOLINT(cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers) }; using generator_type = fcppt::random::generator::minstd_rand; generator_type generator{fcppt::random::generator::seed_from_chrono<generator_type::seed>()}; using real_distribution = fcppt::random::distribution::basic< fcppt::random::distribution::parameters::uniform_real<float>>; using real_variate = fcppt::random::variate<generator_type, real_distribution>; real_variate screen_size_rng{ fcppt::make_ref(generator), real_distribution( real_distribution::param_type::min( total_bounding_box.h() / 8.0F // NOLINT(cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers) ), real_distribution::param_type::sup( total_bounding_box.h() - total_bounding_box.h() / 8.0F // NOLINT(cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers) ))}; real_variate size_rng{ fcppt::make_ref(generator), real_distribution( real_distribution::param_type::min( total_bounding_box.h() / 16.0F // NOLINT(cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers) ), real_distribution::param_type::sup( total_bounding_box.h() / 4.0F // NOLINT(cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers) ))}; sge::bvh::object<bvh_tree_traits> bounding_hierarchy; auto const nodes(fcppt::algorithm::map<bvh_tree_traits::leaf_sequence>( fcppt::make_int_range_count(_rectangle_count.get()), [&screen_size_rng, &size_rng](auto) { bvh_box::dim const new_size{size_rng(), size_rng()}; return bvh_box( bvh_box::vector( screen_size_rng(), screen_size_rng()) - (new_size / 2.0F // NOLINT(cppcoreguidelines-avoid-magic-numbers,readability-magic-numbers) ) .get_unsafe(), new_size); })); using clock_type = std::chrono::steady_clock; clock_type::time_point const before{clock_type::now()}; bounding_hierarchy.insert(nodes); fcppt::io::cout() << FCPPT_TEXT("Construction time: ") << 
std::chrono::duration_cast<std::chrono::milliseconds>(clock_type::now() - before).count() << FCPPT_TEXT('\n'); fcppt::io::cout() << FCPPT_TEXT("Tree depth: ") << fcppt::container::tree::depth(bounding_hierarchy.representation()) << FCPPT_TEXT('\n'); } FCPPT_RECORD_MAKE_LABEL(rectangle_count_label); } int FCPPT_MAIN(int argc, fcppt::args_char *argv[]) try { auto const parser(fcppt::options::argument<rectangle_count_label, rectangle_count>{ fcppt::options::long_name{FCPPT_TEXT("rectangle-count")}, fcppt::options::optional_help_text{}}); using result_type = fcppt::options::result_of<decltype(parser)>; return fcppt::either::match( fcppt::options::parse(parser, fcppt::args_from_second(argc, argv)), [](fcppt::options::error const &_error) { fcppt::io::cerr() << _error << FCPPT_TEXT('\n'); return EXIT_FAILURE; }, [](result_type const &_result) { example_main(fcppt::record::get<rectangle_count_label>(_result)); return EXIT_SUCCESS; }); } catch (fcppt::exception const &_error) { fcppt::io::cerr() << _error.string() << FCPPT_TEXT('\n'); return EXIT_FAILURE; } catch (std::exception const &_error) { std::cerr << _error.what() << '\n'; return EXIT_FAILURE; }
//===- lib/MC/MCDwarf.cpp - MCDwarf implementation ------------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// #include "llvm/MC/MCDwarf.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCObjectFileInfo.h" #include "llvm/MC/MCObjectWriter.h" #include "llvm/MC/MCRegisterInfo.h" #include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCExpr.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Support/LEB128.h" #include "llvm/Support/Path.h" #include "llvm/Support/SourceMgr.h" #include "llvm/ADT/Hashing.h" #include "llvm/ADT/SmallString.h" #include "llvm/ADT/Twine.h" #include "llvm/Config/config.h" using namespace llvm; // Given a special op, return the address skip amount (in units of // DWARF2_LINE_MIN_INSN_LENGTH. #define SPECIAL_ADDR(op) (((op) - DWARF2_LINE_OPCODE_BASE)/DWARF2_LINE_RANGE) // The maximum address skip amount that can be encoded with a special op. #define MAX_SPECIAL_ADDR_DELTA SPECIAL_ADDR(255) // First special line opcode - leave room for the standard opcodes. // Note: If you want to change this, you'll have to update the // "standard_opcode_lengths" table that is emitted in DwarfFileTable::Emit(). #define DWARF2_LINE_OPCODE_BASE 13 // Minimum line offset in a special line info. opcode. This value // was chosen to give a reasonable range of values. #define DWARF2_LINE_BASE -5 // Range of line offsets in a special line info. opcode. #define DWARF2_LINE_RANGE 14 // Define the architecture-dependent minimum instruction length (in bytes). // This value should be rather too small than too big. 
#define DWARF2_LINE_MIN_INSN_LENGTH 1 // Note: when DWARF2_LINE_MIN_INSN_LENGTH == 1 which is the current setting, // this routine is a nop and will be optimized away. static inline uint64_t ScaleAddrDelta(uint64_t AddrDelta) { if (DWARF2_LINE_MIN_INSN_LENGTH == 1) return AddrDelta; if (AddrDelta % DWARF2_LINE_MIN_INSN_LENGTH != 0) { // TODO: report this error, but really only once. ; } return AddrDelta / DWARF2_LINE_MIN_INSN_LENGTH; } // // This is called when an instruction is assembled into the specified section // and if there is information from the last .loc directive that has yet to have // a line entry made for it is made. // void MCLineEntry::Make(MCStreamer *MCOS, const MCSection *Section) { if (!MCOS->getContext().getDwarfLocSeen()) return; // Create a symbol at in the current section for use in the line entry. MCSymbol *LineSym = MCOS->getContext().CreateTempSymbol(); // Set the value of the symbol to use for the MCLineEntry. MCOS->EmitLabel(LineSym); // Get the current .loc info saved in the context. const MCDwarfLoc &DwarfLoc = MCOS->getContext().getCurrentDwarfLoc(); // Create a (local) line entry with the symbol and the current .loc info. MCLineEntry LineEntry(LineSym, DwarfLoc); // clear DwarfLocSeen saying the current .loc info is now used. MCOS->getContext().ClearDwarfLocSeen(); // Get the MCLineSection for this section, if one does not exist for this // section create it. const DenseMap<const MCSection *, MCLineSection *> &MCLineSections = MCOS->getContext().getMCLineSections(); MCLineSection *LineSection = MCLineSections.lookup(Section); if (!LineSection) { // Create a new MCLineSection. This will be deleted after the dwarf line // table is created using it by iterating through the MCLineSections // DenseMap. LineSection = new MCLineSection; // Save a pointer to the new LineSection into the MCLineSections DenseMap. MCOS->getContext().addMCLineSection(Section, LineSection); } // Add the line entry to this section's entries. 
LineSection->addLineEntry(LineEntry); } // // This helper routine returns an expression of End - Start + IntVal . // static inline const MCExpr *MakeStartMinusEndExpr(const MCStreamer &MCOS, const MCSymbol &Start, const MCSymbol &End, int IntVal) { MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None; const MCExpr *Res = MCSymbolRefExpr::Create(&End, Variant, MCOS.getContext()); const MCExpr *RHS = MCSymbolRefExpr::Create(&Start, Variant, MCOS.getContext()); const MCExpr *Res1 = MCBinaryExpr::Create(MCBinaryExpr::Sub, Res, RHS, MCOS.getContext()); const MCExpr *Res2 = MCConstantExpr::Create(IntVal, MCOS.getContext()); const MCExpr *Res3 = MCBinaryExpr::Create(MCBinaryExpr::Sub, Res1, Res2, MCOS.getContext()); return Res3; } // // This emits the Dwarf line table for the specified section from the entries // in the LineSection. // static inline void EmitDwarfLineTable(MCStreamer *MCOS, const MCSection *Section, const MCLineSection *LineSection) { unsigned FileNum = 1; unsigned LastLine = 1; unsigned Column = 0; unsigned Flags = DWARF2_LINE_DEFAULT_IS_STMT ? DWARF2_FLAG_IS_STMT : 0; unsigned Isa = 0; MCSymbol *LastLabel = NULL; // Loop through each MCLineEntry and encode the dwarf line number table. 
for (MCLineSection::const_iterator it = LineSection->getMCLineEntries()->begin(), ie = LineSection->getMCLineEntries()->end(); it != ie; ++it) { if (FileNum != it->getFileNum()) { FileNum = it->getFileNum(); MCOS->EmitIntValue(dwarf::DW_LNS_set_file, 1); MCOS->EmitULEB128IntValue(FileNum); } if (Column != it->getColumn()) { Column = it->getColumn(); MCOS->EmitIntValue(dwarf::DW_LNS_set_column, 1); MCOS->EmitULEB128IntValue(Column); } if (Isa != it->getIsa()) { Isa = it->getIsa(); MCOS->EmitIntValue(dwarf::DW_LNS_set_isa, 1); MCOS->EmitULEB128IntValue(Isa); } if ((it->getFlags() ^ Flags) & DWARF2_FLAG_IS_STMT) { Flags = it->getFlags(); MCOS->EmitIntValue(dwarf::DW_LNS_negate_stmt, 1); } if (it->getFlags() & DWARF2_FLAG_BASIC_BLOCK) MCOS->EmitIntValue(dwarf::DW_LNS_set_basic_block, 1); if (it->getFlags() & DWARF2_FLAG_PROLOGUE_END) MCOS->EmitIntValue(dwarf::DW_LNS_set_prologue_end, 1); if (it->getFlags() & DWARF2_FLAG_EPILOGUE_BEGIN) MCOS->EmitIntValue(dwarf::DW_LNS_set_epilogue_begin, 1); int64_t LineDelta = static_cast<int64_t>(it->getLine()) - LastLine; MCSymbol *Label = it->getLabel(); // At this point we want to emit/create the sequence to encode the delta in // line numbers and the increment of the address from the previous Label // and the current Label. const MCAsmInfo &asmInfo = MCOS->getContext().getAsmInfo(); MCOS->EmitDwarfAdvanceLineAddr(LineDelta, LastLabel, Label, asmInfo.getPointerSize()); LastLine = it->getLine(); LastLabel = Label; } // Emit a DW_LNE_end_sequence for the end of the section. // Using the pointer Section create a temporary label at the end of the // section and use that and the LastLabel to compute the address delta // and use INT64_MAX as the line delta which is the signal that this is // actually a DW_LNE_end_sequence. // Switch to the section to be able to create a symbol at its end. MCOS->SwitchSection(Section); MCContext &context = MCOS->getContext(); // Create a symbol at the end of the section. 
MCSymbol *SectionEnd = context.CreateTempSymbol(); // Set the value of the symbol, as we are at the end of the section. MCOS->EmitLabel(SectionEnd); // Switch back the dwarf line section. MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfLineSection()); const MCAsmInfo &asmInfo = MCOS->getContext().getAsmInfo(); MCOS->EmitDwarfAdvanceLineAddr(INT64_MAX, LastLabel, SectionEnd, asmInfo.getPointerSize()); } // // This emits the Dwarf file and the line tables. // const MCSymbol *MCDwarfFileTable::Emit(MCStreamer *MCOS) { MCContext &context = MCOS->getContext(); // Switch to the section where the table will be emitted into. MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfLineSection()); // Create a symbol at the beginning of this section. MCSymbol *LineStartSym = context.CreateTempSymbol(); // Set the value of the symbol, as we are at the start of the section. MCOS->EmitLabel(LineStartSym); // Create a symbol for the end of the section (to be set when we get there). MCSymbol *LineEndSym = context.CreateTempSymbol(); // The first 4 bytes is the total length of the information for this // compilation unit (not including these 4 bytes for the length). MCOS->EmitAbsValue(MakeStartMinusEndExpr(*MCOS, *LineStartSym, *LineEndSym,4), 4); // Next 2 bytes is the Version, which is Dwarf 2. MCOS->EmitIntValue(2, 2); // Create a symbol for the end of the prologue (to be set when we get there). MCSymbol *ProEndSym = context.CreateTempSymbol(); // Lprologue_end // Length of the prologue, is the next 4 bytes. Which is the start of the // section to the end of the prologue. Not including the 4 bytes for the // total length, the 2 bytes for the version, and these 4 bytes for the // length of the prologue. MCOS->EmitAbsValue(MakeStartMinusEndExpr(*MCOS, *LineStartSym, *ProEndSym, (4 + 2 + 4)), 4, 0); // Parameters of the state machine, are next. 
MCOS->EmitIntValue(DWARF2_LINE_MIN_INSN_LENGTH, 1); MCOS->EmitIntValue(DWARF2_LINE_DEFAULT_IS_STMT, 1); MCOS->EmitIntValue(DWARF2_LINE_BASE, 1); MCOS->EmitIntValue(DWARF2_LINE_RANGE, 1); MCOS->EmitIntValue(DWARF2_LINE_OPCODE_BASE, 1); // Standard opcode lengths MCOS->EmitIntValue(0, 1); // length of DW_LNS_copy MCOS->EmitIntValue(1, 1); // length of DW_LNS_advance_pc MCOS->EmitIntValue(1, 1); // length of DW_LNS_advance_line MCOS->EmitIntValue(1, 1); // length of DW_LNS_set_file MCOS->EmitIntValue(1, 1); // length of DW_LNS_set_column MCOS->EmitIntValue(0, 1); // length of DW_LNS_negate_stmt MCOS->EmitIntValue(0, 1); // length of DW_LNS_set_basic_block MCOS->EmitIntValue(0, 1); // length of DW_LNS_const_add_pc MCOS->EmitIntValue(1, 1); // length of DW_LNS_fixed_advance_pc MCOS->EmitIntValue(0, 1); // length of DW_LNS_set_prologue_end MCOS->EmitIntValue(0, 1); // length of DW_LNS_set_epilogue_begin MCOS->EmitIntValue(1, 1); // DW_LNS_set_isa // Put out the directory and file tables. // First the directory table. const std::vector<StringRef> &MCDwarfDirs = context.getMCDwarfDirs(); for (unsigned i = 0; i < MCDwarfDirs.size(); i++) { MCOS->EmitBytes(MCDwarfDirs[i], 0); // the DirectoryName MCOS->EmitBytes(StringRef("\0", 1), 0); // the null term. of the string } MCOS->EmitIntValue(0, 1); // Terminate the directory list // Second the file table. const std::vector<MCDwarfFile *> &MCDwarfFiles = MCOS->getContext().getMCDwarfFiles(); for (unsigned i = 1; i < MCDwarfFiles.size(); i++) { MCOS->EmitBytes(MCDwarfFiles[i]->getName(), 0); // FileName MCOS->EmitBytes(StringRef("\0", 1), 0); // the null term. 
of the string // the Directory num MCOS->EmitULEB128IntValue(MCDwarfFiles[i]->getDirIndex()); MCOS->EmitIntValue(0, 1); // last modification timestamp (always 0) MCOS->EmitIntValue(0, 1); // filesize (always 0) } MCOS->EmitIntValue(0, 1); // Terminate the file list // This is the end of the prologue, so set the value of the symbol at the // end of the prologue (that was used in a previous expression). MCOS->EmitLabel(ProEndSym); // Put out the line tables. const DenseMap<const MCSection *, MCLineSection *> &MCLineSections = MCOS->getContext().getMCLineSections(); const std::vector<const MCSection *> &MCLineSectionOrder = MCOS->getContext().getMCLineSectionOrder(); for (std::vector<const MCSection*>::const_iterator it = MCLineSectionOrder.begin(), ie = MCLineSectionOrder.end(); it != ie; ++it) { const MCSection *Sec = *it; const MCLineSection *Line = MCLineSections.lookup(Sec); EmitDwarfLineTable(MCOS, Sec, Line); // Now delete the MCLineSections that were created in MCLineEntry::Make() // and used to emit the line table. delete Line; } if (MCOS->getContext().getAsmInfo().getLinkerRequiresNonEmptyDwarfLines() && MCLineSectionOrder.begin() == MCLineSectionOrder.end()) { // The darwin9 linker has a bug (see PR8715). For for 32-bit architectures // it requires: // total_length >= prologue_length + 10 // We are 4 bytes short, since we have total_length = 51 and // prologue_length = 45 // The regular end_sequence should be sufficient. MCDwarfLineAddr::Emit(MCOS, INT64_MAX, 0); } // This is the end of the section, so set the value of the symbol at the end // of this section (that was used in a previous expression). MCOS->EmitLabel(LineEndSym); return LineStartSym; } /// Utility function to write the encoding to an object writer. 
/// Utility function to write the encoding of a (LineDelta, AddrDelta) pair
/// directly to an object writer. Encodes into a stack buffer first, then
/// hands the raw bytes to the writer.
void MCDwarfLineAddr::Write(MCObjectWriter *OW, int64_t LineDelta,
                            uint64_t AddrDelta) {
  SmallString<256> Tmp;
  raw_svector_ostream OS(Tmp);
  MCDwarfLineAddr::Encode(LineDelta, AddrDelta, OS);
  OW->WriteBytes(OS.str());
}

/// Utility function to emit the encoding to a streamer.
void MCDwarfLineAddr::Emit(MCStreamer *MCOS, int64_t LineDelta,
                           uint64_t AddrDelta) {
  SmallString<256> Tmp;
  raw_svector_ostream OS(Tmp);
  MCDwarfLineAddr::Encode(LineDelta, AddrDelta, OS);
  MCOS->EmitBytes(OS.str(), /*AddrSpace=*/0);
}

/// Utility function to encode a Dwarf pair of LineDelta and AddrDeltas.
/// Chooses the shortest line-number-program opcode sequence (special opcode,
/// DW_LNS_const_add_pc + special opcode, or the standard advance opcodes)
/// that advances the state machine by the given deltas.
void MCDwarfLineAddr::Encode(int64_t LineDelta, uint64_t AddrDelta,
                             raw_ostream &OS) {
  uint64_t Temp, Opcode;
  bool NeedCopy = false;

  // Scale the address delta by the minimum instruction length.
  AddrDelta = ScaleAddrDelta(AddrDelta);

  // A LineDelta of INT64_MAX is a signal that this is actually a
  // DW_LNE_end_sequence. We cannot use special opcodes here, since we want the
  // end_sequence to emit the matrix entry.
  if (LineDelta == INT64_MAX) {
    if (AddrDelta == MAX_SPECIAL_ADDR_DELTA)
      OS << char(dwarf::DW_LNS_const_add_pc);
    else {
      OS << char(dwarf::DW_LNS_advance_pc);
      encodeULEB128(AddrDelta, OS);
    }
    OS << char(dwarf::DW_LNS_extended_op);
    OS << char(1); // extended-opcode length in bytes
    OS << char(dwarf::DW_LNE_end_sequence);
    return;
  }

  // Bias the line delta by the base.
  Temp = LineDelta - DWARF2_LINE_BASE;

  // If the line increment is out of range of a special opcode, we must encode
  // it with DW_LNS_advance_line. (Temp is unsigned, so a delta below the base
  // wraps to a huge value and is caught by the same comparison.)
  if (Temp >= DWARF2_LINE_RANGE) {
    OS << char(dwarf::DW_LNS_advance_line);
    encodeSLEB128(LineDelta, OS);

    // The line delta has been consumed; the remaining output only needs to
    // advance the address and append a row.
    LineDelta = 0;
    Temp = 0 - DWARF2_LINE_BASE;
    NeedCopy = true;
  }

  // Use DW_LNS_copy instead of a "line +0, addr +0" special opcode.
  if (LineDelta == 0 && AddrDelta == 0) {
    OS << char(dwarf::DW_LNS_copy);
    return;
  }

  // Bias the opcode by the special opcode base.
  Temp += DWARF2_LINE_OPCODE_BASE;

  // Avoid overflow when addr_delta is large.
  if (AddrDelta < 256 + MAX_SPECIAL_ADDR_DELTA) {
    // Try using a special opcode.
    Opcode = Temp + AddrDelta * DWARF2_LINE_RANGE;
    if (Opcode <= 255) {
      OS << char(Opcode);
      return;
    }

    // Try using DW_LNS_const_add_pc followed by special op.
    Opcode = Temp + (AddrDelta - MAX_SPECIAL_ADDR_DELTA) * DWARF2_LINE_RANGE;
    if (Opcode <= 255) {
      OS << char(dwarf::DW_LNS_const_add_pc);
      OS << char(Opcode);
      return;
    }
  }

  // Otherwise use DW_LNS_advance_pc.
  OS << char(dwarf::DW_LNS_advance_pc);
  encodeULEB128(AddrDelta, OS);

  // If the line delta was spilled to DW_LNS_advance_line above, a DW_LNS_copy
  // must terminate the row; otherwise the remaining special opcode does.
  if (NeedCopy)
    OS << char(dwarf::DW_LNS_copy);
  else
    OS << char(Temp);
}

/// Print the file entry as a quoted file name (used for asm output).
void MCDwarfFile::print(raw_ostream &OS) const {
  OS << '"' << getName() << '"';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void MCDwarfFile::dump() const {
  print(dbgs());
}
#endif

// Utility function to write a tuple for .debug_abbrev.
static void EmitAbbrev(MCStreamer *MCOS, uint64_t Name, uint64_t Form) {
  MCOS->EmitULEB128IntValue(Name);
  MCOS->EmitULEB128IntValue(Form);
}

// When generating dwarf for assembly source files this emits
// the data for .debug_abbrev section which contains three DIEs.
static void EmitGenDwarfAbbrev(MCStreamer *MCOS) {
  MCContext &context = MCOS->getContext();
  MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfAbbrevSection());

  // DW_TAG_compile_unit DIE abbrev (1).
  MCOS->EmitULEB128IntValue(1);
  MCOS->EmitULEB128IntValue(dwarf::DW_TAG_compile_unit);
  MCOS->EmitIntValue(dwarf::DW_CHILDREN_yes, 1);
  EmitAbbrev(MCOS, dwarf::DW_AT_stmt_list, dwarf::DW_FORM_data4);
  EmitAbbrev(MCOS, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr);
  EmitAbbrev(MCOS, dwarf::DW_AT_high_pc, dwarf::DW_FORM_addr);
  EmitAbbrev(MCOS, dwarf::DW_AT_name, dwarf::DW_FORM_string);
  EmitAbbrev(MCOS, dwarf::DW_AT_comp_dir, dwarf::DW_FORM_string);
  StringRef DwarfDebugFlags = context.getDwarfDebugFlags();
  // AT_APPLE_flags is only present when the assembler was given flags to
  // record, so its abbrev entry is conditional too.
  if (!DwarfDebugFlags.empty())
    EmitAbbrev(MCOS, dwarf::DW_AT_APPLE_flags, dwarf::DW_FORM_string);
  EmitAbbrev(MCOS, dwarf::DW_AT_producer, dwarf::DW_FORM_string);
  EmitAbbrev(MCOS, dwarf::DW_AT_language, dwarf::DW_FORM_data2);
  EmitAbbrev(MCOS, 0, 0); // terminates the attribute list

  // DW_TAG_label DIE abbrev (2).
  MCOS->EmitULEB128IntValue(2);
  MCOS->EmitULEB128IntValue(dwarf::DW_TAG_label);
  MCOS->EmitIntValue(dwarf::DW_CHILDREN_yes, 1);
  EmitAbbrev(MCOS, dwarf::DW_AT_name, dwarf::DW_FORM_string);
  EmitAbbrev(MCOS, dwarf::DW_AT_decl_file, dwarf::DW_FORM_data4);
  EmitAbbrev(MCOS, dwarf::DW_AT_decl_line, dwarf::DW_FORM_data4);
  EmitAbbrev(MCOS, dwarf::DW_AT_low_pc, dwarf::DW_FORM_addr);
  EmitAbbrev(MCOS, dwarf::DW_AT_prototyped, dwarf::DW_FORM_flag);
  EmitAbbrev(MCOS, 0, 0); // terminates the attribute list

  // DW_TAG_unspecified_parameters DIE abbrev (3).
  MCOS->EmitULEB128IntValue(3);
  MCOS->EmitULEB128IntValue(dwarf::DW_TAG_unspecified_parameters);
  MCOS->EmitIntValue(dwarf::DW_CHILDREN_no, 1);
  EmitAbbrev(MCOS, 0, 0); // terminates the attribute list

  // Terminate the abbreviations for this compilation unit.
  MCOS->EmitIntValue(0, 1);
}

// When generating dwarf for assembly source files this emits the data for
// .debug_aranges section. Which contains a header and a table of pairs of
// PointerSize'ed values for the address and size of section(s) with line table
// entries (just the default .text in our case) and a terminating pair of zeros.
static void EmitGenDwarfAranges(MCStreamer *MCOS) {
  MCContext &context = MCOS->getContext();

  // Create a symbol at the end of the section that we are creating the dwarf
  // debugging info to use later in here as part of the expression to calculate
  // the size of the section for the table.
  MCOS->SwitchSection(context.getGenDwarfSection());
  MCSymbol *SectionEndSym = context.CreateTempSymbol();
  MCOS->EmitLabel(SectionEndSym);
  context.setGenDwarfSectionEndSym(SectionEndSym);
  MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfARangesSection());

  // This will be the length of the .debug_aranges section, first account for
  // the size of each item in the header (see below where we emit these items).
  int Length = 4 + 2 + 4 + 1 + 1;

  // Figure the padding after the header before the table of address and size
  // pairs who's values are PointerSize'ed.
  const MCAsmInfo &asmInfo = context.getAsmInfo();
  int AddrSize = asmInfo.getPointerSize();
  // Round the header size up to a 2*AddrSize boundary (the table alignment).
  int Pad = 2 * AddrSize - (Length & (2 * AddrSize - 1));
  if (Pad == 2 * AddrSize)
    Pad = 0;
  Length += Pad;

  // Add the size of the pair of PointerSize'ed values for the address and size
  // of the one default .text section we have in the table.
  Length += 2 * AddrSize;
  // And the pair of terminating zeros.
  Length += 2 * AddrSize;

  // Emit the header for this section.
  // The 4 byte length not including the 4 byte value for the length.
  MCOS->EmitIntValue(Length - 4, 4);
  // The 2 byte version, which is 2.
  MCOS->EmitIntValue(2, 2);
  // The 4 byte offset to the compile unit in the .debug_info from the start
  // of the .debug_info, it is at the start of that section so this is zero.
  MCOS->EmitIntValue(0, 4);
  // The 1 byte size of an address.
  MCOS->EmitIntValue(AddrSize, 1);
  // The 1 byte size of a segment descriptor, we use a value of zero.
  MCOS->EmitIntValue(0, 1);
  // Align the header with the padding if needed, before we put out the table.
  for (int i = 0; i < Pad; i++)
    MCOS->EmitIntValue(0, 1);

  // Now emit the table of pairs of PointerSize'ed values for the section(s)
  // address and size, in our case just the one default .text section.
  const MCExpr *Addr = MCSymbolRefExpr::Create(
    context.getGenDwarfSectionStartSym(), MCSymbolRefExpr::VK_None, context);
  const MCExpr *Size = MakeStartMinusEndExpr(*MCOS,
    *context.getGenDwarfSectionStartSym(), *SectionEndSym, 0);
  MCOS->EmitAbsValue(Addr, AddrSize);
  MCOS->EmitAbsValue(Size, AddrSize);

  // And finally the pair of terminating zeros.
  MCOS->EmitIntValue(0, AddrSize);
  MCOS->EmitIntValue(0, AddrSize);
}

// When generating dwarf for assembly source files this emits the data for
// .debug_info section which contains three parts. The header, the compile_unit
// DIE and a list of label DIEs.
static void EmitGenDwarfInfo(MCStreamer *MCOS, const MCSymbol *AbbrevSectionSymbol, const MCSymbol *LineSectionSymbol) { MCContext &context = MCOS->getContext(); MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfInfoSection()); // Create a symbol at the start and end of this section used in here for the // expression to calculate the length in the header. MCSymbol *InfoStart = context.CreateTempSymbol(); MCOS->EmitLabel(InfoStart); MCSymbol *InfoEnd = context.CreateTempSymbol(); // First part: the header. // The 4 byte total length of the information for this compilation unit, not // including these 4 bytes. const MCExpr *Length = MakeStartMinusEndExpr(*MCOS, *InfoStart, *InfoEnd, 4); MCOS->EmitAbsValue(Length, 4); // The 2 byte DWARF version, which is 2. MCOS->EmitIntValue(2, 2); // The 4 byte offset to the debug abbrevs from the start of the .debug_abbrev, // it is at the start of that section so this is zero. if (AbbrevSectionSymbol) { MCOS->EmitSymbolValue(AbbrevSectionSymbol, 4); } else { MCOS->EmitIntValue(0, 4); } const MCAsmInfo &asmInfo = context.getAsmInfo(); int AddrSize = asmInfo.getPointerSize(); // The 1 byte size of an address. MCOS->EmitIntValue(AddrSize, 1); // Second part: the compile_unit DIE. // The DW_TAG_compile_unit DIE abbrev (1). MCOS->EmitULEB128IntValue(1); // DW_AT_stmt_list, a 4 byte offset from the start of the .debug_line section, // which is at the start of that section so this is zero. if (LineSectionSymbol) { MCOS->EmitSymbolValue(LineSectionSymbol, 4); } else { MCOS->EmitIntValue(0, 4); } // AT_low_pc, the first address of the default .text section. const MCExpr *Start = MCSymbolRefExpr::Create( context.getGenDwarfSectionStartSym(), MCSymbolRefExpr::VK_None, context); MCOS->EmitAbsValue(Start, AddrSize); // AT_high_pc, the last address of the default .text section. 
const MCExpr *End = MCSymbolRefExpr::Create( context.getGenDwarfSectionEndSym(), MCSymbolRefExpr::VK_None, context); MCOS->EmitAbsValue(End, AddrSize); // AT_name, the name of the source file. Reconstruct from the first directory // and file table entries. const std::vector<StringRef> &MCDwarfDirs = context.getMCDwarfDirs(); if (MCDwarfDirs.size() > 0) { MCOS->EmitBytes(MCDwarfDirs[0], 0); MCOS->EmitBytes("/", 0); } const std::vector<MCDwarfFile *> &MCDwarfFiles = MCOS->getContext().getMCDwarfFiles(); MCOS->EmitBytes(MCDwarfFiles[1]->getName(), 0); MCOS->EmitIntValue(0, 1); // NULL byte to terminate the string. // AT_comp_dir, the working directory the assembly was done in. llvm::sys::Path CWD = llvm::sys::Path::GetCurrentDirectory(); MCOS->EmitBytes(StringRef(CWD.c_str()), 0); MCOS->EmitIntValue(0, 1); // NULL byte to terminate the string. // AT_APPLE_flags, the command line arguments of the assembler tool. StringRef DwarfDebugFlags = context.getDwarfDebugFlags(); if (!DwarfDebugFlags.empty()){ MCOS->EmitBytes(DwarfDebugFlags, 0); MCOS->EmitIntValue(0, 1); // NULL byte to terminate the string. } // AT_producer, the version of the assembler tool. MCOS->EmitBytes(StringRef("llvm-mc (based on LLVM "), 0); MCOS->EmitBytes(StringRef(PACKAGE_VERSION), 0); MCOS->EmitBytes(StringRef(")"), 0); MCOS->EmitIntValue(0, 1); // NULL byte to terminate the string. // AT_language, a 4 byte value. We use DW_LANG_Mips_Assembler as the dwarf2 // draft has no standard code for assembler. MCOS->EmitIntValue(dwarf::DW_LANG_Mips_Assembler, 2); // Third part: the list of label DIEs. // Loop on saved info for dwarf labels and create the DIEs for them. const std::vector<const MCGenDwarfLabelEntry *> &Entries = MCOS->getContext().getMCGenDwarfLabelEntries(); for (std::vector<const MCGenDwarfLabelEntry *>::const_iterator it = Entries.begin(), ie = Entries.end(); it != ie; ++it) { const MCGenDwarfLabelEntry *Entry = *it; // The DW_TAG_label DIE abbrev (2). 
MCOS->EmitULEB128IntValue(2); // AT_name, of the label without any leading underbar. MCOS->EmitBytes(Entry->getName(), 0); MCOS->EmitIntValue(0, 1); // NULL byte to terminate the string. // AT_decl_file, index into the file table. MCOS->EmitIntValue(Entry->getFileNumber(), 4); // AT_decl_line, source line number. MCOS->EmitIntValue(Entry->getLineNumber(), 4); // AT_low_pc, start address of the label. const MCExpr *AT_low_pc = MCSymbolRefExpr::Create(Entry->getLabel(), MCSymbolRefExpr::VK_None, context); MCOS->EmitAbsValue(AT_low_pc, AddrSize); // DW_AT_prototyped, a one byte flag value of 0 saying we have no prototype. MCOS->EmitIntValue(0, 1); // The DW_TAG_unspecified_parameters DIE abbrev (3). MCOS->EmitULEB128IntValue(3); // Add the NULL DIE terminating the DW_TAG_unspecified_parameters DIE's. MCOS->EmitIntValue(0, 1); } // Deallocate the MCGenDwarfLabelEntry classes that saved away the info // for the dwarf labels. for (std::vector<const MCGenDwarfLabelEntry *>::const_iterator it = Entries.begin(), ie = Entries.end(); it != ie; ++it) { const MCGenDwarfLabelEntry *Entry = *it; delete Entry; } // Add the NULL DIE terminating the Compile Unit DIE's. MCOS->EmitIntValue(0, 1); // Now set the value of the symbol at the end of the info section. MCOS->EmitLabel(InfoEnd); } // // When generating dwarf for assembly source files this emits the Dwarf // sections. // void MCGenDwarfInfo::Emit(MCStreamer *MCOS, const MCSymbol *LineSectionSymbol) { // Create the dwarf sections in this order (.debug_line already created). 
MCContext &context = MCOS->getContext(); const MCAsmInfo &AsmInfo = context.getAsmInfo(); MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfInfoSection()); MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfAbbrevSection()); MCSymbol *AbbrevSectionSymbol; if (AsmInfo.doesDwarfUseRelocationsAcrossSections()) { AbbrevSectionSymbol = context.CreateTempSymbol(); MCOS->EmitLabel(AbbrevSectionSymbol); } else { AbbrevSectionSymbol = NULL; LineSectionSymbol = NULL; } MCOS->SwitchSection(context.getObjectFileInfo()->getDwarfARangesSection()); // If there are no line table entries then do not emit any section contents. if (context.getMCLineSections().empty()) return; // Output the data for .debug_aranges section. EmitGenDwarfAranges(MCOS); // Output the data for .debug_abbrev section. EmitGenDwarfAbbrev(MCOS); // Output the data for .debug_info section. EmitGenDwarfInfo(MCOS, AbbrevSectionSymbol, LineSectionSymbol); } // // When generating dwarf for assembly source files this is called when symbol // for a label is created. If this symbol is not a temporary and is in the // section that dwarf is being generated for, save the needed info to create // a dwarf label. // void MCGenDwarfLabelEntry::Make(MCSymbol *Symbol, MCStreamer *MCOS, SourceMgr &SrcMgr, SMLoc &Loc) { // We won't create dwarf labels for temporary symbols or symbols not in // the default text. if (Symbol->isTemporary()) return; MCContext &context = MCOS->getContext(); if (context.getGenDwarfSection() != MCOS->getCurrentSection()) return; // The dwarf label's name does not have the symbol name's leading // underbar if any. StringRef Name = Symbol->getName(); if (Name.startswith("_")) Name = Name.substr(1, Name.size()-1); // Get the dwarf file number to be used for the dwarf label. unsigned FileNumber = context.getGenDwarfFileNumber(); // Finding the line number is the expensive part which is why we just don't // pass it in as for some symbols we won't create a dwarf label. 
int CurBuffer = SrcMgr.FindBufferContainingLoc(Loc); unsigned LineNumber = SrcMgr.FindLineNumber(Loc, CurBuffer); // We create a temporary symbol for use for the AT_high_pc and AT_low_pc // values so that they don't have things like an ARM thumb bit from the // original symbol. So when used they won't get a low bit set after // relocation. MCSymbol *Label = context.CreateTempSymbol(); MCOS->EmitLabel(Label); // Create and entry for the info and add it to the other entries. MCGenDwarfLabelEntry *Entry = new MCGenDwarfLabelEntry(Name, FileNumber, LineNumber, Label); MCOS->getContext().addMCGenDwarfLabelEntry(Entry); } static int getDataAlignmentFactor(MCStreamer &streamer) { MCContext &context = streamer.getContext(); const MCAsmInfo &asmInfo = context.getAsmInfo(); int size = asmInfo.getPointerSize(); if (asmInfo.isStackGrowthDirectionUp()) return size; else return -size; } static unsigned getSizeForEncoding(MCStreamer &streamer, unsigned symbolEncoding) { MCContext &context = streamer.getContext(); unsigned format = symbolEncoding & 0x0f; switch (format) { default: llvm_unreachable("Unknown Encoding"); case dwarf::DW_EH_PE_absptr: case dwarf::DW_EH_PE_signed: return context.getAsmInfo().getPointerSize(); case dwarf::DW_EH_PE_udata2: case dwarf::DW_EH_PE_sdata2: return 2; case dwarf::DW_EH_PE_udata4: case dwarf::DW_EH_PE_sdata4: return 4; case dwarf::DW_EH_PE_udata8: case dwarf::DW_EH_PE_sdata8: return 8; } } static void EmitSymbol(MCStreamer &streamer, const MCSymbol &symbol, unsigned symbolEncoding, const char *comment = 0) { MCContext &context = streamer.getContext(); const MCAsmInfo &asmInfo = context.getAsmInfo(); const MCExpr *v = asmInfo.getExprForFDESymbol(&symbol, symbolEncoding, streamer); unsigned size = getSizeForEncoding(streamer, symbolEncoding); if (streamer.isVerboseAsm() && comment) streamer.AddComment(comment); streamer.EmitAbsValue(v, size); } static void EmitPersonality(MCStreamer &streamer, const MCSymbol &symbol, unsigned symbolEncoding) { 
MCContext &context = streamer.getContext(); const MCAsmInfo &asmInfo = context.getAsmInfo(); const MCExpr *v = asmInfo.getExprForPersonalitySymbol(&symbol, symbolEncoding, streamer); unsigned size = getSizeForEncoding(streamer, symbolEncoding); streamer.EmitValue(v, size); } static const MachineLocation TranslateMachineLocation( const MCRegisterInfo &MRI, const MachineLocation &Loc) { unsigned Reg = Loc.getReg() == MachineLocation::VirtualFP ? MachineLocation::VirtualFP : unsigned(MRI.getDwarfRegNum(Loc.getReg(), true)); const MachineLocation &NewLoc = Loc.isReg() ? MachineLocation(Reg) : MachineLocation(Reg, Loc.getOffset()); return NewLoc; } namespace { class FrameEmitterImpl { int CFAOffset; int CIENum; bool UsingCFI; bool IsEH; const MCSymbol *SectionStart; public: FrameEmitterImpl(bool usingCFI, bool isEH) : CFAOffset(0), CIENum(0), UsingCFI(usingCFI), IsEH(isEH), SectionStart(0) {} void setSectionStart(const MCSymbol *Label) { SectionStart = Label; } /// EmitCompactUnwind - Emit the unwind information in a compact way. If /// we're successful, return 'true'. Otherwise, return 'false' and it will /// emit the normal CIE and FDE. 
bool EmitCompactUnwind(MCStreamer &streamer, const MCDwarfFrameInfo &frame); const MCSymbol &EmitCIE(MCStreamer &streamer, const MCSymbol *personality, unsigned personalityEncoding, const MCSymbol *lsda, bool IsSignalFrame, unsigned lsdaEncoding); MCSymbol *EmitFDE(MCStreamer &streamer, const MCSymbol &cieStart, const MCDwarfFrameInfo &frame); void EmitCFIInstructions(MCStreamer &streamer, const std::vector<MCCFIInstruction> &Instrs, MCSymbol *BaseLabel); void EmitCFIInstruction(MCStreamer &Streamer, const MCCFIInstruction &Instr); }; } // end anonymous namespace static void EmitEncodingByte(MCStreamer &Streamer, unsigned Encoding, StringRef Prefix) { if (Streamer.isVerboseAsm()) { const char *EncStr; switch (Encoding) { default: EncStr = "<unknown encoding>"; break; case dwarf::DW_EH_PE_absptr: EncStr = "absptr"; break; case dwarf::DW_EH_PE_omit: EncStr = "omit"; break; case dwarf::DW_EH_PE_pcrel: EncStr = "pcrel"; break; case dwarf::DW_EH_PE_udata4: EncStr = "udata4"; break; case dwarf::DW_EH_PE_udata8: EncStr = "udata8"; break; case dwarf::DW_EH_PE_sdata4: EncStr = "sdata4"; break; case dwarf::DW_EH_PE_sdata8: EncStr = "sdata8"; break; case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata4: EncStr = "pcrel udata4"; break; case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata4: EncStr = "pcrel sdata4"; break; case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_udata8: EncStr = "pcrel udata8"; break; case dwarf::DW_EH_PE_pcrel | dwarf::DW_EH_PE_sdata8: EncStr = "screl sdata8"; break; case dwarf::DW_EH_PE_indirect |dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_udata4: EncStr = "indirect pcrel udata4"; break; case dwarf::DW_EH_PE_indirect |dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_sdata4: EncStr = "indirect pcrel sdata4"; break; case dwarf::DW_EH_PE_indirect |dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_udata8: EncStr = "indirect pcrel udata8"; break; case dwarf::DW_EH_PE_indirect |dwarf::DW_EH_PE_pcrel|dwarf::DW_EH_PE_sdata8: EncStr = "indirect pcrel sdata8"; break; } 
Streamer.AddComment(Twine(Prefix) + " = " + EncStr); } Streamer.EmitIntValue(Encoding, 1); } void FrameEmitterImpl::EmitCFIInstruction(MCStreamer &Streamer, const MCCFIInstruction &Instr) { int dataAlignmentFactor = getDataAlignmentFactor(Streamer); bool VerboseAsm = Streamer.isVerboseAsm(); switch (Instr.getOperation()) { case MCCFIInstruction::Move: case MCCFIInstruction::RelMove: { const MachineLocation &Dst = Instr.getDestination(); const MachineLocation &Src = Instr.getSource(); const bool IsRelative = Instr.getOperation() == MCCFIInstruction::RelMove; // If advancing cfa. if (Dst.isReg() && Dst.getReg() == MachineLocation::VirtualFP) { if (Src.getReg() == MachineLocation::VirtualFP) { if (VerboseAsm) Streamer.AddComment("DW_CFA_def_cfa_offset"); Streamer.EmitIntValue(dwarf::DW_CFA_def_cfa_offset, 1); } else { if (VerboseAsm) Streamer.AddComment("DW_CFA_def_cfa"); Streamer.EmitIntValue(dwarf::DW_CFA_def_cfa, 1); if (VerboseAsm) Streamer.AddComment(Twine("Reg ") + Twine(Src.getReg())); Streamer.EmitULEB128IntValue(Src.getReg()); } if (IsRelative) CFAOffset += Src.getOffset(); else CFAOffset = -Src.getOffset(); if (VerboseAsm) Streamer.AddComment(Twine("Offset " + Twine(CFAOffset))); Streamer.EmitULEB128IntValue(CFAOffset); return; } if (Src.isReg() && Src.getReg() == MachineLocation::VirtualFP) { assert(Dst.isReg() && "Machine move not supported yet."); if (VerboseAsm) Streamer.AddComment("DW_CFA_def_cfa_register"); Streamer.EmitIntValue(dwarf::DW_CFA_def_cfa_register, 1); if (VerboseAsm) Streamer.AddComment(Twine("Reg ") + Twine(Dst.getReg())); Streamer.EmitULEB128IntValue(Dst.getReg()); return; } unsigned Reg = Src.getReg(); int Offset = Dst.getOffset(); if (IsRelative) Offset -= CFAOffset; Offset = Offset / dataAlignmentFactor; if (Offset < 0) { if (VerboseAsm) Streamer.AddComment("DW_CFA_offset_extended_sf"); Streamer.EmitIntValue(dwarf::DW_CFA_offset_extended_sf, 1); if (VerboseAsm) Streamer.AddComment(Twine("Reg ") + Twine(Reg)); 
Streamer.EmitULEB128IntValue(Reg); if (VerboseAsm) Streamer.AddComment(Twine("Offset ") + Twine(Offset)); Streamer.EmitSLEB128IntValue(Offset); } else if (Reg < 64) { if (VerboseAsm) Streamer.AddComment(Twine("DW_CFA_offset + Reg(") + Twine(Reg) + ")"); Streamer.EmitIntValue(dwarf::DW_CFA_offset + Reg, 1); if (VerboseAsm) Streamer.AddComment(Twine("Offset ") + Twine(Offset)); Streamer.EmitULEB128IntValue(Offset); } else { if (VerboseAsm) Streamer.AddComment("DW_CFA_offset_extended"); Streamer.EmitIntValue(dwarf::DW_CFA_offset_extended, 1); if (VerboseAsm) Streamer.AddComment(Twine("Reg ") + Twine(Reg)); Streamer.EmitULEB128IntValue(Reg); if (VerboseAsm) Streamer.AddComment(Twine("Offset ") + Twine(Offset)); Streamer.EmitULEB128IntValue(Offset); } return; } case MCCFIInstruction::RememberState: if (VerboseAsm) Streamer.AddComment("DW_CFA_remember_state"); Streamer.EmitIntValue(dwarf::DW_CFA_remember_state, 1); return; case MCCFIInstruction::RestoreState: if (VerboseAsm) Streamer.AddComment("DW_CFA_restore_state"); Streamer.EmitIntValue(dwarf::DW_CFA_restore_state, 1); return; case MCCFIInstruction::SameValue: { unsigned Reg = Instr.getDestination().getReg(); if (VerboseAsm) Streamer.AddComment("DW_CFA_same_value"); Streamer.EmitIntValue(dwarf::DW_CFA_same_value, 1); if (VerboseAsm) Streamer.AddComment(Twine("Reg ") + Twine(Reg)); Streamer.EmitULEB128IntValue(Reg); return; } case MCCFIInstruction::Restore: { unsigned Reg = Instr.getDestination().getReg(); if (VerboseAsm) { Streamer.AddComment("DW_CFA_restore"); Streamer.AddComment(Twine("Reg ") + Twine(Reg)); } Streamer.EmitIntValue(dwarf::DW_CFA_restore | Reg, 1); return; } case MCCFIInstruction::Escape: if (VerboseAsm) Streamer.AddComment("Escape bytes"); Streamer.EmitBytes(Instr.getValues(), 0); return; } llvm_unreachable("Unhandled case in switch"); } /// EmitFrameMoves - Emit frame instructions to describe the layout of the /// frame. 
void FrameEmitterImpl::EmitCFIInstructions(MCStreamer &streamer,
                                           const std::vector<MCCFIInstruction> &Instrs,
                                           MCSymbol *BaseLabel) {
  for (unsigned i = 0, N = Instrs.size(); i < N; ++i) {
    const MCCFIInstruction &Instr = Instrs[i];
    MCSymbol *Label = Instr.getLabel();
    // Throw out move if the label is invalid.
    if (Label && !Label->isDefined()) continue; // Not emitted, in dead code.

    // Advance row if new location.
    if (BaseLabel && Label) {
      MCSymbol *ThisSym = Label;
      if (ThisSym != BaseLabel) {
        if (streamer.isVerboseAsm()) streamer.AddComment("DW_CFA_advance_loc4");
        streamer.EmitDwarfAdvanceFrameAddr(BaseLabel, ThisSym);
        BaseLabel = ThisSym;
      }
    }

    EmitCFIInstruction(streamer, Instr);
  }
}

/// EmitCompactUnwind - Emit the unwind information in a compact way. If we're
/// successful, return 'true'. Otherwise, return 'false' and it will emit the
/// normal CIE and FDE.
bool FrameEmitterImpl::EmitCompactUnwind(MCStreamer &Streamer,
                                         const MCDwarfFrameInfo &Frame) {
  MCContext &Context = Streamer.getContext();
  const MCObjectFileInfo *MOFI = Context.getObjectFileInfo();
  bool VerboseAsm = Streamer.isVerboseAsm();

  // range-start range-length  compact-unwind-enc personality-func   lsda
  //  _foo       LfooEnd-_foo  0x00000023          0                 0
  //  _bar       LbarEnd-_bar  0x00000025          __gxx_personality except_tab1
  //
  //   .section __LD,__compact_unwind,regular,debug
  //
  //   # compact unwind for _foo
  //   .quad _foo
  //   .set L1,LfooEnd-_foo
  //   .long L1
  //   .long 0x01010001
  //   .quad 0
  //   .quad 0
  //
  //   # compact unwind for _bar
  //   .quad _bar
  //   .set L2,LbarEnd-_bar
  //   .long L2
  //   .long 0x01020011
  //   .quad __gxx_personality
  //   .quad except_tab1

  uint32_t Encoding = Frame.CompactUnwindEncoding;
  // A zero encoding means the frame cannot be described compactly; fall back
  // to a regular CIE/FDE.
  if (!Encoding) return false;

  // The encoding needs to know we have an LSDA.
  if (Frame.Lsda)
    Encoding |= 0x40000000;

  Streamer.SwitchSection(MOFI->getCompactUnwindSection());

  // Range Start
  unsigned FDEEncoding = MOFI->getFDEEncoding(UsingCFI);
  unsigned Size = getSizeForEncoding(Streamer, FDEEncoding);
  if (VerboseAsm) Streamer.AddComment("Range Start");
  Streamer.EmitSymbolValue(Frame.Function, Size);

  // Range Length
  const MCExpr *Range = MakeStartMinusEndExpr(Streamer, *Frame.Begin,
                                              *Frame.End, 0);
  if (VerboseAsm) Streamer.AddComment("Range Length");
  Streamer.EmitAbsValue(Range, 4);

  // Compact Encoding
  Size = getSizeForEncoding(Streamer, dwarf::DW_EH_PE_udata4);
  if (VerboseAsm) Streamer.AddComment("Compact Unwind Encoding: 0x" +
                                      Twine::utohexstr(Encoding));
  Streamer.EmitIntValue(Encoding, Size);

  // Personality Function
  Size = getSizeForEncoding(Streamer, dwarf::DW_EH_PE_absptr);
  if (VerboseAsm) Streamer.AddComment("Personality Function");
  if (Frame.Personality)
    Streamer.EmitSymbolValue(Frame.Personality, Size);
  else
    Streamer.EmitIntValue(0, Size); // No personality fn

  // LSDA
  Size = getSizeForEncoding(Streamer, Frame.LsdaEncoding);
  if (VerboseAsm) Streamer.AddComment("LSDA");
  if (Frame.Lsda)
    Streamer.EmitSymbolValue(Frame.Lsda, Size);
  else
    Streamer.EmitIntValue(0, Size); // No LSDA

  return true;
}

// Emits one CIE (for .eh_frame when IsEH, else .debug_frame) describing the
// given personality/LSDA configuration, and returns its start symbol so FDEs
// can reference it.
const MCSymbol &FrameEmitterImpl::EmitCIE(MCStreamer &streamer,
                                          const MCSymbol *personality,
                                          unsigned personalityEncoding,
                                          const MCSymbol *lsda,
                                          bool IsSignalFrame,
                                          unsigned lsdaEncoding) {
  MCContext &context = streamer.getContext();
  const MCRegisterInfo &MRI = context.getRegisterInfo();
  const MCObjectFileInfo *MOFI = context.getObjectFileInfo();
  bool verboseAsm = streamer.isVerboseAsm();

  MCSymbol *sectionStart;
  // When the EH-frame symbol must be visible, use the well-known "EH_frameN"
  // name; otherwise a temporary (assembler-local) symbol suffices.
  if (MOFI->isFunctionEHFrameSymbolPrivate() || !IsEH)
    sectionStart = context.CreateTempSymbol();
  else
    sectionStart = context.GetOrCreateSymbol(Twine("EH_frame") + Twine(CIENum));

  streamer.EmitLabel(sectionStart);
  CIENum++;

  MCSymbol *sectionEnd = context.CreateTempSymbol();

  // Length
  const MCExpr *Length = MakeStartMinusEndExpr(streamer, *sectionStart,
                                               *sectionEnd, 4);
  if (verboseAsm) streamer.AddComment("CIE Length");
  streamer.EmitAbsValue(Length, 4);

  // CIE ID
  // 0 marks a CIE in .eh_frame; 0xffffffff (-1) marks one in .debug_frame.
  unsigned CIE_ID = IsEH ? 0 : -1;
  if (verboseAsm) streamer.AddComment("CIE ID Tag");
  streamer.EmitIntValue(CIE_ID, 4);

  // Version
  if (verboseAsm) streamer.AddComment("DW_CIE_VERSION");
  streamer.EmitIntValue(dwarf::DW_CIE_VERSION, 1);

  // Augmentation String
  SmallString<8> Augmentation;
  if (IsEH) {
    if (verboseAsm) streamer.AddComment("CIE Augmentation");
    Augmentation += "z";
    if (personality)
      Augmentation += "P";
    if (lsda)
      Augmentation += "L";
    Augmentation += "R";
    if (IsSignalFrame)
      Augmentation += "S";
    streamer.EmitBytes(Augmentation.str(), 0);
  }
  streamer.EmitIntValue(0, 1); // zero terminator of the augmentation string

  // Code Alignment Factor
  if (verboseAsm) streamer.AddComment("CIE Code Alignment Factor");
  streamer.EmitULEB128IntValue(1);

  // Data Alignment Factor
  if (verboseAsm) streamer.AddComment("CIE Data Alignment Factor");
  streamer.EmitSLEB128IntValue(getDataAlignmentFactor(streamer));

  // Return Address Register
  if (verboseAsm) streamer.AddComment("CIE Return Address Column");
  streamer.EmitULEB128IntValue(MRI.getDwarfRegNum(MRI.getRARegister(), true));

  // Augmentation Data Length (optional)
  unsigned augmentationLength = 0;
  if (IsEH) {
    if (personality) {
      // Personality Encoding
      augmentationLength += 1;
      // Personality
      augmentationLength += getSizeForEncoding(streamer, personalityEncoding);
    }
    if (lsda)
      augmentationLength += 1;

    // Encoding of the FDE pointers
    augmentationLength += 1;

    if (verboseAsm) streamer.AddComment("Augmentation Size");
    streamer.EmitULEB128IntValue(augmentationLength);

    // Augmentation Data (optional)
    if (personality) {
      // Personality Encoding
      EmitEncodingByte(streamer, personalityEncoding, "Personality Encoding");
      // Personality
      if (verboseAsm) streamer.AddComment("Personality");
      EmitPersonality(streamer, *personality, personalityEncoding);
    }

    if (lsda)
      EmitEncodingByte(streamer, lsdaEncoding, "LSDA Encoding");

    // Encoding of the FDE pointers
    EmitEncodingByte(streamer, MOFI->getFDEEncoding(UsingCFI),
                     "FDE Encoding");
  }

  // Initial Instructions
  // Translate the target's initial frame state into CFI instructions so every
  // FDE starts from a known CFA definition.
  const MCAsmInfo &MAI = context.getAsmInfo();
  const std::vector<MachineMove> &Moves = MAI.getInitialFrameState();
  std::vector<MCCFIInstruction> Instructions;

  for (int i = 0, n = Moves.size(); i != n; ++i) {
    MCSymbol *Label = Moves[i].getLabel();
    const MachineLocation &Dst =
      TranslateMachineLocation(MRI, Moves[i].getDestination());
    const MachineLocation &Src =
      TranslateMachineLocation(MRI, Moves[i].getSource());

    MCCFIInstruction Inst(Label, Dst, Src);
    Instructions.push_back(Inst);
  }

  EmitCFIInstructions(streamer, Instructions, NULL);

  // Padding
  streamer.EmitValueToAlignment(IsEH
                                ? 4 : context.getAsmInfo().getPointerSize());

  streamer.EmitLabel(sectionEnd);
  return *sectionStart;
}

// Emits one FDE for `frame` referencing the CIE starting at `cieStart`.
// Returns the FDE end symbol, which the caller emits as the label that
// terminates this FDE (and starts the next one, if any).
MCSymbol *FrameEmitterImpl::EmitFDE(MCStreamer &streamer,
                                    const MCSymbol &cieStart,
                                    const MCDwarfFrameInfo &frame) {
  MCContext &context = streamer.getContext();
  MCSymbol *fdeStart = context.CreateTempSymbol();
  MCSymbol *fdeEnd = context.CreateTempSymbol();
  const MCObjectFileInfo *MOFI = context.getObjectFileInfo();
  bool verboseAsm = streamer.isVerboseAsm();

  if (IsEH && frame.Function && !MOFI->isFunctionEHFrameSymbolPrivate()) {
    // Give the linker a visible "<function>.eh" symbol for this FDE.
    MCSymbol *EHSym =
      context.GetOrCreateSymbol(frame.Function->getName() + Twine(".eh"));
    streamer.EmitEHSymAttributes(frame.Function, EHSym);
    streamer.EmitLabel(EHSym);
  }

  // Length
  const MCExpr *Length = MakeStartMinusEndExpr(streamer, *fdeStart, *fdeEnd, 0);
  if (verboseAsm) streamer.AddComment("FDE Length");
  streamer.EmitAbsValue(Length, 4);

  streamer.EmitLabel(fdeStart);

  // CIE Pointer
  const MCAsmInfo &asmInfo = context.getAsmInfo();
  if (IsEH) {
    // .eh_frame uses a self-relative backwards offset to the CIE.
    const MCExpr *offset = MakeStartMinusEndExpr(streamer, cieStart, *fdeStart,
                                                 0);
    if (verboseAsm) streamer.AddComment("FDE CIE Offset");
    streamer.EmitAbsValue(offset, 4);
  } else if (!asmInfo.doesDwarfUseRelocationsAcrossSections()) {
    // .debug_frame without relocations: offset from the section start.
    const MCExpr *offset = MakeStartMinusEndExpr(streamer, *SectionStart,
                                                 cieStart, 0);
    streamer.EmitAbsValue(offset, 4);
  } else {
    streamer.EmitSymbolValue(&cieStart, 4);
  }

  // PC Begin
  unsigned PCEncoding = IsEH ? MOFI->getFDEEncoding(UsingCFI)
                             : (unsigned)dwarf::DW_EH_PE_absptr;
  unsigned PCSize = getSizeForEncoding(streamer, PCEncoding);
  EmitSymbol(streamer, *frame.Begin, PCEncoding, "FDE initial location");

  // PC Range
  const MCExpr *Range = MakeStartMinusEndExpr(streamer, *frame.Begin,
                                              *frame.End, 0);
  if (verboseAsm) streamer.AddComment("FDE address range");
  streamer.EmitAbsValue(Range, PCSize);

  if (IsEH) {
    // Augmentation Data Length
    unsigned augmentationLength = 0;

    if (frame.Lsda)
      augmentationLength += getSizeForEncoding(streamer, frame.LsdaEncoding);

    if (verboseAsm) streamer.AddComment("Augmentation size");
    streamer.EmitULEB128IntValue(augmentationLength);

    // Augmentation Data
    if (frame.Lsda)
      EmitSymbol(streamer, *frame.Lsda, frame.LsdaEncoding,
                 "Language Specific Data Area");
  }

  // Call Frame Instructions
  EmitCFIInstructions(streamer, frame.Instructions, frame.Begin);

  // Padding
  streamer.EmitValueToAlignment(PCSize);

  return fdeEnd;
}

namespace {
  // Key used to share a single CIE between all FDEs that have the same
  // personality/LSDA configuration.
  struct CIEKey {
    static const CIEKey getEmptyKey() { return CIEKey(0, 0, -1, false); }
    static const CIEKey getTombstoneKey() { return CIEKey(0, -1, 0, false); }

    CIEKey(const MCSymbol* Personality_, unsigned PersonalityEncoding_,
           unsigned LsdaEncoding_, bool IsSignalFrame_) :
      Personality(Personality_), PersonalityEncoding(PersonalityEncoding_),
      LsdaEncoding(LsdaEncoding_), IsSignalFrame(IsSignalFrame_) {
    }
    const MCSymbol* Personality;
    unsigned PersonalityEncoding;
    unsigned LsdaEncoding;
    bool IsSignalFrame;
  };
}

namespace llvm {
  // DenseMap traits so CIEKey can be used as a DenseMap key below.
  template <>
  struct DenseMapInfo<CIEKey> {
    static CIEKey getEmptyKey() {
      return CIEKey::getEmptyKey();
    }
    static CIEKey getTombstoneKey() {
      return CIEKey::getTombstoneKey();
    }
    static unsigned getHashValue(const CIEKey &Key) {
      return static_cast<unsigned>(hash_combine(Key.Personality,
                                                Key.PersonalityEncoding,
                                                Key.LsdaEncoding,
                                                Key.IsSignalFrame));
    }
    static bool isEqual(const CIEKey &LHS,
                        const CIEKey &RHS) {
      return LHS.Personality == RHS.Personality &&
        LHS.PersonalityEncoding == RHS.PersonalityEncoding &&
        LHS.LsdaEncoding == RHS.LsdaEncoding &&
        LHS.IsSignalFrame == RHS.IsSignalFrame;
    }
  };
}

// Emits the .eh_frame (IsEH) or .debug_frame section for all frames recorded
// on the streamer, emitting compact unwind first where available and sharing
// CIEs between compatible FDEs.
void MCDwarfFrameEmitter::Emit(MCStreamer &Streamer,
                               bool UsingCFI,
                               bool IsEH) {
  MCContext &Context = Streamer.getContext();
  MCObjectFileInfo *MOFI =
    const_cast<MCObjectFileInfo*>(Context.getObjectFileInfo());
  FrameEmitterImpl Emitter(UsingCFI, IsEH);
  ArrayRef<MCDwarfFrameInfo> FrameArray = Streamer.getFrameInfos();

  // Emit the compact unwind info if available.
  if (IsEH && MOFI->getCompactUnwindSection())
    for (unsigned i = 0, n = Streamer.getNumFrameInfos(); i < n; ++i) {
      const MCDwarfFrameInfo &Frame = Streamer.getFrameInfo(i);
      if (Frame.CompactUnwindEncoding)
        Emitter.EmitCompactUnwind(Streamer, Frame);
    }

  const MCSection &Section = IsEH ? *MOFI->getEHFrameSection() :
                                    *MOFI->getDwarfFrameSection();
  Streamer.SwitchSection(&Section);
  MCSymbol *SectionStart = Context.CreateTempSymbol();
  Streamer.EmitLabel(SectionStart);
  Emitter.setSectionStart(SectionStart);

  MCSymbol *FDEEnd = NULL;
  DenseMap<CIEKey, const MCSymbol*> CIEStarts;
  const MCSymbol *DummyDebugKey = NULL;

  for (unsigned i = 0, n = FrameArray.size(); i < n; ++i) {
    const MCDwarfFrameInfo &Frame = FrameArray[i];
    CIEKey Key(Frame.Personality, Frame.PersonalityEncoding,
               Frame.LsdaEncoding, Frame.IsSignalFrame);
    // For .debug_frame a single dummy slot is reused; for .eh_frame the CIE
    // for this key is created on first use and shared afterwards.
    const MCSymbol *&CIEStart = IsEH ? CIEStarts[Key] : DummyDebugKey;
    if (!CIEStart)
      CIEStart = &Emitter.EmitCIE(Streamer, Frame.Personality,
                                  Frame.PersonalityEncoding, Frame.Lsda,
                                  Frame.IsSignalFrame,
                                  Frame.LsdaEncoding);

    FDEEnd = Emitter.EmitFDE(Streamer, *CIEStart, Frame);

    if (i != n - 1)
      Streamer.EmitLabel(FDEEnd);
  }

  Streamer.EmitValueToAlignment(Context.getAsmInfo().getPointerSize());
  if (FDEEnd)
    Streamer.EmitLabel(FDEEnd);
}

// Encodes a DW_CFA_advance_loc for AddrDelta into a buffer and emits it.
void MCDwarfFrameEmitter::EmitAdvanceLoc(MCStreamer &Streamer,
                                         uint64_t AddrDelta) {
  SmallString<256> Tmp;
  raw_svector_ostream OS(Tmp);
  MCDwarfFrameEmitter::EncodeAdvanceLoc(AddrDelta, OS);
  Streamer.EmitBytes(OS.str(), /*AddrSpace=*/0);
}

// Picks the smallest DW_CFA_advance_loc* form that can hold AddrDelta;
// a delta of zero needs no opcode at all.
void MCDwarfFrameEmitter::EncodeAdvanceLoc(uint64_t AddrDelta,
                                           raw_ostream &OS) {
  // FIXME: Assumes the code alignment factor is 1.
  if (AddrDelta == 0) {
  } else if (isUIntN(6, AddrDelta)) {
    // Delta fits in the 6 bit operand embedded in the opcode byte.
    uint8_t Opcode = dwarf::DW_CFA_advance_loc | AddrDelta;
    OS << Opcode;
  } else if (isUInt<8>(AddrDelta)) {
    OS << uint8_t(dwarf::DW_CFA_advance_loc1);
    OS << uint8_t(AddrDelta);
  } else if (isUInt<16>(AddrDelta)) {
    // FIXME: check what is the correct behavior on a big endian machine.
    OS << uint8_t(dwarf::DW_CFA_advance_loc2);
    OS << uint8_t( AddrDelta       & 0xff);
    OS << uint8_t((AddrDelta >> 8) & 0xff);
  } else {
    // FIXME: check what is the correct behavior on a big endian machine.
    assert(isUInt<32>(AddrDelta));
    OS << uint8_t(dwarf::DW_CFA_advance_loc4);
    OS << uint8_t( AddrDelta        & 0xff);
    OS << uint8_t((AddrDelta >> 8)  & 0xff);
    OS << uint8_t((AddrDelta >> 16) & 0xff);
    OS << uint8_t((AddrDelta >> 24) & 0xff);
  }
}
/* * Copyright (C) 2019 The LineageOS Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #define LOG_TAG "FingerprintInscreenService" #include "FingerprintInscreen.h" #include <android-base/logging.h> #include <fstream> #include <cmath> #define FINGERPRINT_ERROR_VENDOR 8 #define COMMAND_NIT 10 #define PARAM_NIT_FOD 3 #define PARAM_NIT_NONE 0 #define DISPPARAM_PATH "/sys/devices/platform/soc/5e00000.qcom,mdss_mdp/drm/card0/card0-DSI-1/disp_param" #define DISPPARAM_FOD_BACKLIGHT_HBM "0x1d007ff" #define DISPPARAM_FOD_BACKLIGHT_RESET "0x20f0000" #define FOD_STATUS_PATH "/sys/class/touch/tp_dev/fod_status" #define FOD_STATUS_ON 1 #define FOD_STATUS_OFF 0 #define FOD_SENSOR_X 293 #define FOD_SENSOR_Y 1356 #define FOD_SENSOR_SIZE 134 namespace vendor { namespace lineage { namespace biometrics { namespace fingerprint { namespace inscreen { namespace V1_0 { namespace implementation { template <typename T> static void set(const std::string& path, const T& value) { std::ofstream file(path); file << value; } FingerprintInscreen::FingerprintInscreen() { xiaomiFingerprintService = IXiaomiFingerprint::getService(); } Return<int32_t> FingerprintInscreen::getPositionX() { return FOD_SENSOR_X; } Return<int32_t> FingerprintInscreen::getPositionY() { return FOD_SENSOR_Y; } Return<int32_t> FingerprintInscreen::getSize() { return FOD_SENSOR_SIZE; } Return<void> FingerprintInscreen::onStartEnroll() { return Void(); } Return<void> FingerprintInscreen::onFinishEnroll() { return Void(); } 
// Finger placed on the sensor: force panel HBM and switch the sensor
// hardware into FOD illumination (NIT) mode.
Return<void> FingerprintInscreen::onPress() {
    set(DISPPARAM_PATH, DISPPARAM_FOD_BACKLIGHT_HBM);
    xiaomiFingerprintService->extCmd(COMMAND_NIT, PARAM_NIT_FOD);
    return Void();
}

// Finger lifted: undo the HBM override and leave NIT mode.
Return<void> FingerprintInscreen::onRelease() {
    set(DISPPARAM_PATH, DISPPARAM_FOD_BACKLIGHT_RESET);
    xiaomiFingerprintService->extCmd(COMMAND_NIT, PARAM_NIT_NONE);
    return Void();
}

// FOD UI shown: tell the touchscreen driver to start FOD detection.
Return<void> FingerprintInscreen::onShowFODView() {
    set(FOD_STATUS_PATH, FOD_STATUS_ON);
    return Void();
}

// FOD UI hidden: stop detection and restore normal panel/sensor state.
Return<void> FingerprintInscreen::onHideFODView() {
    set(FOD_STATUS_PATH, FOD_STATUS_OFF);
    set(DISPPARAM_PATH, DISPPARAM_FOD_BACKLIGHT_RESET);
    xiaomiFingerprintService->extCmd(COMMAND_NIT, PARAM_NIT_NONE);
    return Void();
}

// Acquisition events are only logged; none are consumed here.
Return<bool> FingerprintInscreen::handleAcquired(int32_t acquiredInfo, int32_t vendorCode) {
    LOG(ERROR) << "acquiredInfo: " << acquiredInfo << ", vendorCode: " << vendorCode << "\n";
    return false;
}

// Swallow only the vendor error with code 6 (treated as FOD-related);
// NOTE(review): the exact meaning of vendorCode 6 is not visible here —
// presumably a spurious FOD error on this device; confirm against the HAL.
Return<bool> FingerprintInscreen::handleError(int32_t error, int32_t vendorCode) {
    LOG(ERROR) << "error: " << error << ", vendorCode: " << vendorCode << "\n";
    return error == FINGERPRINT_ERROR_VENDOR && vendorCode == 6;
}

// Long-press mode is not supported on this hardware.
Return<void> FingerprintInscreen::setLongPressEnabled(bool) {
    return Void();
}

// Map panel brightness (0..255) to the dim-layer alpha (0..255) that keeps
// the FOD circle at constant perceived brightness. Two curves: the low-
// brightness branch uses a different scale below the 62 threshold.
Return<int32_t> FingerprintInscreen::getDimAmount(int32_t brightness) {
    float alpha;
    if (brightness > 62.0) {
        alpha = 1.0 - pow((((brightness / 255.0) * 430.0) / 600.0), 0.455);
    } else {
        alpha = 1.0 - pow((brightness / 210.0), 0.455);
    }
    return 255 * alpha;
}

// The framework does not need to boost brightness; HBM is driven directly
// via DISPPARAM_PATH in onPress().
Return<bool> FingerprintInscreen::shouldBoostBrightness() {
    return false;
}

// No asynchronous events are delivered, so the callback is unused.
Return<void> FingerprintInscreen::setCallback(const sp<::vendor::lineage::biometrics::fingerprint::inscreen::V1_0::IFingerprintInscreenCallback>& callback) {
    (void) callback;
    return Void();
}

}  // namespace implementation
}  // namespace V1_0
}  // namespace inscreen
}  // namespace fingerprint
}  // namespace biometrics
}  // namespace lineage
}  // namespace vendor
// Copyright (c) 2009-2017 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#if defined(HAVE_CONFIG_H)
#include "config/SIQS-config.h"
#endif

#include <cstring>

#if HAVE_DECL_STRNLEN == 0
/// Fallback strnlen(3) for platforms whose libc lacks it: returns the length
/// of the string at `start`, examining at most `max_len` bytes. If no NUL
/// terminator appears within `max_len` bytes, `max_len` is returned.
size_t strnlen(const char *start, size_t max_len)
{
    const char *terminator =
        static_cast<const char *>(memchr(start, '\0', max_len));
    if (terminator == NULL)
        return max_len;
    return static_cast<size_t>(terminator - start);
}
#endif // HAVE_DECL_STRNLEN
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/service/compiler.h"

#include <string>
#include <utility>

#include "tensorflow/compiler/xla/types.h"
#include "tensorflow/compiler/xla/util.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"

namespace se = ::perftools::gputools;

namespace xla {

// Guards the factory and compiler maps below; allocated lazily so there is no
// static-initialization-order dependency between translation units.
/* static */ tensorflow::mutex* Compiler::platform_compiler_mutex_;

// Thread-safe one-time allocation of platform_compiler_mutex_.
/* static */ void Compiler::LazyInitMutex() {
  static std::once_flag mutex_init_flag;
  std::call_once(mutex_init_flag, []() {
    Compiler::platform_compiler_mutex_ = new tensorflow::mutex;
  });
}

// Leaked-on-purpose singleton map: platform id -> factory that creates a
// Compiler for that platform. Access must hold platform_compiler_mutex_.
/* static */ std::map<perftools::gputools::Platform::Id,
                      Compiler::CompilerFactory>*
Compiler::GetPlatformCompilerFactories() {
  static auto* r =
      new std::map<perftools::gputools::Platform::Id, CompilerFactory>;
  return r;
}

// Leaked-on-purpose singleton map: platform id -> the instantiated Compiler
// (created on first GetForPlatform). Access must hold platform_compiler_mutex_.
/* static */ std::map<perftools::gputools::Platform::Id,
                      std::unique_ptr<Compiler>>*
Compiler::GetPlatformCompilers() {
  static auto* r = new std::map<perftools::gputools::Platform::Id,
                                std::unique_ptr<Compiler>>;
  return r;
}

// Registers `compiler_factory` for `platform_id`.
// CHECK-fails if a factory was already registered for that platform.
/* static */ void Compiler::RegisterCompilerFactory(
    se::Platform::Id platform_id,
    std::function<std::unique_ptr<Compiler>()> compiler_factory) {
  LazyInitMutex();
  tensorflow::mutex_lock lock(*platform_compiler_mutex_);
  auto* factories = GetPlatformCompilerFactories();
  CHECK(factories->find(platform_id) == factories->end());
  (*factories)[platform_id] = std::move(compiler_factory);
}

// Returns the (lazily created, cached) Compiler for `platform`, or NotFound
// if no factory was linked in for that platform id.
/* static */ StatusOr<Compiler*> Compiler::GetForPlatform(
    const se::Platform* platform) {
  LazyInitMutex();
  tensorflow::mutex_lock lock(*platform_compiler_mutex_);

  auto* compilers = GetPlatformCompilers();
  // See if we already instantiated a compiler for this platform.
  {
    auto it = compilers->find(platform->id());
    if (it != compilers->end()) {
      return it->second.get();
    }

    // If not, we just fall through to try to create one with a registered
    // factory.
  }

  auto* factories = GetPlatformCompilerFactories();
  auto it = factories->find(platform->id());
  if (it == factories->end()) {
    return NotFound(
        "could not find registered compiler for platform %s -- check "
        "target linkage",
        platform->Name().c_str());
  }

  // And then we invoke the factory, placing the result into the mapping.
  compilers->insert(std::make_pair(platform->id(), it->second()));
  return compilers->at(platform->id()).get();
}

}  // namespace xla
#include <iostream>  // standard headers belong in angle brackets
#include <cstdio>
#include <cstdlib>   // system()

using namespace std;

// Small demo of pointers-to-arrays (`int (*)[3]`).
//
// Fix vs. the original: the original read (*B)[0..8] and (*B)[9], relying on
// Test and Test2 being laid out contiguously on the stack. That layout is NOT
// guaranteed by the language — every access past index 2 is undefined
// behavior — so only the 3 valid elements of Test2 are printed now.
int main() {
    static int a[2] = {1, 2};  // kept from the original demo (not printed)
    (void)a;
    int* ptr[5];
    int p = 5, p2 = 6, *page, *page2;
    int Test[2][3] = {{1, 2, 3}, {4, 5, 6}};
    int Test2[3] = {1, 2, 3};

    page = &p;
    page2 = &p2;
    ptr[0] = page;
    ptr[1] = page2;

    int (*A)[3], (*B)[3];  // 数组指针 — pointers to arrays of 3 ints
    A = &Test[1];          // points at the second row {4, 5, 6}
    B = &Test2;

    cout << *page << endl;
    cout << (*A)[0] << ' ' << (*A)[1] << ' ' << (*A)[2] << endl;

    // Only indices 0..2 of *B are valid.
    for (int i = 0; i < 3; i++)
        cout << (*B)[i] << ' ';
    cout << endl;

    system("pause");  // Windows-only; harmless no-op elsewhere
    return 0;
}
/*
 * BoolLit.cpp
 *
 *  Created on: Jun 8, 2011
 *      Author: joe
 */

#include "BoolLit.h"
#include "../Domain.h"
#include <boost/archive/text_iarchive.hpp>
#include <boost/archive/text_oarchive.hpp>

// Two boolean literals are equal exactly when the other sentence is also a
// BoolLit carrying the same truth value.
bool BoolLit::doEquals(const Sentence& t) const {
    if (const BoolLit* other = dynamic_cast<const BoolLit*>(&t)) {
        return other->val_ == val_;
    }
    return false;
}

// A true literal is satisfied over the domain's maximal span-interval; a
// false literal is satisfied nowhere (an empty SISet over the max interval).
SISet BoolLit::satisfied(const Model& m, const Domain& d, bool forceLiquid) const {
    return val_
        ? SISet(d.maxSpanInterval(), forceLiquid, d.maxInterval())
        : SISet(forceLiquid, d.maxInterval());
}
#include <spatial/ecs/EntityHandle.h>
#include <spatial/ecs/Registry.h>

namespace spatial::ecs
{

// Thin wrapper around the underlying entt::registry.
Registry::Registry() : mRegistry{}
{
}

// Allocates and returns a fresh entity identifier.
Entity Registry::createEntity()
{
	return mRegistry.create();
}

// True while `entity` refers to a live (not yet destroyed) entity.
bool Registry::isValid(Entity entity) const noexcept
{
	return mRegistry.valid(entity);
}

// Number of entities currently tracked by the registry.
size_t Registry::getEntitiesCount() const noexcept
{
	return mRegistry.size();
}

// Removes `entity` and all of its components.
void Registry::destroy(Entity entity)
{
	mRegistry.destroy(entity);
}

// Extracts the version bits packed inside an entity identifier.
Registry::VersionType Registry::getVersion(Entity entity) noexcept
{
	return entt::registry::version(entity);
}

} // namespace spatial
/* For more information, please see: http://software.sci.utah.edu The MIT License Copyright (c) 2009 Scientific Computing and Imaging Institute, University of Utah. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ /// @todo Documentation Core/Datatypes/Legacy/Field/VFDataT_2.cc #include <Core/Datatypes/Legacy/Field/VFDataT.h> namespace SCIRun { VFDATA_FUNCTION_SCALAR_DEFINITION(long long) VFDATA_FUNCTION_SCALAR_DEFINITION(unsigned long long) VFDATA_FUNCTION_SCALAR_DEFINITION(int) VFDATA_FUNCTION_SCALAR_DEFINITION(unsigned int) VFDATA_FUNCTION_SCALAR_DEFINITION(float) }
// Copyright 2007-2009 the V8 project authors. All rights reserved. // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following // disclaimer in the documentation and/or other materials provided // with the distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include <limits.h> #include "v8.h" #include "api.h" #include "compilation-cache.h" #include "execution.h" #include "snapshot.h" #include "platform.h" #include "top.h" #include "utils.h" #include "cctest.h" static const bool kLogThreading = false; static bool IsNaN(double x) { #ifdef WIN32 return _isnan(x); #else return isnan(x); #endif } using ::v8::ObjectTemplate; using ::v8::Value; using ::v8::Context; using ::v8::Local; using ::v8::String; using ::v8::Script; using ::v8::Function; using ::v8::AccessorInfo; using ::v8::Extension; namespace i = ::v8::internal; static void ExpectString(const char* code, const char* expected) { Local<Value> result = CompileRun(code); CHECK(result->IsString()); String::AsciiValue ascii(result); CHECK_EQ(expected, *ascii); } static void ExpectBoolean(const char* code, bool expected) { Local<Value> result = CompileRun(code); CHECK(result->IsBoolean()); CHECK_EQ(expected, result->BooleanValue()); } static void ExpectObject(const char* code, Local<Value> expected) { Local<Value> result = CompileRun(code); CHECK(result->Equals(expected)); } static int signature_callback_count; static v8::Handle<Value> IncrementingSignatureCallback( const v8::Arguments& args) { ApiTestFuzzer::Fuzz(); signature_callback_count++; v8::Handle<v8::Array> result = v8::Array::New(args.Length()); for (int i = 0; i < args.Length(); i++) result->Set(v8::Integer::New(i), args[i]); return result; } static v8::Handle<Value> SignatureCallback(const v8::Arguments& args) { ApiTestFuzzer::Fuzz(); v8::Handle<v8::Array> result = v8::Array::New(args.Length()); for (int i = 0; i < args.Length(); i++) { result->Set(v8::Integer::New(i), args[i]); } return result; } THREADED_TEST(Handles) { v8::HandleScope scope; Local<Context> local_env; { LocalContext env; local_env = env.local(); } // Local context should still be live. 
CHECK(!local_env.IsEmpty()); local_env->Enter(); v8::Handle<v8::Primitive> undef = v8::Undefined(); CHECK(!undef.IsEmpty()); CHECK(undef->IsUndefined()); const char* c_source = "1 + 2 + 3"; Local<String> source = String::New(c_source); Local<Script> script = Script::Compile(source); CHECK_EQ(6, script->Run()->Int32Value()); local_env->Exit(); } THREADED_TEST(ReceiverSignature) { v8::HandleScope scope; LocalContext env; v8::Handle<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(); v8::Handle<v8::Signature> sig = v8::Signature::New(fun); fun->PrototypeTemplate()->Set( v8_str("m"), v8::FunctionTemplate::New(IncrementingSignatureCallback, v8::Handle<Value>(), sig)); env->Global()->Set(v8_str("Fun"), fun->GetFunction()); signature_callback_count = 0; CompileRun( "var o = new Fun();" "o.m();"); CHECK_EQ(1, signature_callback_count); v8::Handle<v8::FunctionTemplate> sub_fun = v8::FunctionTemplate::New(); sub_fun->Inherit(fun); env->Global()->Set(v8_str("SubFun"), sub_fun->GetFunction()); CompileRun( "var o = new SubFun();" "o.m();"); CHECK_EQ(2, signature_callback_count); v8::TryCatch try_catch; CompileRun( "var o = { };" "o.m = Fun.prototype.m;" "o.m();"); CHECK_EQ(2, signature_callback_count); CHECK(try_catch.HasCaught()); try_catch.Reset(); v8::Handle<v8::FunctionTemplate> unrel_fun = v8::FunctionTemplate::New(); sub_fun->Inherit(fun); env->Global()->Set(v8_str("UnrelFun"), unrel_fun->GetFunction()); CompileRun( "var o = new UnrelFun();" "o.m = Fun.prototype.m;" "o.m();"); CHECK_EQ(2, signature_callback_count); CHECK(try_catch.HasCaught()); } THREADED_TEST(ArgumentSignature) { v8::HandleScope scope; LocalContext env; v8::Handle<v8::FunctionTemplate> cons = v8::FunctionTemplate::New(); cons->SetClassName(v8_str("Cons")); v8::Handle<v8::Signature> sig = v8::Signature::New(v8::Handle<v8::FunctionTemplate>(), 1, &cons); v8::Handle<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(SignatureCallback, v8::Handle<Value>(), sig); env->Global()->Set(v8_str("Cons"), 
cons->GetFunction()); env->Global()->Set(v8_str("Fun1"), fun->GetFunction()); v8::Handle<Value> value1 = CompileRun("Fun1(4) == '';"); CHECK(value1->IsTrue()); v8::Handle<Value> value2 = CompileRun("Fun1(new Cons()) == '[object Cons]';"); CHECK(value2->IsTrue()); v8::Handle<Value> value3 = CompileRun("Fun1() == '';"); CHECK(value3->IsTrue()); v8::Handle<v8::FunctionTemplate> cons1 = v8::FunctionTemplate::New(); cons1->SetClassName(v8_str("Cons1")); v8::Handle<v8::FunctionTemplate> cons2 = v8::FunctionTemplate::New(); cons2->SetClassName(v8_str("Cons2")); v8::Handle<v8::FunctionTemplate> cons3 = v8::FunctionTemplate::New(); cons3->SetClassName(v8_str("Cons3")); v8::Handle<v8::FunctionTemplate> args[3] = { cons1, cons2, cons3 }; v8::Handle<v8::Signature> wsig = v8::Signature::New(v8::Handle<v8::FunctionTemplate>(), 3, args); v8::Handle<v8::FunctionTemplate> fun2 = v8::FunctionTemplate::New(SignatureCallback, v8::Handle<Value>(), wsig); env->Global()->Set(v8_str("Cons1"), cons1->GetFunction()); env->Global()->Set(v8_str("Cons2"), cons2->GetFunction()); env->Global()->Set(v8_str("Cons3"), cons3->GetFunction()); env->Global()->Set(v8_str("Fun2"), fun2->GetFunction()); v8::Handle<Value> value4 = CompileRun( "Fun2(new Cons1(), new Cons2(), new Cons3()) ==" "'[object Cons1],[object Cons2],[object Cons3]'"); CHECK(value4->IsTrue()); v8::Handle<Value> value5 = CompileRun( "Fun2(new Cons1(), new Cons2(), 5) == '[object Cons1],[object Cons2],'"); CHECK(value5->IsTrue()); v8::Handle<Value> value6 = CompileRun( "Fun2(new Cons3(), new Cons2(), new Cons1()) == ',[object Cons2],'"); CHECK(value6->IsTrue()); v8::Handle<Value> value7 = CompileRun( "Fun2(new Cons1(), new Cons2(), new Cons3(), 'd') == " "'[object Cons1],[object Cons2],[object Cons3],d';"); CHECK(value7->IsTrue()); v8::Handle<Value> value8 = CompileRun( "Fun2(new Cons1(), new Cons2()) == '[object Cons1],[object Cons2]'"); CHECK(value8->IsTrue()); } THREADED_TEST(HulIgennem) { v8::HandleScope scope; LocalContext env; 
v8::Handle<v8::Primitive> undef = v8::Undefined(); Local<String> undef_str = undef->ToString(); char* value = i::NewArray<char>(undef_str->Length() + 1); undef_str->WriteAscii(value); CHECK_EQ(0, strcmp(value, "undefined")); i::DeleteArray(value); } THREADED_TEST(Access) { v8::HandleScope scope; LocalContext env; Local<v8::Object> obj = v8::Object::New(); Local<Value> foo_before = obj->Get(v8_str("foo")); CHECK(foo_before->IsUndefined()); Local<String> bar_str = v8_str("bar"); obj->Set(v8_str("foo"), bar_str); Local<Value> foo_after = obj->Get(v8_str("foo")); CHECK(!foo_after->IsUndefined()); CHECK(foo_after->IsString()); CHECK_EQ(bar_str, foo_after); } THREADED_TEST(Script) { v8::HandleScope scope; LocalContext env; const char* c_source = "1 + 2 + 3"; Local<String> source = String::New(c_source); Local<Script> script = Script::Compile(source); CHECK_EQ(6, script->Run()->Int32Value()); } static uint16_t* AsciiToTwoByteString(const char* source) { int array_length = i::StrLength(source) + 1; uint16_t* converted = i::NewArray<uint16_t>(array_length); for (int i = 0; i < array_length; i++) converted[i] = source[i]; return converted; } class TestResource: public String::ExternalStringResource { public: static int dispose_count; explicit TestResource(uint16_t* data) : data_(data), length_(0) { while (data[length_]) ++length_; } ~TestResource() { i::DeleteArray(data_); ++dispose_count; } const uint16_t* data() const { return data_; } size_t length() const { return length_; } private: uint16_t* data_; size_t length_; }; int TestResource::dispose_count = 0; class TestAsciiResource: public String::ExternalAsciiStringResource { public: static int dispose_count; explicit TestAsciiResource(const char* data) : data_(data), length_(strlen(data)) { } ~TestAsciiResource() { i::DeleteArray(data_); ++dispose_count; } const char* data() const { return data_; } size_t length() const { return length_; } private: const char* data_; size_t length_; }; int TestAsciiResource::dispose_count 
= 0; THREADED_TEST(ScriptUsingStringResource) { TestResource::dispose_count = 0; const char* c_source = "1 + 2 * 3"; uint16_t* two_byte_source = AsciiToTwoByteString(c_source); { v8::HandleScope scope; LocalContext env; TestResource* resource = new TestResource(two_byte_source); Local<String> source = String::NewExternal(resource); Local<Script> script = Script::Compile(source); Local<Value> value = script->Run(); CHECK(value->IsNumber()); CHECK_EQ(7, value->Int32Value()); CHECK(source->IsExternal()); CHECK_EQ(resource, static_cast<TestResource*>(source->GetExternalStringResource())); v8::internal::Heap::CollectAllGarbage(false); CHECK_EQ(0, TestResource::dispose_count); } v8::internal::CompilationCache::Clear(); v8::internal::Heap::CollectAllGarbage(false); CHECK_EQ(1, TestResource::dispose_count); } THREADED_TEST(ScriptUsingAsciiStringResource) { TestAsciiResource::dispose_count = 0; const char* c_source = "1 + 2 * 3"; { v8::HandleScope scope; LocalContext env; Local<String> source = String::NewExternal(new TestAsciiResource(i::StrDup(c_source))); Local<Script> script = Script::Compile(source); Local<Value> value = script->Run(); CHECK(value->IsNumber()); CHECK_EQ(7, value->Int32Value()); v8::internal::Heap::CollectAllGarbage(false); CHECK_EQ(0, TestAsciiResource::dispose_count); } v8::internal::CompilationCache::Clear(); v8::internal::Heap::CollectAllGarbage(false); CHECK_EQ(1, TestAsciiResource::dispose_count); } THREADED_TEST(ScriptMakingExternalString) { TestResource::dispose_count = 0; uint16_t* two_byte_source = AsciiToTwoByteString("1 + 2 * 3"); { v8::HandleScope scope; LocalContext env; Local<String> source = String::New(two_byte_source); bool success = source->MakeExternal(new TestResource(two_byte_source)); CHECK(success); Local<Script> script = Script::Compile(source); Local<Value> value = script->Run(); CHECK(value->IsNumber()); CHECK_EQ(7, value->Int32Value()); v8::internal::Heap::CollectAllGarbage(false); CHECK_EQ(0, TestResource::dispose_count); } 
v8::internal::CompilationCache::Clear(); v8::internal::Heap::CollectAllGarbage(false); CHECK_EQ(1, TestResource::dispose_count); } THREADED_TEST(ScriptMakingExternalAsciiString) { TestAsciiResource::dispose_count = 0; const char* c_source = "1 + 2 * 3"; { v8::HandleScope scope; LocalContext env; Local<String> source = v8_str(c_source); bool success = source->MakeExternal( new TestAsciiResource(i::StrDup(c_source))); CHECK(success); Local<Script> script = Script::Compile(source); Local<Value> value = script->Run(); CHECK(value->IsNumber()); CHECK_EQ(7, value->Int32Value()); v8::internal::Heap::CollectAllGarbage(false); CHECK_EQ(0, TestAsciiResource::dispose_count); } v8::internal::CompilationCache::Clear(); v8::internal::Heap::CollectAllGarbage(false); CHECK_EQ(1, TestAsciiResource::dispose_count); } THREADED_TEST(UsingExternalString) { { v8::HandleScope scope; uint16_t* two_byte_string = AsciiToTwoByteString("test string"); Local<String> string = String::NewExternal(new TestResource(two_byte_string)); i::Handle<i::String> istring = v8::Utils::OpenHandle(*string); // Trigger GCs so that the newly allocated string moves to old gen. i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring); CHECK(isymbol->IsSymbol()); } i::Heap::CollectAllGarbage(false); i::Heap::CollectAllGarbage(false); } THREADED_TEST(UsingExternalAsciiString) { { v8::HandleScope scope; const char* one_byte_string = "test string"; Local<String> string = String::NewExternal( new TestAsciiResource(i::StrDup(one_byte_string))); i::Handle<i::String> istring = v8::Utils::OpenHandle(*string); // Trigger GCs so that the newly allocated string moves to old gen. 
i::Heap::CollectGarbage(0, i::NEW_SPACE); // in survivor space now i::Heap::CollectGarbage(0, i::NEW_SPACE); // in old gen now i::Handle<i::String> isymbol = i::Factory::SymbolFromString(istring); CHECK(isymbol->IsSymbol()); } i::Heap::CollectAllGarbage(false); i::Heap::CollectAllGarbage(false); } THREADED_TEST(ScavengeExternalString) { TestResource::dispose_count = 0; { v8::HandleScope scope; uint16_t* two_byte_string = AsciiToTwoByteString("test string"); Local<String> string = String::NewExternal(new TestResource(two_byte_string)); i::Handle<i::String> istring = v8::Utils::OpenHandle(*string); i::Heap::CollectGarbage(0, i::NEW_SPACE); CHECK(i::Heap::InNewSpace(*istring)); CHECK_EQ(0, TestResource::dispose_count); } i::Heap::CollectGarbage(0, i::NEW_SPACE); CHECK_EQ(1, TestResource::dispose_count); } THREADED_TEST(ScavengeExternalAsciiString) { TestAsciiResource::dispose_count = 0; { v8::HandleScope scope; const char* one_byte_string = "test string"; Local<String> string = String::NewExternal( new TestAsciiResource(i::StrDup(one_byte_string))); i::Handle<i::String> istring = v8::Utils::OpenHandle(*string); i::Heap::CollectGarbage(0, i::NEW_SPACE); CHECK(i::Heap::InNewSpace(*istring)); CHECK_EQ(0, TestAsciiResource::dispose_count); } i::Heap::CollectGarbage(0, i::NEW_SPACE); CHECK_EQ(1, TestAsciiResource::dispose_count); } THREADED_TEST(StringConcat) { { v8::HandleScope scope; LocalContext env; const char* one_byte_string_1 = "function a_times_t"; const char* two_byte_string_1 = "wo_plus_b(a, b) {return "; const char* one_byte_extern_1 = "a * 2 + b;} a_times_two_plus_b(4, 8) + "; const char* two_byte_extern_1 = "a_times_two_plus_b(4, 8) + "; const char* one_byte_string_2 = "a_times_two_plus_b(4, 8) + "; const char* two_byte_string_2 = "a_times_two_plus_b(4, 8) + "; const char* two_byte_extern_2 = "a_times_two_plus_b(1, 2);"; Local<String> left = v8_str(one_byte_string_1); Local<String> right = String::New(AsciiToTwoByteString(two_byte_string_1)); Local<String> 
source = String::Concat(left, right); right = String::NewExternal( new TestAsciiResource(i::StrDup(one_byte_extern_1))); source = String::Concat(source, right); right = String::NewExternal( new TestResource(AsciiToTwoByteString(two_byte_extern_1))); source = String::Concat(source, right); right = v8_str(one_byte_string_2); source = String::Concat(source, right); right = String::New(AsciiToTwoByteString(two_byte_string_2)); source = String::Concat(source, right); right = String::NewExternal( new TestResource(AsciiToTwoByteString(two_byte_extern_2))); source = String::Concat(source, right); Local<Script> script = Script::Compile(source); Local<Value> value = script->Run(); CHECK(value->IsNumber()); CHECK_EQ(68, value->Int32Value()); } v8::internal::CompilationCache::Clear(); i::Heap::CollectAllGarbage(false); i::Heap::CollectAllGarbage(false); } THREADED_TEST(GlobalProperties) { v8::HandleScope scope; LocalContext env; v8::Handle<v8::Object> global = env->Global(); global->Set(v8_str("pi"), v8_num(3.1415926)); Local<Value> pi = global->Get(v8_str("pi")); CHECK_EQ(3.1415926, pi->NumberValue()); } static v8::Handle<Value> handle_call(const v8::Arguments& args) { ApiTestFuzzer::Fuzz(); return v8_num(102); } static v8::Handle<Value> construct_call(const v8::Arguments& args) { ApiTestFuzzer::Fuzz(); args.This()->Set(v8_str("x"), v8_num(1)); args.This()->Set(v8_str("y"), v8_num(2)); return args.This(); } THREADED_TEST(FunctionTemplate) { v8::HandleScope scope; LocalContext env; { Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(handle_call); Local<Function> fun = fun_templ->GetFunction(); env->Global()->Set(v8_str("obj"), fun); Local<Script> script = v8_compile("obj()"); CHECK_EQ(102, script->Run()->Int32Value()); } // Use SetCallHandler to initialize a function template, should work like the // previous one. 
{ Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(); fun_templ->SetCallHandler(handle_call); Local<Function> fun = fun_templ->GetFunction(); env->Global()->Set(v8_str("obj"), fun); Local<Script> script = v8_compile("obj()"); CHECK_EQ(102, script->Run()->Int32Value()); } // Test constructor calls. { Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(construct_call); fun_templ->SetClassName(v8_str("funky")); Local<Function> fun = fun_templ->GetFunction(); env->Global()->Set(v8_str("obj"), fun); Local<Script> script = v8_compile("var s = new obj(); s.x"); CHECK_EQ(1, script->Run()->Int32Value()); Local<Value> result = v8_compile("(new obj()).toString()")->Run(); CHECK_EQ(v8_str("[object funky]"), result); } } THREADED_TEST(FindInstanceInPrototypeChain) { v8::HandleScope scope; LocalContext env; Local<v8::FunctionTemplate> base = v8::FunctionTemplate::New(); Local<v8::FunctionTemplate> derived = v8::FunctionTemplate::New(); Local<v8::FunctionTemplate> other = v8::FunctionTemplate::New(); derived->Inherit(base); Local<v8::Function> base_function = base->GetFunction(); Local<v8::Function> derived_function = derived->GetFunction(); Local<v8::Function> other_function = other->GetFunction(); Local<v8::Object> base_instance = base_function->NewInstance(); Local<v8::Object> derived_instance = derived_function->NewInstance(); Local<v8::Object> derived_instance2 = derived_function->NewInstance(); Local<v8::Object> other_instance = other_function->NewInstance(); derived_instance2->Set(v8_str("__proto__"), derived_instance); other_instance->Set(v8_str("__proto__"), derived_instance2); // base_instance is only an instance of base. CHECK_EQ(base_instance, base_instance->FindInstanceInPrototypeChain(base)); CHECK(base_instance->FindInstanceInPrototypeChain(derived).IsEmpty()); CHECK(base_instance->FindInstanceInPrototypeChain(other).IsEmpty()); // derived_instance is an instance of base and derived. 
// (Continuation of THREADED_TEST(FindInstanceInPrototypeChain).)
CHECK_EQ(derived_instance,
           derived_instance->FindInstanceInPrototypeChain(base));
  CHECK_EQ(derived_instance,
           derived_instance->FindInstanceInPrototypeChain(derived));
  CHECK(derived_instance->FindInstanceInPrototypeChain(other).IsEmpty());

  // other_instance is an instance of other and its immediate
  // prototype derived_instance2 is an instance of base and derived.
  // Note, derived_instance is an instance of base and derived too,
  // but it comes after derived_instance2 in the prototype chain of
  // other_instance.
  CHECK_EQ(derived_instance2,
           other_instance->FindInstanceInPrototypeChain(base));
  CHECK_EQ(derived_instance2,
           other_instance->FindInstanceInPrototypeChain(derived));
  CHECK_EQ(other_instance,
           other_instance->FindInstanceInPrototypeChain(other));
}


// A small positive int32 round-trips through v8::Integer unchanged.
THREADED_TEST(TinyInteger) {
  v8::HandleScope scope;
  LocalContext env;
  int32_t value = 239;
  Local<v8::Integer> value_obj = v8::Integer::New(value);
  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}


// The largest Smi-representable value round-trips through v8::Integer.
// Only meaningful on targets where Smis are narrower than 32 bits.
THREADED_TEST(BigSmiInteger) {
  v8::HandleScope scope;
  LocalContext env;
  int32_t value = i::Smi::kMaxValue;
  // We cannot add one to a Smi::kMaxValue without wrapping.
  if (i::kSmiValueSize < 32) {
    CHECK(i::Smi::IsValid(value));
    CHECK(!i::Smi::IsValid(value + 1));
    Local<v8::Integer> value_obj = v8::Integer::New(value);
    CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
  }
}


// A value one past Smi::kMaxValue (not Smi-representable) still round-trips
// through v8::Integer (stored as a heap number).
THREADED_TEST(BigInteger) {
  v8::HandleScope scope;
  LocalContext env;
  // We cannot add one to a Smi::kMaxValue without wrapping.
  if (i::kSmiValueSize < 32) {
    // The casts allow this to compile, even if Smi::kMaxValue is 2^31-1.
    // The code will not be run in that case, due to the "if" guard.
// (Continuation of THREADED_TEST(BigInteger).)
int32_t value =
        static_cast<int32_t>(static_cast<uint32_t>(i::Smi::kMaxValue) + 1);
    CHECK(value > i::Smi::kMaxValue);
    CHECK(!i::Smi::IsValid(value));
    Local<v8::Integer> value_obj = v8::Integer::New(value);
    CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
  }
}


// A small unsigned value round-trips through Integer::NewFromUnsigned.
THREADED_TEST(TinyUnsignedInteger) {
  v8::HandleScope scope;
  LocalContext env;
  uint32_t value = 239;
  Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}


// The largest Smi value, passed as unsigned, round-trips unchanged.
THREADED_TEST(BigUnsignedSmiInteger) {
  v8::HandleScope scope;
  LocalContext env;
  uint32_t value = static_cast<uint32_t>(i::Smi::kMaxValue);
  CHECK(i::Smi::IsValid(value));
  CHECK(!i::Smi::IsValid(value + 1));
  Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}


// An unsigned value just past Smi range round-trips through
// Integer::NewFromUnsigned.
THREADED_TEST(BigUnsignedInteger) {
  v8::HandleScope scope;
  LocalContext env;
  uint32_t value = static_cast<uint32_t>(i::Smi::kMaxValue) + 1;
  CHECK(value > static_cast<uint32_t>(i::Smi::kMaxValue));
  CHECK(!i::Smi::IsValid(value));
  Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}


// An unsigned value larger than INT32_MAX must not be truncated or
// sign-extended when wrapped via NewFromUnsigned.
THREADED_TEST(OutOfSignedRangeUnsignedInteger) {
  v8::HandleScope scope;
  LocalContext env;
  uint32_t INT32_MAX_AS_UINT = (1U << 31) - 1;
  uint32_t value = INT32_MAX_AS_UINT + 1;
  CHECK(value > INT32_MAX_AS_UINT);  // No overflow.
// (Continuation of THREADED_TEST(OutOfSignedRangeUnsignedInteger).)
Local<v8::Integer> value_obj = v8::Integer::NewFromUnsigned(value);
  CHECK_EQ(static_cast<int64_t>(value), value_obj->Value());
}


// A double round-trips through v8::Number.
THREADED_TEST(Number) {
  v8::HandleScope scope;
  LocalContext env;
  double PI = 3.1415926;
  Local<v8::Number> pi_obj = v8::Number::New(PI);
  CHECK_EQ(PI, pi_obj->NumberValue());
}


// NumberValue() applies JS ToNumber semantics to strings and booleans.
THREADED_TEST(ToNumber) {
  v8::HandleScope scope;
  LocalContext env;
  Local<String> str = v8_str("3.1415926");
  CHECK_EQ(3.1415926, str->NumberValue());
  v8::Handle<v8::Boolean> t = v8::True();
  CHECK_EQ(1.0, t->NumberValue());
  v8::Handle<v8::Boolean> f = v8::False();
  CHECK_EQ(0.0, f->NumberValue());
}


// Date::New truncates the time value to whole milliseconds (3.1415926 -> 3).
THREADED_TEST(Date) {
  v8::HandleScope scope;
  LocalContext env;
  double PI = 3.1415926;
  Local<Value> date_obj = v8::Date::New(PI);
  CHECK_EQ(3.0, date_obj->NumberValue());
}


// BooleanValue() applies JS ToBoolean: undefined/null/""/0/NaN are falsy,
// non-empty strings and non-zero numbers are truthy.
THREADED_TEST(Boolean) {
  v8::HandleScope scope;
  LocalContext env;
  v8::Handle<v8::Boolean> t = v8::True();
  CHECK(t->Value());
  v8::Handle<v8::Boolean> f = v8::False();
  CHECK(!f->Value());
  v8::Handle<v8::Primitive> u = v8::Undefined();
  CHECK(!u->BooleanValue());
  v8::Handle<v8::Primitive> n = v8::Null();
  CHECK(!n->BooleanValue());
  v8::Handle<String> str1 = v8_str("");
  CHECK(!str1->BooleanValue());
  v8::Handle<String> str2 = v8_str("x");
  CHECK(str2->BooleanValue());
  CHECK(!v8::Number::New(0)->BooleanValue());
  CHECK(v8::Number::New(-1)->BooleanValue());
  CHECK(v8::Number::New(1)->BooleanValue());
  CHECK(v8::Number::New(42)->BooleanValue());
  CHECK(!v8_compile("NaN")->Run()->BooleanValue());
}


// Invocation callback that always returns 13.4.
static v8::Handle<Value> DummyCallHandler(const v8::Arguments& args) {
  ApiTestFuzzer::Fuzz();
  return v8_num(876);
}
// (Continuation of THREADED_TEST(GlobalPrototype).)
templ->Set("x", v8_num(200));
  templ->SetAccessor(v8_str("m"), GetM);
  LocalContext env(0, templ);
  v8::Handle<v8::Object> obj = env->Global();
  // "dummy" lives on the global object's prototype template.
  v8::Handle<Script> script = v8_compile("dummy()");
  v8::Handle<Value> result = script->Run();
  CHECK_EQ(13.4, result->NumberValue());
  // "x" is an instance-template property; "m" is an accessor.
  CHECK_EQ(200, v8_compile("x")->Run()->Int32Value());
  CHECK_EQ(876, v8_compile("m")->Run()->Int32Value());
}


// ObjectTemplate: values set on a template appear on every instance, and a
// template may nest another template as a property value.
THREADED_TEST(ObjectTemplate) {
  v8::HandleScope scope;
  Local<ObjectTemplate> templ1 = ObjectTemplate::New();
  templ1->Set("x", v8_num(10));
  templ1->Set("y", v8_num(13));
  LocalContext env;
  Local<v8::Object> instance1 = templ1->NewInstance();
  env->Global()->Set(v8_str("p"), instance1);
  CHECK(v8_compile("(p.x == 10)")->Run()->BooleanValue());
  CHECK(v8_compile("(p.y == 13)")->Run()->BooleanValue());
  Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New();
  fun->PrototypeTemplate()->Set("nirk", v8_num(123));
  Local<ObjectTemplate> templ2 = fun->InstanceTemplate();
  templ2->Set("a", v8_num(12));
  templ2->Set("b", templ1);  // nested template
  Local<v8::Object> instance2 = templ2->NewInstance();
  env->Global()->Set(v8_str("q"), instance2);
  CHECK(v8_compile("(q.nirk == 123)")->Run()->BooleanValue());
  CHECK(v8_compile("(q.a == 12)")->Run()->BooleanValue());
  CHECK(v8_compile("(q.b.x == 10)")->Run()->BooleanValue());
  CHECK(v8_compile("(q.b.y == 13)")->Run()->BooleanValue());
}


// Invocation callback that always returns 17.2.
static v8::Handle<Value> GetFlabby(const v8::Arguments& args) {
  ApiTestFuzzer::Fuzz();
  return v8_num(17.2);
}


// Named accessor that always returns 15.2.
static v8::Handle<Value> GetKnurd(Local<String> property,
                                  const AccessorInfo&) {
  ApiTestFuzzer::Fuzz();
  return v8_num(15.2);
}


// Inherited templates: prototype properties and instance accessors of a
// super template must be visible on instances of inheriting templates.
// (Test continues in the next chunk.)
THREADED_TEST(DescriptorInheritance) {
  v8::HandleScope scope;
  v8::Handle<v8::FunctionTemplate> super = v8::FunctionTemplate::New();
  super->PrototypeTemplate()->Set("flabby",
                                  v8::FunctionTemplate::New(GetFlabby));
  super->PrototypeTemplate()->Set("PI", v8_num(3.14));
  super->InstanceTemplate()->SetAccessor(v8_str("knurd"), GetKnurd);
  v8::Handle<v8::FunctionTemplate> base1 = v8::FunctionTemplate::New();
// (Continuation of THREADED_TEST(DescriptorInheritance).)
base1->Inherit(super);
  base1->PrototypeTemplate()->Set("v1", v8_num(20.1));
  v8::Handle<v8::FunctionTemplate> base2 = v8::FunctionTemplate::New();
  base2->Inherit(super);
  base2->PrototypeTemplate()->Set("v2", v8_num(10.1));

  LocalContext env;

  env->Global()->Set(v8_str("s"), super->GetFunction());
  env->Global()->Set(v8_str("base1"), base1->GetFunction());
  env->Global()->Set(v8_str("base2"), base2->GetFunction());

  // Checks right __proto__ chain.
  CHECK(CompileRun("base1.prototype.__proto__ == s.prototype")->BooleanValue());
  CHECK(CompileRun("base2.prototype.__proto__ == s.prototype")->BooleanValue());

  CHECK(v8_compile("s.prototype.PI == 3.14")->Run()->BooleanValue());

  // Instance accessor should not be visible on function object or its prototype
  CHECK(CompileRun("s.knurd == undefined")->BooleanValue());
  CHECK(CompileRun("s.prototype.knurd == undefined")->BooleanValue());
  CHECK(CompileRun("base1.prototype.knurd == undefined")->BooleanValue());

  // Instances of base1 see super's prototype methods and instance accessors
  // plus their own prototype additions.
  env->Global()->Set(v8_str("obj"),
                     base1->GetFunction()->NewInstance());
  CHECK_EQ(17.2, v8_compile("obj.flabby()")->Run()->NumberValue());
  CHECK(v8_compile("'flabby' in obj")->Run()->BooleanValue());
  CHECK_EQ(15.2, v8_compile("obj.knurd")->Run()->NumberValue());
  CHECK(v8_compile("'knurd' in obj")->Run()->BooleanValue());
  CHECK_EQ(20.1, v8_compile("obj.v1")->Run()->NumberValue());

  env->Global()->Set(v8_str("obj2"),
                     base2->GetFunction()->NewInstance());
  CHECK_EQ(17.2, v8_compile("obj2.flabby()")->Run()->NumberValue());
  CHECK(v8_compile("'flabby' in obj2")->Run()->BooleanValue());
  CHECK_EQ(15.2, v8_compile("obj2.knurd")->Run()->NumberValue());
  CHECK(v8_compile("'knurd' in obj2")->Run()->BooleanValue());
  CHECK_EQ(10.1, v8_compile("obj2.v2")->Run()->NumberValue());

  // base1 and base2 cannot cross reference to each's prototype
  CHECK(v8_compile("obj.v2")->Run()->IsUndefined());
  CHECK(v8_compile("obj2.v1")->Run()->IsUndefined());
}


// Counts calls to EchoNamedProperty across a test run.
int echo_named_call_count;


// Named property interceptor that echoes the property name back as the value
// and counts its invocations.  (Signature continues in the next chunk.)
static v8::Handle<Value> EchoNamedProperty(Local<String> name,
                                           const AccessorInfo&
info) {
  ApiTestFuzzer::Fuzz();
  // The data value registered with the handler must be passed through.
  CHECK_EQ(v8_str("data"), info.Data());
  echo_named_call_count++;
  return name;
}


// A named property interceptor must receive named accesses (with its data
// argument) while default behavior still applies to writes and deletes.
THREADED_TEST(NamedPropertyHandlerGetter) {
  echo_named_call_count = 0;
  v8::HandleScope scope;
  v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
  templ->InstanceTemplate()->SetNamedPropertyHandler(EchoNamedProperty,
                                                     0, 0, 0, 0,
                                                     v8_str("data"));
  LocalContext env;
  env->Global()->Set(v8_str("obj"),
                     templ->GetFunction()->NewInstance());
  CHECK_EQ(echo_named_call_count, 0);
  v8_compile("obj.x")->Run();
  CHECK_EQ(echo_named_call_count, 1);
  // Both computed and dot access go through the interceptor.
  const char* code = "var str = 'oddle'; obj[str] + obj.poddle;";
  v8::Handle<Value> str = CompileRun(code);
  String::AsciiValue value(str);
  CHECK_EQ(*value, "oddlepoddle");
  // Check default behavior
  CHECK_EQ(v8_compile("obj.flob = 10;")->Run()->Int32Value(), 10);
  CHECK(v8_compile("'myProperty' in obj")->Run()->BooleanValue());
  CHECK(v8_compile("delete obj.myProperty")->Run()->BooleanValue());
}


// Counts calls to EchoIndexedProperty across a test run.
int echo_indexed_call_count = 0;


// Indexed property interceptor that echoes the index back as the value and
// verifies its registered data argument.
static v8::Handle<Value> EchoIndexedProperty(uint32_t index,
                                             const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK_EQ(v8_num(637), info.Data());
  echo_indexed_call_count++;
  return v8_num(index);
}


// An indexed property interceptor must receive indexed accesses.
THREADED_TEST(IndexedPropertyHandlerGetter) {
  v8::HandleScope scope;
  v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
  templ->InstanceTemplate()->SetIndexedPropertyHandler(EchoIndexedProperty,
                                                       0, 0, 0, 0,
                                                       v8_num(637));
  LocalContext env;
  env->Global()->Set(v8_str("obj"),
                     templ->GetFunction()->NewInstance());
  Local<Script> script = v8_compile("obj[900]");
  CHECK_EQ(script->Run()->Int32Value(), 900);
}


// The object at the bottom of the prototype chain built by
// PropertyHandlerInPrototype; every CheckThis* callback asserts that
// info.This() is this object regardless of where the interceptor fired.
v8::Handle<v8::Object> bottom;


// Indexed getter: must see the original receiver as This().
static v8::Handle<Value> CheckThisIndexedPropertyHandler(
    uint32_t index,
    const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(info.This()->Equals(bottom));
  return v8::Handle<Value>();
}


// Named getter: must see the original receiver as This().
// (Body continues in the next chunk.)
static v8::Handle<Value> CheckThisNamedPropertyHandler(
    Local<String> name,
    const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(info.This()->Equals(bottom));
return v8::Handle<Value>();
}


// Indexed setter: must see the original receiver as This().
v8::Handle<Value> CheckThisIndexedPropertySetter(uint32_t index,
                                                 Local<Value> value,
                                                 const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(info.This()->Equals(bottom));
  return v8::Handle<Value>();
}


// Named setter: must see the original receiver as This().
v8::Handle<Value> CheckThisNamedPropertySetter(Local<String> property,
                                               Local<Value> value,
                                               const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(info.This()->Equals(bottom));
  return v8::Handle<Value>();
}


// Indexed query: must see the original receiver as This().
v8::Handle<v8::Boolean> CheckThisIndexedPropertyQuery(
    uint32_t index,
    const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(info.This()->Equals(bottom));
  return v8::Handle<v8::Boolean>();
}


// Named query: must see the original receiver as This().
v8::Handle<v8::Boolean> CheckThisNamedPropertyQuery(Local<String> property,
                                                    const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(info.This()->Equals(bottom));
  return v8::Handle<v8::Boolean>();
}


// Indexed deleter: must see the original receiver as This().
v8::Handle<v8::Boolean> CheckThisIndexedPropertyDeleter(
    uint32_t index,
    const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(info.This()->Equals(bottom));
  return v8::Handle<v8::Boolean>();
}


// Named deleter: must see the original receiver as This().
v8::Handle<v8::Boolean> CheckThisNamedPropertyDeleter(
    Local<String> property,
    const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(info.This()->Equals(bottom));
  return v8::Handle<v8::Boolean>();
}


// Indexed enumerator: must see the original receiver as This().
v8::Handle<v8::Array> CheckThisIndexedPropertyEnumerator(
    const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(info.This()->Equals(bottom));
  return v8::Handle<v8::Array>();
}


// Named enumerator: must see the original receiver as This().
v8::Handle<v8::Array> CheckThisNamedPropertyEnumerator(
    const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(info.This()->Equals(bottom));
  return v8::Handle<v8::Array>();
}


// Interceptors on prototype objects must still report the original receiver
// via info.This().  (Test continues in the next chunk.)
THREADED_TEST(PropertyHandlerInPrototype) {
  v8::HandleScope scope;
  LocalContext env;

  // Set up a prototype chain with three interceptors.
// (Continuation of THREADED_TEST(PropertyHandlerInPrototype).)
v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
  templ->InstanceTemplate()->SetIndexedPropertyHandler(
      CheckThisIndexedPropertyHandler,
      CheckThisIndexedPropertySetter,
      CheckThisIndexedPropertyQuery,
      CheckThisIndexedPropertyDeleter,
      CheckThisIndexedPropertyEnumerator);

  templ->InstanceTemplate()->SetNamedPropertyHandler(
      CheckThisNamedPropertyHandler,
      CheckThisNamedPropertySetter,
      CheckThisNamedPropertyQuery,
      CheckThisNamedPropertyDeleter,
      CheckThisNamedPropertyEnumerator);

  // All three objects carry the interceptors; `bottom` is the receiver used
  // for script accesses, so every callback must report it as This().
  bottom = templ->GetFunction()->NewInstance();
  Local<v8::Object> top = templ->GetFunction()->NewInstance();
  Local<v8::Object> middle = templ->GetFunction()->NewInstance();

  bottom->Set(v8_str("__proto__"), middle);
  middle->Set(v8_str("__proto__"), top);
  env->Global()->Set(v8_str("obj"), bottom);

  // Indexed and named get.
  Script::Compile(v8_str("obj[0]"))->Run();
  Script::Compile(v8_str("obj.x"))->Run();

  // Indexed and named set.
  Script::Compile(v8_str("obj[1] = 42"))->Run();
  Script::Compile(v8_str("obj.y = 42"))->Run();

  // Indexed and named query.
  Script::Compile(v8_str("0 in obj"))->Run();
  Script::Compile(v8_str("'x' in obj"))->Run();

  // Indexed and named deleter.
  Script::Compile(v8_str("delete obj[0]"))->Run();
  Script::Compile(v8_str("delete obj.x"))->Run();

  // Enumerators.
Script::Compile(v8_str("for (var p in obj) ;"))->Run();
}


// Named getter interceptor that only handles the property "pre"; returning
// an empty handle for anything else lets the lookup fall through.
static v8::Handle<Value> PrePropertyHandlerGet(Local<String> key,
                                               const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  if (v8_str("pre")->Equals(key)) {
    return v8_str("PrePropertyHandler: pre");
  }
  return v8::Handle<String>();
}


// Query interceptor matching the getter above: claims only "pre".
static v8::Handle<v8::Boolean> PrePropertyHandlerHas(Local<String> key,
                                                     const AccessorInfo&) {
  if (v8_str("pre")->Equals(key)) {
    return v8::True();
  }
  return v8::Handle<v8::Boolean>();  // do not intercept the call
}


// A named interceptor installed on the global template intercepts "pre",
// lets "on" resolve normally, and leaves unknown names ("post") to throw a
// ReferenceError (empty result).
THREADED_TEST(PrePropertyHandler) {
  v8::HandleScope scope;
  v8::Handle<v8::FunctionTemplate> desc = v8::FunctionTemplate::New();
  desc->InstanceTemplate()->SetNamedPropertyHandler(PrePropertyHandlerGet,
                                                    0,
                                                    PrePropertyHandlerHas);
  LocalContext env(NULL, desc->InstanceTemplate());
  Script::Compile(v8_str(
      "var pre = 'Object: pre'; var on = 'Object: on';"))->Run();
  v8::Handle<Value> result_pre = Script::Compile(v8_str("pre"))->Run();
  CHECK_EQ(v8_str("PrePropertyHandler: pre"), result_pre);
  v8::Handle<Value> result_on = Script::Compile(v8_str("on"))->Run();
  CHECK_EQ(v8_str("Object: on"), result_on);
  v8::Handle<Value> result_post = Script::Compile(v8_str("post"))->Run();
  CHECK(result_post.IsEmpty());
}


// propertyIsEnumerable(undefined) on the global object must be false.
THREADED_TEST(UndefinedIsNotEnumerable) {
  v8::HandleScope scope;
  LocalContext env;
  v8::Handle<Value> result = Script::Compile(v8_str(
      "this.propertyIsEnumerable(undefined)"))->Run();
  CHECK(result->IsFalse());
}


// Script shared between the recursion helpers below; each run re-enters C++.
v8::Handle<Script> call_recursively_script;
static const int kTargetRecursionDepth = 200;  // near maximum


// Re-runs call_recursively_script until a "depth" counter on the receiver
// reaches kTargetRecursionDepth, alternating JS and C++ stack frames.
static v8::Handle<Value> CallScriptRecursivelyCall(const v8::Arguments& args) {
  ApiTestFuzzer::Fuzz();
  int depth = args.This()->Get(v8_str("depth"))->Int32Value();
  if (depth == kTargetRecursionDepth) return v8::Undefined();
  args.This()->Set(v8_str("depth"), v8::Integer::New(depth + 1));
  return call_recursively_script->Run();
}


// Same idea but recurses through Function::Call rather than Script::Run.
// (Body continues in the next chunk.)
static v8::Handle<Value> CallFunctionRecursivelyCall(
    const v8::Arguments& args) {
  ApiTestFuzzer::Fuzz();
  int depth =
args.This()->Get(v8_str("depth"))->Int32Value();
  if (depth == kTargetRecursionDepth) {
    printf("[depth = %d]\n", depth);
    return v8::Undefined();
  }
  args.This()->Set(v8_str("depth"), v8::Integer::New(depth + 1));
  v8::Handle<Value> function =
      args.This()->Get(v8_str("callFunctionRecursively"));
  return v8::Handle<Function>::Cast(function)->Call(args.This(), 0, NULL);
}


// Deeply interleaved JS<->C++ recursion must not crash, via both
// Script::Run and Function::Call re-entry paths.
THREADED_TEST(DeepCrossLanguageRecursion) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> global = ObjectTemplate::New();
  global->Set(v8_str("callScriptRecursively"),
              v8::FunctionTemplate::New(CallScriptRecursivelyCall));
  global->Set(v8_str("callFunctionRecursively"),
              v8::FunctionTemplate::New(CallFunctionRecursivelyCall));
  LocalContext env(NULL, global);

  env->Global()->Set(v8_str("depth"), v8::Integer::New(0));
  call_recursively_script = v8_compile("callScriptRecursively()");
  v8::Handle<Value> result = call_recursively_script->Run();
  // Clear the global handle so the script can be collected.
  call_recursively_script = v8::Handle<Script>();

  env->Global()->Set(v8_str("depth"), v8::Integer::New(0));
  Script::Compile(v8_str("callFunctionRecursively()"))->Run();
}


// Named getter that throws the accessed key as an exception.
static v8::Handle<Value> ThrowingPropertyHandlerGet(Local<String> key,
                                                    const AccessorInfo&) {
  ApiTestFuzzer::Fuzz();
  return v8::ThrowException(key);
}


// Named setter that throws the key but still returns a non-empty handle to
// signal the store was intercepted.
static v8::Handle<Value> ThrowingPropertyHandlerSet(Local<String> key,
                                                    Local<Value>,
                                                    const AccessorInfo&) {
  v8::ThrowException(key);
  return v8::Undefined();  // not the same as v8::Handle<v8::Value>()
}


// Exceptions thrown from interceptors must be catchable by JS try/catch.
// (Test continues in the next chunk.)
THREADED_TEST(CallbackExceptionRegression) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
  obj->SetNamedPropertyHandler(ThrowingPropertyHandlerGet,
                               ThrowingPropertyHandlerSet);
  LocalContext env;
  env->Global()->Set(v8_str("obj"), obj->NewInstance());
  v8::Handle<Value> otto = Script::Compile(v8_str(
      "try { with (obj) { otto; } } catch (e) { e; }"))->Run();
  CHECK_EQ(v8_str("otto"), otto);
  v8::Handle<Value> netto = Script::Compile(v8_str(
      "try { with (obj) { netto = 4; } } catch (e) { e; }"))->Run();
CHECK_EQ(v8_str("netto"), netto);
}


// Prototype-template values must be visible via Function.prototype.
THREADED_TEST(FunctionPrototype) {
  v8::HandleScope scope;
  Local<v8::FunctionTemplate> Foo = v8::FunctionTemplate::New();
  Foo->PrototypeTemplate()->Set(v8_str("plak"), v8_num(321));
  LocalContext env;
  env->Global()->Set(v8_str("Foo"), Foo->GetFunction());
  Local<Script> script = Script::Compile(v8_str("Foo.prototype.plak"));
  CHECK_EQ(script->Run()->Int32Value(), 321);
}


// Internal fields: default to undefined and hold values written via
// SetInternalField.
THREADED_TEST(InternalFields) {
  v8::HandleScope scope;
  LocalContext env;

  Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
  Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
  instance_templ->SetInternalFieldCount(1);
  Local<v8::Object> obj = templ->GetFunction()->NewInstance();
  CHECK_EQ(1, obj->InternalFieldCount());
  CHECK(obj->GetInternalField(0)->IsUndefined());
  obj->SetInternalField(0, v8_num(17));
  CHECK_EQ(17, obj->GetInternalField(0)->Int32Value());
}


// Raw native pointers stored in internal fields must survive a full GC,
// for both aligned and unaligned addresses.
// (Test continues in the next chunk.)
THREADED_TEST(InternalFieldsNativePointers) {
  v8::HandleScope scope;
  LocalContext env;

  Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
  Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
  instance_templ->SetInternalFieldCount(1);
  Local<v8::Object> obj = templ->GetFunction()->NewInstance();
  CHECK_EQ(1, obj->InternalFieldCount());
  CHECK(obj->GetPointerFromInternalField(0) == NULL);

  char* data = new char[100];

  void* aligned = data;
  CHECK_EQ(0, reinterpret_cast<uintptr_t>(aligned) & 0x1);
  void* unaligned = data + 1;
  CHECK_EQ(1, reinterpret_cast<uintptr_t>(unaligned) & 0x1);

  // Check reading and writing aligned pointers.
  obj->SetPointerInInternalField(0, aligned);
  i::Heap::CollectAllGarbage(false);
  CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));

  // Check reading and writing unaligned pointers.
obj->SetPointerInInternalField(0, unaligned);
  i::Heap::CollectAllGarbage(false);
  CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));

  delete[] data;
}


// Pointers written with SetPointerInInternalField must be readable through
// External::Unwrap and vice versa (External::Wrap <-> GetPointerFrom...),
// surviving full GCs, for both aligned and unaligned addresses.
THREADED_TEST(InternalFieldsNativePointersAndExternal) {
  v8::HandleScope scope;
  LocalContext env;

  Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
  Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
  instance_templ->SetInternalFieldCount(1);
  Local<v8::Object> obj = templ->GetFunction()->NewInstance();
  CHECK_EQ(1, obj->InternalFieldCount());
  CHECK(obj->GetPointerFromInternalField(0) == NULL);

  char* data = new char[100];

  void* aligned = data;
  CHECK_EQ(0, reinterpret_cast<uintptr_t>(aligned) & 0x1);
  void* unaligned = data + 1;
  CHECK_EQ(1, reinterpret_cast<uintptr_t>(unaligned) & 0x1);

  obj->SetPointerInInternalField(0, aligned);
  i::Heap::CollectAllGarbage(false);
  CHECK_EQ(aligned, v8::External::Unwrap(obj->GetInternalField(0)));

  obj->SetPointerInInternalField(0, unaligned);
  i::Heap::CollectAllGarbage(false);
  CHECK_EQ(unaligned, v8::External::Unwrap(obj->GetInternalField(0)));

  obj->SetInternalField(0, v8::External::Wrap(aligned));
  i::Heap::CollectAllGarbage(false);
  CHECK_EQ(aligned, obj->GetPointerFromInternalField(0));

  obj->SetInternalField(0, v8::External::Wrap(unaligned));
  i::Heap::CollectAllGarbage(false);
  CHECK_EQ(unaligned, obj->GetPointerFromInternalField(0));

  delete[] data;
}


// GetIdentityHash must be stable per object and (statistically) distinct
// across objects; it must not be derived from the object's address.
// (Test continues in the next chunk.)
THREADED_TEST(IdentityHash) {
  v8::HandleScope scope;
  LocalContext env;

  // Ensure that the test starts with an fresh heap to test whether the hash
  // code is based on the address.
  i::Heap::CollectAllGarbage(false);
  Local<v8::Object> obj = v8::Object::New();
  int hash = obj->GetIdentityHash();
  int hash1 = obj->GetIdentityHash();
  CHECK_EQ(hash, hash1);
  int hash2 = v8::Object::New()->GetIdentityHash();
  // Since the identity hash is essentially a random number two consecutive
  // objects should not be assigned the same hash code. If the test below fails
  // the random number generator should be evaluated.
CHECK_NE(hash, hash2);
  i::Heap::CollectAllGarbage(false);
  int hash3 = v8::Object::New()->GetIdentityHash();
  // Make sure that the identity hash is not based on the initial address of
  // the object alone. If the test below fails the random number generator
  // should be evaluated.
  CHECK_NE(hash, hash3);
  int hash4 = obj->GetIdentityHash();
  CHECK_EQ(hash, hash4);
}


// Hidden values: settable/gettable/deletable via the hidden-value API,
// surviving GC, and invisible to ordinary property access (the ""-named
// regular property must not collide with the hidden-property backing store).
// (Test continues in the next chunk.)
THREADED_TEST(HiddenProperties) {
  v8::HandleScope scope;
  LocalContext env;

  v8::Local<v8::Object> obj = v8::Object::New();
  v8::Local<v8::String> key = v8_str("api-test::hidden-key");
  v8::Local<v8::String> empty = v8_str("");
  v8::Local<v8::String> prop_name = v8_str("prop_name");

  i::Heap::CollectAllGarbage(false);

  // Make sure delete of a non-existent hidden value works
  CHECK(obj->DeleteHiddenValue(key));

  CHECK(obj->SetHiddenValue(key, v8::Integer::New(1503)));
  CHECK_EQ(1503, obj->GetHiddenValue(key)->Int32Value());
  CHECK(obj->SetHiddenValue(key, v8::Integer::New(2002)));
  CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());

  i::Heap::CollectAllGarbage(false);

  // Make sure we do not find the hidden property.
  CHECK(!obj->Has(empty));
  CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
  CHECK(obj->Get(empty)->IsUndefined());
  CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
  CHECK(obj->Set(empty, v8::Integer::New(2003)));
  CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
  CHECK_EQ(2003, obj->Get(empty)->Int32Value());

  i::Heap::CollectAllGarbage(false);

  // Add another property and delete it afterwards to force the object in
  // slow case.
CHECK(obj->Set(prop_name, v8::Integer::New(2008)));
  CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
  CHECK_EQ(2008, obj->Get(prop_name)->Int32Value());
  CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());
  CHECK(obj->Delete(prop_name));
  CHECK_EQ(2002, obj->GetHiddenValue(key)->Int32Value());

  i::Heap::CollectAllGarbage(false);

  CHECK(obj->DeleteHiddenValue(key));
  CHECK(obj->GetHiddenValue(key).IsEmpty());
}


// Set to true if the interceptor below fires; the test asserts it never does
// for hidden-value operations.
static bool interceptor_for_hidden_properties_called;


// Named interceptor that only records that it was invoked.
static v8::Handle<Value> InterceptorForHiddenProperties(
    Local<String> name, const AccessorInfo& info) {
  interceptor_for_hidden_properties_called = true;
  return v8::Handle<Value>();
}


// Hidden-value accesses must bypass named property interceptors entirely.
THREADED_TEST(HiddenPropertiesWithInterceptors) {
  v8::HandleScope scope;
  LocalContext context;

  interceptor_for_hidden_properties_called = false;

  v8::Local<v8::String> key = v8_str("api-test::hidden-key");

  // Associate an interceptor with an object and start setting hidden values.
  Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
  Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
  instance_templ->SetNamedPropertyHandler(InterceptorForHiddenProperties);
  Local<v8::Function> function = fun_templ->GetFunction();
  Local<v8::Object> obj = function->NewInstance();
  CHECK(obj->SetHiddenValue(key, v8::Integer::New(2302)));
  CHECK_EQ(2302, obj->GetHiddenValue(key)->Int32Value());
  CHECK(!interceptor_for_hidden_properties_called);
}


// v8::External wraps a raw pointer; mutations through the unwrapped pointer
// must be visible to the original C++ variable.
// (Test continues in the next chunk.)
THREADED_TEST(External) {
  v8::HandleScope scope;
  int x = 3;
  Local<v8::External> ext = v8::External::New(&x);
  LocalContext env;
  env->Global()->Set(v8_str("ext"), ext);
  Local<Value> reext_obj = Script::Compile(v8_str("this.ext"))->Run();
  v8::Handle<v8::External> reext = v8::Handle<v8::External>::Cast(reext_obj);
  int* ptr = static_cast<int*>(reext->Value());
  CHECK_EQ(x, 3);
  *ptr = 10;
  CHECK_EQ(x, 10);

  // Make sure unaligned pointers are wrapped properly.
char* data = i::StrDup("0123456789");
  Local<v8::Value> zero = v8::External::Wrap(&data[0]);
  Local<v8::Value> one = v8::External::Wrap(&data[1]);
  Local<v8::Value> two = v8::External::Wrap(&data[2]);
  Local<v8::Value> three = v8::External::Wrap(&data[3]);

  char* char_ptr = reinterpret_cast<char*>(v8::External::Unwrap(zero));
  CHECK_EQ('0', *char_ptr);
  char_ptr = reinterpret_cast<char*>(v8::External::Unwrap(one));
  CHECK_EQ('1', *char_ptr);
  char_ptr = reinterpret_cast<char*>(v8::External::Unwrap(two));
  CHECK_EQ('2', *char_ptr);
  char_ptr = reinterpret_cast<char*>(v8::External::Unwrap(three));
  CHECK_EQ('3', *char_ptr);
  i::DeleteArray(data);
}


// A Persistent handle keeps its string alive after the creating HandleScope
// is gone; Dispose releases it.
THREADED_TEST(GlobalHandle) {
  v8::Persistent<String> global;
  {
    v8::HandleScope scope;
    Local<String> str = v8_str("str");
    global = v8::Persistent<String>::New(str);
  }
  CHECK_EQ(global->Length(), 3);
  global.Dispose();
}


// A script throw yields an empty result and is reported through TryCatch
// with the thrown value as the exception.
THREADED_TEST(ScriptException) {
  v8::HandleScope scope;
  LocalContext env;
  Local<Script> script = Script::Compile(v8_str("throw 'panama!';"));
  v8::TryCatch try_catch;
  Local<Value> result = script->Run();
  CHECK(result.IsEmpty());
  CHECK(try_catch.HasCaught());
  String::AsciiValue exception_value(try_catch.Exception());
  CHECK_EQ(*exception_value, "panama!");
}


// Set by check_message once the message listener has fired.
bool message_received;


// Message listener that verifies the listener data, the script resource
// name, and the script data all reach the handler.
static void check_message(v8::Handle<v8::Message> message,
                          v8::Handle<Value> data) {
  CHECK_EQ(5.76, data->NumberValue());
  CHECK_EQ(6.75, message->GetScriptResourceName()->NumberValue());
  CHECK_EQ(7.56, message->GetScriptData()->NumberValue());
  message_received = true;
}


// AddMessageListener data, ScriptOrigin resource name, and Script::SetData
// must all be delivered to the listener when a script throws.
THREADED_TEST(MessageHandlerData) {
  message_received = false;
  v8::HandleScope scope;
  CHECK(!message_received);
  v8::V8::AddMessageListener(check_message, v8_num(5.76));
  LocalContext context;
  v8::ScriptOrigin origin = v8::ScriptOrigin(v8_str("6.75"));
  v8::Handle<v8::Script> script = Script::Compile(v8_str("throw 'error'"),
                                                  &origin);
  script->SetData(v8_str("7.56"));
  script->Run();
  CHECK(message_received);
  // clear out the message listener
  v8::V8::RemoveMessageListeners(check_message);
}
// Properties set with string, integer, and numeric keys must be readable
// through all equivalent key forms (e.g. "12", Integer 12, Number 12).
THREADED_TEST(GetSetProperty) {
  v8::HandleScope scope;
  LocalContext context;
  context->Global()->Set(v8_str("foo"), v8_num(14));
  context->Global()->Set(v8_str("12"), v8_num(92));
  context->Global()->Set(v8::Integer::New(16), v8_num(32));
  context->Global()->Set(v8_num(13), v8_num(56));
  Local<Value> foo = Script::Compile(v8_str("this.foo"))->Run();
  CHECK_EQ(14, foo->Int32Value());
  Local<Value> twelve = Script::Compile(v8_str("this[12]"))->Run();
  CHECK_EQ(92, twelve->Int32Value());
  Local<Value> sixteen = Script::Compile(v8_str("this[16]"))->Run();
  CHECK_EQ(32, sixteen->Int32Value());
  Local<Value> thirteen = Script::Compile(v8_str("this[13]"))->Run();
  CHECK_EQ(56, thirteen->Int32Value());
  CHECK_EQ(92, context->Global()->Get(v8::Integer::New(12))->Int32Value());
  CHECK_EQ(92, context->Global()->Get(v8_str("12"))->Int32Value());
  CHECK_EQ(92, context->Global()->Get(v8_num(12))->Int32Value());
  CHECK_EQ(32, context->Global()->Get(v8::Integer::New(16))->Int32Value());
  CHECK_EQ(32, context->Global()->Get(v8_str("16"))->Int32Value());
  CHECK_EQ(32, context->Global()->Get(v8_num(16))->Int32Value());
  CHECK_EQ(56, context->Global()->Get(v8::Integer::New(13))->Int32Value());
  CHECK_EQ(56, context->Global()->Get(v8_str("13"))->Int32Value());
  CHECK_EQ(56, context->Global()->Get(v8_num(13))->Int32Value());
}


// ReadOnly properties must ignore script and API writes; DontDelete
// properties must ignore delete.  (Test continues in the next chunk.)
THREADED_TEST(PropertyAttributes) {
  v8::HandleScope scope;
  LocalContext context;
  // read-only
  Local<String> prop = v8_str("read_only");
  context->Global()->Set(prop, v8_num(7), v8::ReadOnly);
  CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
  Script::Compile(v8_str("read_only = 9"))->Run();
  CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
  context->Global()->Set(prop, v8_num(10));
  CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
  // dont-delete
  prop = v8_str("dont_delete");
  context->Global()->Set(prop, v8_num(13), v8::DontDelete);
  CHECK_EQ(13, context->Global()->Get(prop)->Int32Value());
  Script::Compile(v8_str("delete dont_delete"))->Run();
  CHECK_EQ(13,
context->Global()->Get(prop)->Int32Value());
}


// v8::Array: Length tracks the highest index + 1, holes read as undefined,
// and script-created arrays are observable through the API.
THREADED_TEST(Array) {
  v8::HandleScope scope;
  LocalContext context;
  Local<v8::Array> array = v8::Array::New();
  CHECK_EQ(0, array->Length());
  CHECK(array->Get(v8::Integer::New(0))->IsUndefined());
  CHECK(!array->Has(0));
  CHECK(array->Get(v8::Integer::New(100))->IsUndefined());
  CHECK(!array->Has(100));
  array->Set(v8::Integer::New(2), v8_num(7));
  CHECK_EQ(3, array->Length());  // setting index 2 extends length to 3
  CHECK(!array->Has(0));
  CHECK(!array->Has(1));
  CHECK(array->Has(2));
  CHECK_EQ(7, array->Get(v8::Integer::New(2))->Int32Value());
  Local<Value> obj = Script::Compile(v8_str("[1, 2, 3]"))->Run();
  Local<v8::Array> arr = Local<v8::Array>::Cast(obj);
  CHECK_EQ(3, arr->Length());
  CHECK_EQ(1, arr->Get(v8::Integer::New(0))->Int32Value());
  CHECK_EQ(2, arr->Get(v8::Integer::New(1))->Int32Value());
  CHECK_EQ(3, arr->Get(v8::Integer::New(2))->Int32Value());
}


// Callback that packs its arguments into a new array and returns it.
v8::Handle<Value> HandleF(const v8::Arguments& args) {
  v8::HandleScope scope;
  ApiTestFuzzer::Fuzz();
  Local<v8::Array> result = v8::Array::New(args.Length());
  for (int i = 0; i < args.Length(); i++)
    result->Set(v8::Integer::New(i), args[i]);
  // Close() promotes the local result out of this scope.
  return scope.Close(result);
}


// Calling a native function with 0..4 arguments must deliver every argument
// in order (checked via the array HandleF builds).
// (Test continues in the next chunk.)
THREADED_TEST(Vector) {
  v8::HandleScope scope;
  Local<ObjectTemplate> global = ObjectTemplate::New();
  global->Set(v8_str("f"), v8::FunctionTemplate::New(HandleF));
  LocalContext context(0, global);

  const char* fun = "f()";
  Local<v8::Array> a0 =
      Local<v8::Array>::Cast(Script::Compile(String::New(fun))->Run());
  CHECK_EQ(0, a0->Length());

  const char* fun2 = "f(11)";
  Local<v8::Array> a1 =
      Local<v8::Array>::Cast(Script::Compile(String::New(fun2))->Run());
  CHECK_EQ(1, a1->Length());
  CHECK_EQ(11, a1->Get(v8::Integer::New(0))->Int32Value());

  const char* fun3 = "f(12, 13)";
  Local<v8::Array> a2 =
      Local<v8::Array>::Cast(Script::Compile(String::New(fun3))->Run());
  CHECK_EQ(2, a2->Length());
  CHECK_EQ(12, a2->Get(v8::Integer::New(0))->Int32Value());
  CHECK_EQ(13, a2->Get(v8::Integer::New(1))->Int32Value());

  const char* fun4 = "f(14, 15, 16)";
  Local<v8::Array> a3
= Local<v8::Array>::Cast(Script::Compile(String::New(fun4))->Run());
  CHECK_EQ(3, a3->Length());
  CHECK_EQ(14, a3->Get(v8::Integer::New(0))->Int32Value());
  CHECK_EQ(15, a3->Get(v8::Integer::New(1))->Int32Value());
  CHECK_EQ(16, a3->Get(v8::Integer::New(2))->Int32Value());

  const char* fun5 = "f(17, 18, 19, 20)";
  Local<v8::Array> a4 =
      Local<v8::Array>::Cast(Script::Compile(String::New(fun5))->Run());
  CHECK_EQ(4, a4->Length());
  CHECK_EQ(17, a4->Get(v8::Integer::New(0))->Int32Value());
  CHECK_EQ(18, a4->Get(v8::Integer::New(1))->Int32Value());
  CHECK_EQ(19, a4->Get(v8::Integer::New(2))->Int32Value());
  CHECK_EQ(20, a4->Get(v8::Integer::New(3))->Int32Value());
}


// Function::Call with 0..4 arguments must deliver every argument in order
// to a JS function (mirror of the Vector test for the Call API).
// (Test continues in the next chunk.)
THREADED_TEST(FunctionCall) {
  v8::HandleScope scope;
  LocalContext context;
  CompileRun(
      "function Foo() {"
      "  var result = [];"
      "  for (var i = 0; i < arguments.length; i++) {"
      "    result.push(arguments[i]);"
      "  }"
      "  return result;"
      "}");
  Local<Function> Foo =
      Local<Function>::Cast(context->Global()->Get(v8_str("Foo")));

  v8::Handle<Value>* args0 = NULL;
  Local<v8::Array> a0 = Local<v8::Array>::Cast(Foo->Call(Foo, 0, args0));
  CHECK_EQ(0, a0->Length());

  v8::Handle<Value> args1[] = { v8_num(1.1) };
  Local<v8::Array> a1 = Local<v8::Array>::Cast(Foo->Call(Foo, 1, args1));
  CHECK_EQ(1, a1->Length());
  CHECK_EQ(1.1, a1->Get(v8::Integer::New(0))->NumberValue());

  v8::Handle<Value> args2[] = { v8_num(2.2), v8_num(3.3) };
  Local<v8::Array> a2 = Local<v8::Array>::Cast(Foo->Call(Foo, 2, args2));
  CHECK_EQ(2, a2->Length());
  CHECK_EQ(2.2, a2->Get(v8::Integer::New(0))->NumberValue());
  CHECK_EQ(3.3, a2->Get(v8::Integer::New(1))->NumberValue());

  v8::Handle<Value> args3[] = { v8_num(4.4), v8_num(5.5), v8_num(6.6) };
  Local<v8::Array> a3 = Local<v8::Array>::Cast(Foo->Call(Foo, 3, args3));
  CHECK_EQ(3, a3->Length());
  CHECK_EQ(4.4, a3->Get(v8::Integer::New(0))->NumberValue());
  CHECK_EQ(5.5, a3->Get(v8::Integer::New(1))->NumberValue());
  CHECK_EQ(6.6, a3->Get(v8::Integer::New(2))->NumberValue());

  v8::Handle<Value> args4[] = { v8_num(7.7), v8_num(8.8),
v8_num(9.9), v8_num(10.11) };
  Local<v8::Array> a4 = Local<v8::Array>::Cast(Foo->Call(Foo, 4, args4));
  CHECK_EQ(4, a4->Length());
  CHECK_EQ(7.7, a4->Get(v8::Integer::New(0))->NumberValue());
  CHECK_EQ(8.8, a4->Get(v8::Integer::New(1))->NumberValue());
  CHECK_EQ(9.9, a4->Get(v8::Integer::New(2))->NumberValue());
  CHECK_EQ(10.11, a4->Get(v8::Integer::New(3))->NumberValue());
}


// Unbounded self-referential array growth, used to drive the heap out of
// memory in the tests below.
static const char* js_code_causing_out_of_memory =
    "var a = new Array(); while(true) a.push(a);";


// These tests run for a long time and prevent us from running tests
// that come after them so they cannot run in parallel.
TEST(OutOfMemory) {
  // It's not possible to read a snapshot into a heap with different dimensions.
  if (v8::internal::Snapshot::IsEnabled()) return;
  // Set heap limits.
  static const int K = 1024;
  v8::ResourceConstraints constraints;
  constraints.set_max_young_space_size(256 * K);
  constraints.set_max_old_space_size(4 * K * K);
  v8::SetResourceConstraints(&constraints);

  // Execute a script that causes out of memory.
  v8::HandleScope scope;
  LocalContext context;
  // With the exception ignored, running past the heap limit leaves the
  // context in the out-of-memory state instead of aborting.
  v8::V8::IgnoreOutOfMemoryException();
  Local<Script> script =
      Script::Compile(String::New(js_code_causing_out_of_memory));
  Local<Value> result = script->Run();

  // Check for out of memory state.
  CHECK(result.IsEmpty());
  CHECK(context->HasOutOfMemoryException());
}


// Callback that provokes an out-of-memory condition in a nested context and
// verifies the OOM state before returning the (empty) result.
v8::Handle<Value> ProvokeOutOfMemory(const v8::Arguments& args) {
  ApiTestFuzzer::Fuzz();

  v8::HandleScope scope;
  LocalContext context;
  Local<Script> script =
      Script::Compile(String::New(js_code_causing_out_of_memory));
  Local<Value> result = script->Run();

  // Check for out of memory state.
  CHECK(result.IsEmpty());
  CHECK(context->HasOutOfMemoryException());

  return result;
}


// Out-of-memory raised inside a native callback must propagate to the outer
// context.  (Test continues in the next chunk.)
TEST(OutOfMemoryNested) {
  // It's not possible to read a snapshot into a heap with different dimensions.
  if (v8::internal::Snapshot::IsEnabled()) return;
  // Set heap limits.
static const int K = 1024; v8::ResourceConstraints constraints; constraints.set_max_young_space_size(256 * K); constraints.set_max_old_space_size(4 * K * K); v8::SetResourceConstraints(&constraints); v8::HandleScope scope; Local<ObjectTemplate> templ = ObjectTemplate::New(); templ->Set(v8_str("ProvokeOutOfMemory"), v8::FunctionTemplate::New(ProvokeOutOfMemory)); LocalContext context(0, templ); v8::V8::IgnoreOutOfMemoryException(); Local<Value> result = CompileRun( "var thrown = false;" "try {" " ProvokeOutOfMemory();" "} catch (e) {" " thrown = true;" "}"); // Check for out of memory state. CHECK(result.IsEmpty()); CHECK(context->HasOutOfMemoryException()); } TEST(HugeConsStringOutOfMemory) { // It's not possible to read a snapshot into a heap with different dimensions. if (v8::internal::Snapshot::IsEnabled()) return; v8::HandleScope scope; LocalContext context; // Set heap limits. static const int K = 1024; v8::ResourceConstraints constraints; constraints.set_max_young_space_size(256 * K); constraints.set_max_old_space_size(2 * K * K); v8::SetResourceConstraints(&constraints); // Execute a script that causes out of memory. v8::V8::IgnoreOutOfMemoryException(); // Build huge string. This should fail with out of memory exception. Local<Value> result = CompileRun( "var str = Array.prototype.join.call({length: 513}, \"A\").toUpperCase();" "for (var i = 0; i < 22; i++) { str = str + str; }"); // Check for out of memory state. 
CHECK(result.IsEmpty()); CHECK(context->HasOutOfMemoryException()); } THREADED_TEST(ConstructCall) { v8::HandleScope scope; LocalContext context; CompileRun( "function Foo() {" " var result = [];" " for (var i = 0; i < arguments.length; i++) {" " result.push(arguments[i]);" " }" " return result;" "}"); Local<Function> Foo = Local<Function>::Cast(context->Global()->Get(v8_str("Foo"))); v8::Handle<Value>* args0 = NULL; Local<v8::Array> a0 = Local<v8::Array>::Cast(Foo->NewInstance(0, args0)); CHECK_EQ(0, a0->Length()); v8::Handle<Value> args1[] = { v8_num(1.1) }; Local<v8::Array> a1 = Local<v8::Array>::Cast(Foo->NewInstance(1, args1)); CHECK_EQ(1, a1->Length()); CHECK_EQ(1.1, a1->Get(v8::Integer::New(0))->NumberValue()); v8::Handle<Value> args2[] = { v8_num(2.2), v8_num(3.3) }; Local<v8::Array> a2 = Local<v8::Array>::Cast(Foo->NewInstance(2, args2)); CHECK_EQ(2, a2->Length()); CHECK_EQ(2.2, a2->Get(v8::Integer::New(0))->NumberValue()); CHECK_EQ(3.3, a2->Get(v8::Integer::New(1))->NumberValue()); v8::Handle<Value> args3[] = { v8_num(4.4), v8_num(5.5), v8_num(6.6) }; Local<v8::Array> a3 = Local<v8::Array>::Cast(Foo->NewInstance(3, args3)); CHECK_EQ(3, a3->Length()); CHECK_EQ(4.4, a3->Get(v8::Integer::New(0))->NumberValue()); CHECK_EQ(5.5, a3->Get(v8::Integer::New(1))->NumberValue()); CHECK_EQ(6.6, a3->Get(v8::Integer::New(2))->NumberValue()); v8::Handle<Value> args4[] = { v8_num(7.7), v8_num(8.8), v8_num(9.9), v8_num(10.11) }; Local<v8::Array> a4 = Local<v8::Array>::Cast(Foo->NewInstance(4, args4)); CHECK_EQ(4, a4->Length()); CHECK_EQ(7.7, a4->Get(v8::Integer::New(0))->NumberValue()); CHECK_EQ(8.8, a4->Get(v8::Integer::New(1))->NumberValue()); CHECK_EQ(9.9, a4->Get(v8::Integer::New(2))->NumberValue()); CHECK_EQ(10.11, a4->Get(v8::Integer::New(3))->NumberValue()); } static void CheckUncle(v8::TryCatch* try_catch) { CHECK(try_catch->HasCaught()); String::AsciiValue str_value(try_catch->Exception()); CHECK_EQ(*str_value, "uncle?"); try_catch->Reset(); } 
THREADED_TEST(ConversionException) { v8::HandleScope scope; LocalContext env; CompileRun( "function TestClass() { };" "TestClass.prototype.toString = function () { throw 'uncle?'; };" "var obj = new TestClass();"); Local<Value> obj = env->Global()->Get(v8_str("obj")); v8::TryCatch try_catch; Local<Value> to_string_result = obj->ToString(); CHECK(to_string_result.IsEmpty()); CheckUncle(&try_catch); Local<Value> to_number_result = obj->ToNumber(); CHECK(to_number_result.IsEmpty()); CheckUncle(&try_catch); Local<Value> to_integer_result = obj->ToInteger(); CHECK(to_integer_result.IsEmpty()); CheckUncle(&try_catch); Local<Value> to_uint32_result = obj->ToUint32(); CHECK(to_uint32_result.IsEmpty()); CheckUncle(&try_catch); Local<Value> to_int32_result = obj->ToInt32(); CHECK(to_int32_result.IsEmpty()); CheckUncle(&try_catch); Local<Value> to_object_result = v8::Undefined()->ToObject(); CHECK(to_object_result.IsEmpty()); CHECK(try_catch.HasCaught()); try_catch.Reset(); int32_t int32_value = obj->Int32Value(); CHECK_EQ(0, int32_value); CheckUncle(&try_catch); uint32_t uint32_value = obj->Uint32Value(); CHECK_EQ(0, uint32_value); CheckUncle(&try_catch); double number_value = obj->NumberValue(); CHECK_NE(0, IsNaN(number_value)); CheckUncle(&try_catch); int64_t integer_value = obj->IntegerValue(); CHECK_EQ(0.0, static_cast<double>(integer_value)); CheckUncle(&try_catch); } v8::Handle<Value> ThrowFromC(const v8::Arguments& args) { ApiTestFuzzer::Fuzz(); return v8::ThrowException(v8_str("konto")); } v8::Handle<Value> CCatcher(const v8::Arguments& args) { if (args.Length() < 1) return v8::Boolean::New(false); v8::HandleScope scope; v8::TryCatch try_catch; Local<Value> result = v8::Script::Compile(args[0]->ToString())->Run(); CHECK(!try_catch.HasCaught() || result.IsEmpty()); return v8::Boolean::New(try_catch.HasCaught()); } THREADED_TEST(APICatch) { v8::HandleScope scope; Local<ObjectTemplate> templ = ObjectTemplate::New(); templ->Set(v8_str("ThrowFromC"), 
v8::FunctionTemplate::New(ThrowFromC)); LocalContext context(0, templ); CompileRun( "var thrown = false;" "try {" " ThrowFromC();" "} catch (e) {" " thrown = true;" "}"); Local<Value> thrown = context->Global()->Get(v8_str("thrown")); CHECK(thrown->BooleanValue()); } THREADED_TEST(APIThrowTryCatch) { v8::HandleScope scope; Local<ObjectTemplate> templ = ObjectTemplate::New(); templ->Set(v8_str("ThrowFromC"), v8::FunctionTemplate::New(ThrowFromC)); LocalContext context(0, templ); v8::TryCatch try_catch; CompileRun("ThrowFromC();"); CHECK(try_catch.HasCaught()); } // Test that a try-finally block doesn't shadow a try-catch block // when setting up an external handler. // // BUG(271): Some of the exception propagation does not work on the // ARM simulator because the simulator separates the C++ stack and the // JS stack. This test therefore fails on the simulator. The test is // not threaded to allow the threading tests to run on the simulator. TEST(TryCatchInTryFinally) { v8::HandleScope scope; Local<ObjectTemplate> templ = ObjectTemplate::New(); templ->Set(v8_str("CCatcher"), v8::FunctionTemplate::New(CCatcher)); LocalContext context(0, templ); Local<Value> result = CompileRun("try {" " try {" " CCatcher('throw 7;');" " } finally {" " }" "} catch (e) {" "}"); CHECK(result->IsTrue()); } static void receive_message(v8::Handle<v8::Message> message, v8::Handle<v8::Value> data) { message->Get(); message_received = true; } TEST(APIThrowMessage) { message_received = false; v8::HandleScope scope; v8::V8::AddMessageListener(receive_message); Local<ObjectTemplate> templ = ObjectTemplate::New(); templ->Set(v8_str("ThrowFromC"), v8::FunctionTemplate::New(ThrowFromC)); LocalContext context(0, templ); CompileRun("ThrowFromC();"); CHECK(message_received); v8::V8::RemoveMessageListeners(check_message); } TEST(APIThrowMessageAndVerboseTryCatch) { message_received = false; v8::HandleScope scope; v8::V8::AddMessageListener(receive_message); Local<ObjectTemplate> templ = 
ObjectTemplate::New(); templ->Set(v8_str("ThrowFromC"), v8::FunctionTemplate::New(ThrowFromC)); LocalContext context(0, templ); v8::TryCatch try_catch; try_catch.SetVerbose(true); Local<Value> result = CompileRun("ThrowFromC();"); CHECK(try_catch.HasCaught()); CHECK(result.IsEmpty()); CHECK(message_received); v8::V8::RemoveMessageListeners(check_message); } THREADED_TEST(ExternalScriptException) { v8::HandleScope scope; Local<ObjectTemplate> templ = ObjectTemplate::New(); templ->Set(v8_str("ThrowFromC"), v8::FunctionTemplate::New(ThrowFromC)); LocalContext context(0, templ); v8::TryCatch try_catch; Local<Script> script = Script::Compile(v8_str("ThrowFromC(); throw 'panama';")); Local<Value> result = script->Run(); CHECK(result.IsEmpty()); CHECK(try_catch.HasCaught()); String::AsciiValue exception_value(try_catch.Exception()); CHECK_EQ("konto", *exception_value); } v8::Handle<Value> CThrowCountDown(const v8::Arguments& args) { ApiTestFuzzer::Fuzz(); CHECK_EQ(4, args.Length()); int count = args[0]->Int32Value(); int cInterval = args[2]->Int32Value(); if (count == 0) { return v8::ThrowException(v8_str("FromC")); } else { Local<v8::Object> global = Context::GetCurrent()->Global(); Local<Value> fun = global->Get(v8_str("JSThrowCountDown")); v8::Handle<Value> argv[] = { v8_num(count - 1), args[1], args[2], args[3] }; if (count % cInterval == 0) { v8::TryCatch try_catch; Local<Value> result = v8::Handle<Function>::Cast(fun)->Call(global, 4, argv); int expected = args[3]->Int32Value(); if (try_catch.HasCaught()) { CHECK_EQ(expected, count); CHECK(result.IsEmpty()); CHECK(!i::Top::has_scheduled_exception()); } else { CHECK_NE(expected, count); } return result; } else { return v8::Handle<Function>::Cast(fun)->Call(global, 4, argv); } } } v8::Handle<Value> JSCheck(const v8::Arguments& args) { ApiTestFuzzer::Fuzz(); CHECK_EQ(3, args.Length()); bool equality = args[0]->BooleanValue(); int count = args[1]->Int32Value(); int expected = args[2]->Int32Value(); if (equality) { 
CHECK_EQ(count, expected); } else { CHECK_NE(count, expected); } return v8::Undefined(); } THREADED_TEST(EvalInTryFinally) { v8::HandleScope scope; LocalContext context; v8::TryCatch try_catch; CompileRun("(function() {" " try {" " eval('asldkf (*&^&*^');" " } finally {" " return;" " }" "})()"); CHECK(!try_catch.HasCaught()); } // This test works by making a stack of alternating JavaScript and C // activations. These activations set up exception handlers with regular // intervals, one interval for C activations and another for JavaScript // activations. When enough activations have been created an exception is // thrown and we check that the right activation catches the exception and that // no other activations do. The right activation is always the topmost one with // a handler, regardless of whether it is in JavaScript or C. // // The notation used to describe a test case looks like this: // // *JS[4] *C[3] @JS[2] C[1] JS[0] // // Each entry is an activation, either JS or C. The index is the count at that // level. Stars identify activations with exception handlers, the @ identifies // the exception handler that should catch the exception. // // BUG(271): Some of the exception propagation does not work on the // ARM simulator because the simulator separates the C++ stack and the // JS stack. This test therefore fails on the simulator. The test is // not threaded to allow the threading tests to run on the simulator. 
// See the notation described in the comment block immediately above: builds
// alternating JS/C activations with handlers at the given intervals and
// verifies the topmost handler (and only it) catches the exception.
TEST(ExceptionOrder) {
  v8::HandleScope scope;
  Local<ObjectTemplate> templ = ObjectTemplate::New();
  templ->Set(v8_str("check"), v8::FunctionTemplate::New(JSCheck));
  templ->Set(v8_str("CThrowCountDown"),
             v8::FunctionTemplate::New(CThrowCountDown));
  LocalContext context(0, templ);
  CompileRun(
      "function JSThrowCountDown(count, jsInterval, cInterval, expected) {"
      " if (count == 0) throw 'FromJS';"
      " if (count % jsInterval == 0) {"
      " try {"
      " var value = CThrowCountDown(count - 1,"
      " jsInterval,"
      " cInterval,"
      " expected);"
      " check(false, count, expected);"
      " return value;"
      " } catch (e) {"
      " check(true, count, expected);"
      " }"
      " } else {"
      " return CThrowCountDown(count - 1, jsInterval, cInterval, expected);"
      " }"
      "}");
  Local<Function> fun =
      Local<Function>::Cast(context->Global()->Get(v8_str("JSThrowCountDown")));

  const int argc = 4;
  // count jsInterval cInterval expected

  // *JS[4] *C[3] @JS[2] C[1] JS[0]
  v8::Handle<Value> a0[argc] = { v8_num(4), v8_num(2), v8_num(3), v8_num(2) };
  fun->Call(fun, argc, a0);

  // JS[5] *C[4] JS[3] @C[2] JS[1] C[0]
  v8::Handle<Value> a1[argc] = { v8_num(5), v8_num(6), v8_num(1), v8_num(2) };
  fun->Call(fun, argc, a1);

  // JS[6] @C[5] JS[4] C[3] JS[2] C[1] JS[0]
  v8::Handle<Value> a2[argc] = { v8_num(6), v8_num(7), v8_num(5), v8_num(5) };
  fun->Call(fun, argc, a2);

  // @JS[6] C[5] JS[4] C[3] JS[2] C[1] JS[0]
  v8::Handle<Value> a3[argc] = { v8_num(6), v8_num(6), v8_num(7), v8_num(6) };
  fun->Call(fun, argc, a3);

  // JS[6] *C[5] @JS[4] C[3] JS[2] C[1] JS[0]
  v8::Handle<Value> a4[argc] = { v8_num(6), v8_num(4), v8_num(5), v8_num(4) };
  fun->Call(fun, argc, a4);

  // JS[6] C[5] *JS[4] @C[3] JS[2] C[1] JS[0]
  v8::Handle<Value> a5[argc] = { v8_num(6), v8_num(4), v8_num(3), v8_num(3) };
  fun->Call(fun, argc, a5);
}


// Native callback that re-throws its single argument unchanged.
v8::Handle<Value> ThrowValue(const v8::Arguments& args) {
  ApiTestFuzzer::Fuzz();
  CHECK_EQ(1, args.Length());
  return v8::ThrowException(args[0]);
}


// Values thrown from C must arrive in JS catch clauses with their identity
// and type intact (string, numbers incl. 0, null, undefined).
THREADED_TEST(ThrowValues) {
  v8::HandleScope scope;
  Local<ObjectTemplate> templ = ObjectTemplate::New();
  templ->Set(v8_str("Throw"), v8::FunctionTemplate::New(ThrowValue));
  LocalContext context(0, templ);
  v8::Handle<v8::Array> result = v8::Handle<v8::Array>::Cast(CompileRun(
      "function Run(obj) {"
      " try {"
      " Throw(obj);"
      " } catch (e) {"
      " return e;"
      " }"
      " return 'no exception';"
      "}"
      "[Run('str'), Run(1), Run(0), Run(null), Run(void 0)];"));
  CHECK_EQ(5, result->Length());
  CHECK(result->Get(v8::Integer::New(0))->IsString());
  CHECK(result->Get(v8::Integer::New(1))->IsNumber());
  CHECK_EQ(1, result->Get(v8::Integer::New(1))->Int32Value());
  CHECK(result->Get(v8::Integer::New(2))->IsNumber());
  CHECK_EQ(0, result->Get(v8::Integer::New(2))->Int32Value());
  CHECK(result->Get(v8::Integer::New(3))->IsNull());
  CHECK(result->Get(v8::Integer::New(4))->IsUndefined());
}


// Catching a thrown 0 must not be confused with "no exception".
THREADED_TEST(CatchZero) {
  v8::HandleScope scope;
  LocalContext context;
  v8::TryCatch try_catch;
  CHECK(!try_catch.HasCaught());
  Script::Compile(v8_str("throw 10"))->Run();
  CHECK(try_catch.HasCaught());
  CHECK_EQ(10, try_catch.Exception()->Int32Value());
  try_catch.Reset();
  CHECK(!try_catch.HasCaught());
  Script::Compile(v8_str("throw 0"))->Run();
  CHECK(try_catch.HasCaught());
  CHECK_EQ(0, try_catch.Exception()->Int32Value());
}


// Exceptions thrown inside a `with` scope must still reach the external
// TryCatch.
THREADED_TEST(CatchExceptionFromWith) {
  v8::HandleScope scope;
  LocalContext context;
  v8::TryCatch try_catch;
  CHECK(!try_catch.HasCaught());
  Script::Compile(v8_str("var o = {}; with (o) { throw 42; }"))->Run();
  CHECK(try_catch.HasCaught());
}


// Exercises Value::Equals (abstract equality) and Value::StrictEquals.
THREADED_TEST(Equality) {
  v8::HandleScope scope;
  LocalContext context;
  // Check that equality works at all before relying on CHECK_EQ
  CHECK(v8_str("a")->Equals(v8_str("a")));
  CHECK(!v8_str("a")->Equals(v8_str("b")));

  CHECK_EQ(v8_str("a"), v8_str("a"));
  CHECK_NE(v8_str("a"), v8_str("b"));
  CHECK_EQ(v8_num(1), v8_num(1));
  CHECK_EQ(v8_num(1.00), v8_num(1));
  CHECK_NE(v8_num(1), v8_num(2));

  // Assume String is not symbol.
  CHECK(v8_str("a")->StrictEquals(v8_str("a")));
  CHECK(!v8_str("a")->StrictEquals(v8_str("b")));
  CHECK(!v8_str("5")->StrictEquals(v8_num(5)));
  CHECK(v8_num(1)->StrictEquals(v8_num(1)));
  CHECK(!v8_num(1)->StrictEquals(v8_num(2)));
  CHECK(v8_num(0)->StrictEquals(v8_num(-0)));
  // NaN !== NaN per JS strict-equality semantics.
  Local<Value> not_a_number = v8_num(i::OS::nan_value());
  CHECK(!not_a_number->StrictEquals(not_a_number));
  CHECK(v8::False()->StrictEquals(v8::False()));
  CHECK(!v8::False()->StrictEquals(v8::Undefined()));

  // A persistent handle and the local it was created from refer to the same
  // object.
  v8::Handle<v8::Object> obj = v8::Object::New();
  v8::Persistent<v8::Object> alias = v8::Persistent<v8::Object>::New(obj);
  CHECK(alias->StrictEquals(obj));
  alias.Dispose();
}


// A compiled script can be run repeatedly.
THREADED_TEST(MultiRun) {
  v8::HandleScope scope;
  LocalContext context;
  Local<Script> script = Script::Compile(v8_str("x"));
  for (int i = 0; i < 10; i++)
    script->Run();
}


// Accessor getter for "x": checks the accessor data and name, returns the
// property name itself.
static v8::Handle<Value> GetXValue(Local<String> name,
                                   const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK_EQ(info.Data(), v8_str("donut"));
  CHECK_EQ(name, v8_str("x"));
  return name;
}


THREADED_TEST(SimplePropertyRead) {
  v8::HandleScope scope;
  Local<ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetAccessor(v8_str("x"), GetXValue, NULL, v8_str("donut"));
  LocalContext context;
  context->Global()->Set(v8_str("obj"), templ->NewInstance());
  Local<Script> script = Script::Compile(v8_str("obj.x"));
  // Repeat to exercise IC paths as well as the first (uncached) lookup.
  for (int i = 0; i < 10; i++) {
    Local<Value> result = script->Run();
    CHECK_EQ(result, v8_str("x"));
  }
}


// Captures the most recently written value of the "x" accessor below.
v8::Persistent<Value> xValue;


// Accessor setter for "x": validates its arguments and stores the value in
// the global xValue for the test to inspect.
static void SetXValue(Local<String> name,
                      Local<Value> value,
                      const AccessorInfo& info) {
  CHECK_EQ(value, v8_num(4));
  CHECK_EQ(info.Data(), v8_str("donut"));
  CHECK_EQ(name, v8_str("x"));
  CHECK(xValue.IsEmpty());
  xValue = v8::Persistent<Value>::New(value);
}


THREADED_TEST(SimplePropertyWrite) {
  v8::HandleScope scope;
  Local<ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetAccessor(v8_str("x"), GetXValue, SetXValue, v8_str("donut"));
  LocalContext context;
  context->Global()->Set(v8_str("obj"), templ->NewInstance());
  Local<Script> script = Script::Compile(v8_str("obj.x = 4"));
  for (int i = 0; i < 10; i++) {
    CHECK(xValue.IsEmpty());
    script->Run();
    CHECK_EQ(v8_num(4), xValue);
    xValue.Dispose();
    xValue = v8::Persistent<Value>();
  }
}


// Named interceptor getter: returns the property name; data must be
// undefined since none was supplied.
static v8::Handle<Value> XPropertyGetter(Local<String> property,
                                         const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(info.Data()->IsUndefined());
  return property;
}


THREADED_TEST(NamedInterceptorPropertyRead) {
  v8::HandleScope scope;
  Local<ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(XPropertyGetter);
  LocalContext context;
  context->Global()->Set(v8_str("obj"), templ->NewInstance());
  Local<Script> script = Script::Compile(v8_str("obj.x"));
  for (int i = 0; i < 10; i++) {
    Local<Value> result = script->Run();
    CHECK_EQ(result, v8_str("x"));
  }
}


// Indexed interceptor getter: intercepts only index 37 (returns 625);
// an empty handle lets all other indices fall through.
static v8::Handle<Value> IndexedPropertyGetter(uint32_t index,
                                               const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  if (index == 37) {
    return v8::Handle<Value>(v8_num(625));
  }
  return v8::Handle<Value>();
}


// Indexed interceptor setter: intercepts only index 39 (swallows the write);
// an empty handle lets all other indices fall through.
static v8::Handle<Value> IndexedPropertySetter(uint32_t index,
                                               Local<Value> value,
                                               const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  if (index == 39) {
    return value;
  }
  return v8::Handle<Value>();
}


// Interceptors take precedence over user-defined __defineGetter__/Setter__
// accessors for the indices they intercept, and fall through elsewhere.
THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
  v8::HandleScope scope;
  Local<ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetIndexedPropertyHandler(IndexedPropertyGetter,
                                   IndexedPropertySetter);
  LocalContext context;
  context->Global()->Set(v8_str("obj"), templ->NewInstance());
  Local<Script> getter_script = Script::Compile(v8_str(
      "obj.__defineGetter__(\"3\", function(){return 5;});obj[3];"));
  Local<Script> setter_script = Script::Compile(v8_str(
      "obj.__defineSetter__(\"17\", function(val){this.foo = val;});"
      "obj[17] = 23;"
      "obj.foo;"));
  Local<Script> interceptor_setter_script = Script::Compile(v8_str(
      "obj.__defineSetter__(\"39\", function(val){this.foo = \"hit\";});"
      "obj[39] = 47;"
      "obj.foo;"));  // This setter should not run, due to the interceptor.
  Local<Script> interceptor_getter_script = Script::Compile(v8_str(
      "obj[37];"));
  Local<Value> result = getter_script->Run();
  CHECK_EQ(v8_num(5), result);
  result = setter_script->Run();
  CHECK_EQ(v8_num(23), result);
  result = interceptor_setter_script->Run();
  CHECK_EQ(v8_num(23), result);  // foo unchanged: the interceptor ate obj[39].
  result = interceptor_getter_script->Run();
  CHECK_EQ(v8_num(625), result);
}


// Indexed interceptor getter that just echoes the index.
static v8::Handle<Value> IdentityIndexedPropertyGetter(
    uint32_t index,
    const AccessorInfo& info) {
  return v8::Integer::New(index);
}


// With a getter-only indexed interceptor, reads keep hitting the interceptor
// even after a (non-intercepted) write to the same index.
THREADED_TEST(IndexedInterceptorWithNoSetter) {
  v8::HandleScope scope;
  Local<ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
  LocalContext context;
  context->Global()->Set(v8_str("obj"), templ->NewInstance());
  const char* code =
      "try {"
      " obj[0] = 239;"
      " for (var i = 0; i < 100; i++) {"
      " var v = obj[0];"
      " if (v != 0) throw 'Wrong value ' + v + ' at iteration ' + i;"
      " }"
      " 'PASSED'"
      "} catch(e) {"
      " e"
      "}";
  ExpectString(code, "PASSED");
}


// Independent contexts don't share globals unless explicitly given the same
// global object; with a shared global, properties are visible but the global
// has been reinitialized.
THREADED_TEST(MultiContexts) {
  v8::HandleScope scope;
  v8::Handle<ObjectTemplate> templ = ObjectTemplate::New();
  templ->Set(v8_str("dummy"), v8::FunctionTemplate::New(DummyCallHandler));

  Local<String> password = v8_str("Password");

  // Create an environment
  LocalContext context0(0, templ);
  context0->SetSecurityToken(password);
  v8::Handle<v8::Object> global0 = context0->Global();
  global0->Set(v8_str("custom"), v8_num(1234));
  CHECK_EQ(1234, global0->Get(v8_str("custom"))->Int32Value());

  // Create an independent environment
  LocalContext context1(0, templ);
  context1->SetSecurityToken(password);
  v8::Handle<v8::Object> global1 = context1->Global();
  global1->Set(v8_str("custom"), v8_num(1234));
  CHECK_NE(global0, global1);
  CHECK_EQ(1234, global0->Get(v8_str("custom"))->Int32Value());
  CHECK_EQ(1234, global1->Get(v8_str("custom"))->Int32Value());

  // Now create a new context with the old global
  LocalContext context2(0, templ, global1);
  context2->SetSecurityToken(password);
  v8::Handle<v8::Object> global2 = context2->Global();
  CHECK_EQ(global1, global2);
  // Re-using the global in a new context resets its script-added properties.
  CHECK_EQ(0, global1->Get(v8_str("custom"))->Int32Value());
  CHECK_EQ(0, global2->Get(v8_str("custom"))->Int32Value());
}


THREADED_TEST(FunctionPrototypeAcrossContexts) {
  // Make sure that functions created by cloning boilerplates cannot
  // communicate through their __proto__ field.
  v8::HandleScope scope;

  LocalContext env0;
  v8::Handle<v8::Object> global0 = env0->Global();
  v8::Handle<v8::Object> object0 =
      v8::Handle<v8::Object>::Cast(global0->Get(v8_str("Object")));
  v8::Handle<v8::Object> tostring0 =
      v8::Handle<v8::Object>::Cast(object0->Get(v8_str("toString")));
  v8::Handle<v8::Object> proto0 =
      v8::Handle<v8::Object>::Cast(tostring0->Get(v8_str("__proto__")));
  proto0->Set(v8_str("custom"), v8_num(1234));

  LocalContext env1;
  v8::Handle<v8::Object> global1 = env1->Global();
  v8::Handle<v8::Object> object1 =
      v8::Handle<v8::Object>::Cast(global1->Get(v8_str("Object")));
  v8::Handle<v8::Object> tostring1 =
      v8::Handle<v8::Object>::Cast(object1->Get(v8_str("toString")));
  v8::Handle<v8::Object> proto1 =
      v8::Handle<v8::Object>::Cast(tostring1->Get(v8_str("__proto__")));
  CHECK(!proto1->Has(v8_str("custom")));
}


THREADED_TEST(Regress892105) {
  // Make sure that object and array literals created by cloning
  // boilerplates cannot communicate through their __proto__
  // field. This is rather difficult to check, but we try to add stuff
  // to Object.prototype and Array.prototype and create a new
  // environment. This should succeed.
  v8::HandleScope scope;

  Local<String> source = v8_str("Object.prototype.obj = 1234;"
                                "Array.prototype.arr = 4567;"
                                "8901");

  LocalContext env0;
  Local<Script> script0 = Script::Compile(source);
  CHECK_EQ(8901.0, script0->Run()->NumberValue());

  LocalContext env1;
  Local<Script> script1 = Script::Compile(source);
  CHECK_EQ(8901.0, script1->Run()->NumberValue());
}


// An object marked undetectable reads as typeof 'undefined' and is falsy,
// yet still supports property access and keeps object identity in && / ||.
THREADED_TEST(UndetectableObject) {
  v8::HandleScope scope;
  LocalContext env;

  Local<v8::FunctionTemplate> desc =
      v8::FunctionTemplate::New(0, v8::Handle<Value>());
  desc->InstanceTemplate()->MarkAsUndetectable();  // undetectable

  Local<v8::Object> obj = desc->GetFunction()->NewInstance();
  env->Global()->Set(v8_str("undetectable"), obj);

  ExpectString("undetectable.toString()", "[object Object]");
  ExpectString("typeof undetectable", "undefined");
  ExpectString("typeof(undetectable)", "undefined");
  ExpectBoolean("typeof undetectable == 'undefined'", true);
  ExpectBoolean("typeof undetectable == 'object'", false);
  ExpectBoolean("if (undetectable) { true; } else { false; }", false);
  ExpectBoolean("!undetectable", true);

  ExpectObject("true&&undetectable", obj);
  ExpectBoolean("false&&undetectable", false);
  ExpectBoolean("true||undetectable", true);
  ExpectObject("false||undetectable", obj);

  ExpectObject("undetectable&&true", obj);
  ExpectObject("undetectable&&false", obj);
  ExpectBoolean("undetectable||true", true);
  ExpectBoolean("undetectable||false", false);

  ExpectBoolean("undetectable==null", true);
  ExpectBoolean("null==undetectable", true);
  ExpectBoolean("undetectable==undefined", true);
  ExpectBoolean("undefined==undetectable", true);
  ExpectBoolean("undetectable==undetectable", true);

  ExpectBoolean("undetectable===null", false);
  ExpectBoolean("null===undetectable", false);
  ExpectBoolean("undetectable===undefined", false);
  ExpectBoolean("undefined===undetectable", false);
  ExpectBoolean("undetectable===undetectable", true);
}


// Same undetectability contract, but for strings created with
// String::NewUndetectable.
THREADED_TEST(UndetectableString) {
  v8::HandleScope scope;
  LocalContext env;

  Local<String> obj = String::NewUndetectable("foo");
  env->Global()->Set(v8_str("undetectable"), obj);

  ExpectString("undetectable", "foo");
  ExpectString("typeof undetectable", "undefined");
  ExpectString("typeof(undetectable)", "undefined");
  ExpectBoolean("typeof undetectable == 'undefined'", true);
  ExpectBoolean("typeof undetectable == 'string'", false);
  ExpectBoolean("if (undetectable) { true; } else { false; }", false);
  ExpectBoolean("!undetectable", true);

  ExpectObject("true&&undetectable", obj);
  ExpectBoolean("false&&undetectable", false);
  ExpectBoolean("true||undetectable", true);
  ExpectObject("false||undetectable", obj);

  ExpectObject("undetectable&&true", obj);
  ExpectObject("undetectable&&false", obj);
  ExpectBoolean("undetectable||true", true);
  ExpectBoolean("undetectable||false", false);

  ExpectBoolean("undetectable==null", true);
  ExpectBoolean("null==undetectable", true);
  ExpectBoolean("undetectable==undefined", true);
  ExpectBoolean("undefined==undetectable", true);
  ExpectBoolean("undetectable==undetectable", true);

  ExpectBoolean("undetectable===null", false);
  ExpectBoolean("null===undetectable", false);
  ExpectBoolean("undetectable===undefined", false);
  ExpectBoolean("undefined===undetectable", false);
  ExpectBoolean("undetectable===undetectable", true);
}


// Suppresses unused-value warnings in the type-check-only code below.
template <typename T> static void USE(T) { }


// This test is not intended to be run, just type checked.
// Type-check-only exercise of Persistent<T>::New for several handle types
// (see the comment above: never actually executed).
static void PersistentHandles() {
  USE(PersistentHandles);
  Local<String> str = v8_str("foo");
  v8::Persistent<String> p_str = v8::Persistent<String>::New(str);
  USE(p_str);
  Local<Script> scr = Script::Compile(v8_str(""));
  v8::Persistent<Script> p_scr = v8::Persistent<Script>::New(scr);
  USE(p_scr);
  Local<ObjectTemplate> templ = ObjectTemplate::New();
  v8::Persistent<ObjectTemplate> p_templ =
      v8::Persistent<ObjectTemplate>::New(templ);
  USE(p_templ);
}


// No-op native callback installed on the global template below.
static v8::Handle<Value> HandleLogDelegator(const v8::Arguments& args) {
  ApiTestFuzzer::Fuzz();
  return v8::Undefined();
}


// A function installed on the global ObjectTemplate passed to Context::New
// must be callable from script in that context.
THREADED_TEST(GlobalObjectTemplate) {
  v8::HandleScope handle_scope;
  Local<ObjectTemplate> global_template = ObjectTemplate::New();
  global_template->Set(v8_str("JSNI_Log"),
                       v8::FunctionTemplate::New(HandleLogDelegator));
  v8::Persistent<Context> context = Context::New(0, global_template);
  Context::Scope context_scope(context);
  Script::Compile(v8_str("JSNI_Log('LOG')"))->Run();
  context.Dispose();
}


static const char* kSimpleExtensionSource =
    "function Foo() {"
    " return 4;"
    "}";


// An explicitly configured extension's code is available in the new context.
THREADED_TEST(SimpleExtensions) {
  v8::HandleScope handle_scope;
  v8::RegisterExtension(new Extension("simpletest", kSimpleExtensionSource));
  const char* extension_names[] = { "simpletest" };
  v8::ExtensionConfiguration extensions(1, extension_names);
  v8::Handle<Context> context = Context::New(&extensions);
  Context::Scope lock(context);
  v8::Handle<Value> result = Script::Compile(v8_str("Foo()"))->Run();
  CHECK_EQ(result, v8::Integer::New(4));
}


static const char* kEvalExtensionSource1 =
    "function UseEval1() {"
    " var x = 42;"
    " return eval('x');"
    "}";


static const char* kEvalExtensionSource2 =
    "(function() {"
    " var x = 42;"
    " function e() {"
    " return eval('x');"
    " }"
    " this.UseEval2 = e;"
    "})()";


// eval must work inside extension code, both at function top level and from
// a closure installed on the global object.
THREADED_TEST(UseEvalFromExtension) {
  v8::HandleScope handle_scope;
  v8::RegisterExtension(new Extension("evaltest1", kEvalExtensionSource1));
  v8::RegisterExtension(new Extension("evaltest2", kEvalExtensionSource2));
  const char* extension_names[] = { "evaltest1", "evaltest2" };
  v8::ExtensionConfiguration extensions(2, extension_names);
  v8::Handle<Context> context = Context::New(&extensions);
  Context::Scope lock(context);
  v8::Handle<Value> result = Script::Compile(v8_str("UseEval1()"))->Run();
  CHECK_EQ(result, v8::Integer::New(42));
  result = Script::Compile(v8_str("UseEval2()"))->Run();
  CHECK_EQ(result, v8::Integer::New(42));
}


static const char* kWithExtensionSource1 =
    "function UseWith1() {"
    " var x = 42;"
    " with({x:87}) { return x; }"
    "}";


static const char* kWithExtensionSource2 =
    "(function() {"
    " var x = 42;"
    " function e() {"
    " with ({x:87}) { return x; }"
    " }"
    " this.UseWith2 = e;"
    "})()";


// `with` must work inside extension code; the with-scope binding (87) must
// shadow the enclosing local (42) in both variants.
THREADED_TEST(UseWithFromExtension) {
  v8::HandleScope handle_scope;
  v8::RegisterExtension(new Extension("withtest1", kWithExtensionSource1));
  v8::RegisterExtension(new Extension("withtest2", kWithExtensionSource2));
  const char* extension_names[] = { "withtest1", "withtest2" };
  v8::ExtensionConfiguration extensions(2, extension_names);
  v8::Handle<Context> context = Context::New(&extensions);
  Context::Scope lock(context);
  v8::Handle<Value> result = Script::Compile(v8_str("UseWith1()"))->Run();
  CHECK_EQ(result, v8::Integer::New(87));
  result = Script::Compile(v8_str("UseWith2()"))->Run();
  CHECK_EQ(result, v8::Integer::New(87));
}


// An auto-enabled extension is installed even without an explicit
// ExtensionConfiguration.
THREADED_TEST(AutoExtensions) {
  v8::HandleScope handle_scope;
  Extension* extension = new Extension("autotest", kSimpleExtensionSource);
  extension->set_auto_enable(true);
  v8::RegisterExtension(extension);
  v8::Handle<Context> context = Context::New();
  Context::Scope lock(context);
  v8::Handle<Value> result = Script::Compile(v8_str("Foo()"))->Run();
  CHECK_EQ(result, v8::Integer::New(4));
}


static const char* kSyntaxErrorInExtensionSource = "[";


// Test that a syntax error in an extension does not cause a fatal
// error but results in an empty context.
THREADED_TEST(SyntaxErrorExtensions) { v8::HandleScope handle_scope; v8::RegisterExtension(new Extension("syntaxerror", kSyntaxErrorInExtensionSource)); const char* extension_names[] = { "syntaxerror" }; v8::ExtensionConfiguration extensions(1, extension_names); v8::Handle<Context> context = Context::New(&extensions); CHECK(context.IsEmpty()); } static const char* kExceptionInExtensionSource = "throw 42"; // Test that an exception when installing an extension does not cause // a fatal error but results in an empty context. THREADED_TEST(ExceptionExtensions) { v8::HandleScope handle_scope; v8::RegisterExtension(new Extension("exception", kExceptionInExtensionSource)); const char* extension_names[] = { "exception" }; v8::ExtensionConfiguration extensions(1, extension_names); v8::Handle<Context> context = Context::New(&extensions); CHECK(context.IsEmpty()); } static void CheckDependencies(const char* name, const char* expected) { v8::HandleScope handle_scope; v8::ExtensionConfiguration config(1, &name); LocalContext context(&config); CHECK_EQ(String::New(expected), context->Global()->Get(v8_str("loaded"))); } /* * Configuration: * * /-- B <--\ * A <- -- D <-- E * \-- C <--/ */ THREADED_TEST(ExtensionDependency) { static const char* kEDeps[] = { "D" }; v8::RegisterExtension(new Extension("E", "this.loaded += 'E';", 1, kEDeps)); static const char* kDDeps[] = { "B", "C" }; v8::RegisterExtension(new Extension("D", "this.loaded += 'D';", 2, kDDeps)); static const char* kBCDeps[] = { "A" }; v8::RegisterExtension(new Extension("B", "this.loaded += 'B';", 1, kBCDeps)); v8::RegisterExtension(new Extension("C", "this.loaded += 'C';", 1, kBCDeps)); v8::RegisterExtension(new Extension("A", "this.loaded += 'A';")); CheckDependencies("A", "undefinedA"); CheckDependencies("B", "undefinedAB"); CheckDependencies("C", "undefinedAC"); CheckDependencies("D", "undefinedABCD"); CheckDependencies("E", "undefinedABCDE"); v8::HandleScope handle_scope; static const char* exts[2] = { "C", "E" 
}; v8::ExtensionConfiguration config(2, exts); LocalContext context(&config); CHECK_EQ(v8_str("undefinedACBDE"), context->Global()->Get(v8_str("loaded"))); } static const char* kExtensionTestScript = "native function A();" "native function B();" "native function C();" "function Foo(i) {" " if (i == 0) return A();" " if (i == 1) return B();" " if (i == 2) return C();" "}"; static v8::Handle<Value> CallFun(const v8::Arguments& args) { ApiTestFuzzer::Fuzz(); if (args.IsConstructCall()) { args.This()->Set(v8_str("data"), args.Data()); return v8::Null(); } return args.Data(); } class FunctionExtension : public Extension { public: FunctionExtension() : Extension("functiontest", kExtensionTestScript) { } virtual v8::Handle<v8::FunctionTemplate> GetNativeFunction( v8::Handle<String> name); }; static int lookup_count = 0; v8::Handle<v8::FunctionTemplate> FunctionExtension::GetNativeFunction( v8::Handle<String> name) { lookup_count++; if (name->Equals(v8_str("A"))) { return v8::FunctionTemplate::New(CallFun, v8::Integer::New(8)); } else if (name->Equals(v8_str("B"))) { return v8::FunctionTemplate::New(CallFun, v8::Integer::New(7)); } else if (name->Equals(v8_str("C"))) { return v8::FunctionTemplate::New(CallFun, v8::Integer::New(6)); } else { return v8::Handle<v8::FunctionTemplate>(); } } THREADED_TEST(FunctionLookup) { v8::RegisterExtension(new FunctionExtension()); v8::HandleScope handle_scope; static const char* exts[1] = { "functiontest" }; v8::ExtensionConfiguration config(1, exts); LocalContext context(&config); CHECK_EQ(3, lookup_count); CHECK_EQ(v8::Integer::New(8), Script::Compile(v8_str("Foo(0)"))->Run()); CHECK_EQ(v8::Integer::New(7), Script::Compile(v8_str("Foo(1)"))->Run()); CHECK_EQ(v8::Integer::New(6), Script::Compile(v8_str("Foo(2)"))->Run()); } THREADED_TEST(NativeFunctionConstructCall) { v8::RegisterExtension(new FunctionExtension()); v8::HandleScope handle_scope; static const char* exts[1] = { "functiontest" }; v8::ExtensionConfiguration config(1, exts); 
  LocalContext context(&config);
  for (int i = 0; i < 10; i++) {
    // Run a few times to ensure that allocation of objects doesn't
    // change behavior of a constructor function.
    CHECK_EQ(v8::Integer::New(8),
             Script::Compile(v8_str("(new A()).data"))->Run());
    CHECK_EQ(v8::Integer::New(7),
             Script::Compile(v8_str("(new B()).data"))->Run());
    CHECK_EQ(v8::Integer::New(6),
             Script::Compile(v8_str("(new C()).data"))->Run());
  }
}


// First fatal-error location/message seen; only the first report is kept.
static const char* last_location;
static const char* last_message;
void StoringErrorCallback(const char* location, const char* message) {
  if (last_location == NULL) {
    last_location = location;
    last_message = message;
  }
}


// ErrorReporting creates a circular extensions configuration and
// tests that the fatal error handler gets called.  This renders V8
// unusable and therefore this test cannot be run in parallel.
TEST(ErrorReporting) {
  v8::V8::SetFatalErrorHandler(StoringErrorCallback);
  // A and B depend on each other: an unresolvable cycle.
  static const char* aDeps[] = { "B" };
  v8::RegisterExtension(new Extension("A", "", 1, aDeps));
  static const char* bDeps[] = { "A" };
  v8::RegisterExtension(new Extension("B", "", 1, bDeps));
  last_location = NULL;
  v8::ExtensionConfiguration config(1, bDeps);
  v8::Handle<Context> context = Context::New(&config);
  CHECK(context.IsEmpty());
  CHECK_NE(last_location, NULL);
}


// Doubling a one-char string 30 times yields a ~1GB string on flattening.
static const char* js_code_causing_huge_string_flattening =
    "var str = 'X';"
    "for (var i = 0; i < 30; i++) {"
    " str = str + str;"
    "}"
    "str.match(/X/);";


// Fatal-error handler that ends the process successfully: reaching it is
// the expected outcome of RegexpOutOfMemory.
void OOMCallback(const char* location, const char* message) {
  exit(0);
}


TEST(RegexpOutOfMemory) {
  // Execute a script that causes out of memory when flattening a string.
  v8::HandleScope scope;
  v8::V8::SetFatalErrorHandler(OOMCallback);
  LocalContext context;
  Local<Script> script =
      Script::Compile(String::New(js_code_causing_huge_string_flattening));
  last_location = NULL;
  Local<Value> result = script->Run();
  CHECK(false);  // Should not return.
}


// Listener for a script compiled without a resource name: line/source-line
// queries must still be safe to call.
static void MissingScriptInfoMessageListener(v8::Handle<v8::Message> message,
                                             v8::Handle<Value> data) {
  CHECK_EQ(v8::Undefined(), data);
  CHECK(message->GetScriptResourceName()->IsUndefined());
  CHECK_EQ(v8::Undefined(), message->GetScriptResourceName());
  // Must not crash even though there is no script info.
  message->GetLineNumber();
  message->GetSourceLine();
}


THREADED_TEST(ErrorWithMissingScriptInfo) {
  v8::HandleScope scope;
  LocalContext context;
  v8::V8::AddMessageListener(MissingScriptInfoMessageListener);
  Script::Compile(v8_str("throw Error()"))->Run();
  v8::V8::RemoveMessageListeners(MissingScriptInfoMessageListener);
}


int global_index = 0;

// Small payload attached to weak handles; records its creation order.
class Snorkel {
 public:
  Snorkel() { index_ = global_index++; }
  int index_;
};

// Ring buffer of persistent handles plus a lazily compiled script; used by
// the WeakReference test to churn weak handles through GC.
class Whammy {
 public:
  Whammy() {
    cursor_ = 0;
  }
  ~Whammy() {
    script_.Dispose();
  }
  v8::Handle<Script> getScript() {
    if (script_.IsEmpty())
      script_ = v8::Persistent<Script>::New(v8_compile("({}).blammo"));
    return Local<Script>(*script_);
  }

 public:
  static const int kObjectCount = 256;
  int cursor_;
  v8::Persistent<v8::Object> objects_[kObjectCount];
  v8::Persistent<Script> script_;
};

// Weak callback: frees the Snorkel payload and clears the weak flag.
static void HandleWeakReference(v8::Persistent<v8::Value> obj, void* data) {
  Snorkel* snorkel = reinterpret_cast<Snorkel*>(data);
  delete snorkel;
  obj.ClearWeak();
}

// Named-property getter: links a fresh object after the previous slot's
// object, makes the previous handle weak, and advances the ring cursor.
v8::Handle<Value> WhammyPropertyGetter(Local<String> name,
                                       const AccessorInfo& info) {
  Whammy* whammy =
      static_cast<Whammy*>(v8::Handle<v8::External>::Cast(info.Data())->Value());
  v8::Persistent<v8::Object> prev = whammy->objects_[whammy->cursor_];

  v8::Handle<v8::Object> obj = v8::Object::New();
  v8::Persistent<v8::Object> global = v8::Persistent<v8::Object>::New(obj);
  if (!prev.IsEmpty()) {
    prev->Set(v8_str("next"), obj);
    prev.MakeWeak(new Snorkel(), &HandleWeakReference);
    whammy->objects_[whammy->cursor_].Clear();
  }
  whammy->objects_[whammy->cursor_] = global;
  whammy->cursor_ = (whammy->cursor_ + 1) % Whammy::kObjectCount;
  return whammy->getScript()->Run();
}

THREADED_TEST(WeakReference) {
  v8::HandleScope handle_scope;
  v8::Handle<v8::ObjectTemplate> templ=
      v8::ObjectTemplate::New();
  templ->SetNamedPropertyHandler(WhammyPropertyGetter,
                                 0, 0, 0, 0,
                                 v8::External::New(new Whammy()));
  // Include the gc extension so the script can force collections.
  const char* extension_list[] = { "v8/gc" };
  v8::ExtensionConfiguration extensions(1, extension_list);
  v8::Persistent<Context> context = Context::New(&extensions);
  Context::Scope context_scope(context);

  v8::Handle<v8::Object> interceptor = templ->NewInstance();
  context->Global()->Set(v8_str("whammy"), interceptor);
  // Every whammy.length access goes through WhammyPropertyGetter, churning
  // weak persistent handles; gc() then exercises the weak callbacks.
  const char* code =
      "var last;"
      "for (var i = 0; i < 10000; i++) {"
      " var obj = whammy.length;"
      " if (last) last.next = obj;"
      " last = obj;"
      "}"
      "gc();"
      "4";
  v8::Handle<Value> result = CompileRun(code);
  CHECK_EQ(4.0, result->NumberValue());
  context.Dispose();
}


// Bookkeeping for the scavenge-ordering test below.
static bool in_scavenge = false;
static int last = -1;


// Weak callback that runs first: disposes its handle and triggers a
// scavenge while `in_scavenge` is set.
static void ForceScavenge(v8::Persistent<v8::Value> obj, void* data) {
  CHECK_EQ(-1, last);
  last = 0;
  obj.Dispose();
  obj.Clear();
  in_scavenge = true;
  i::Heap::PerformScavenge();
  in_scavenge = false;
  *(reinterpret_cast<bool*>(data)) = true;
}


// Weak callback that must run second; records whether it was (incorrectly)
// invoked from within the scavenge above.
static void CheckIsNotInvokedInScavenge(v8::Persistent<v8::Value> obj,
                                        void* data) {
  CHECK_EQ(0, last);
  last = 1;
  *(reinterpret_cast<bool*>(data)) = in_scavenge;
  obj.Dispose();
  obj.Clear();
}


THREADED_TEST(NoWeakRefCallbacksInScavenge) {
  // Test verifies that scavenge cannot invoke WeakReferenceCallbacks.
  // Calling callbacks from scavenges is unsafe as objects held by those
  // handlers might have become strongly reachable, but scavenge doesn't
  // check that.
  v8::Persistent<Context> context = Context::New();
  Context::Scope context_scope(context);

  v8::Persistent<v8::Object> object_a;
  v8::Persistent<v8::Object> object_b;

  {
    v8::HandleScope handle_scope;
    object_b = v8::Persistent<v8::Object>::New(v8::Object::New());
    object_a = v8::Persistent<v8::Object>::New(v8::Object::New());
  }

  bool object_a_disposed = false;
  object_a.MakeWeak(&object_a_disposed, &ForceScavenge);
  bool released_in_scavenge = false;
  object_b.MakeWeak(&released_in_scavenge, &CheckIsNotInvokedInScavenge);

  while (!object_a_disposed) {
    i::Heap::CollectAllGarbage(false);
  }
  CHECK(!released_in_scavenge);
}


// The function under test in THREADED_TEST(Arguments); the callback checks
// it is the callee.
v8::Handle<Function> args_fun;


// Verifies callee, argument count, each argument, and out-of-range access;
// the GC at the end checks the Arguments object survives collection.
static v8::Handle<Value> ArgumentsTestCallback(const v8::Arguments& args) {
  ApiTestFuzzer::Fuzz();
  CHECK_EQ(args_fun, args.Callee());
  CHECK_EQ(3, args.Length());
  CHECK_EQ(v8::Integer::New(1), args[0]);
  CHECK_EQ(v8::Integer::New(2), args[1]);
  CHECK_EQ(v8::Integer::New(3), args[2]);
  CHECK_EQ(v8::Undefined(), args[3]);
  v8::HandleScope scope;
  i::Heap::CollectAllGarbage(false);
  return v8::Undefined();
}


THREADED_TEST(Arguments) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> global = ObjectTemplate::New();
  global->Set(v8_str("f"), v8::FunctionTemplate::New(ArgumentsTestCallback));
  LocalContext context(NULL, global);
  args_fun = v8::Handle<Function>::Cast(context->Global()->Get(v8_str("f")));
  v8_compile("f(1, 2, 3)")->Run();
}


// Interceptor getters that never block: an empty handle lets the lookup
// continue past the interceptor.
static v8::Handle<Value> NoBlockGetterX(Local<String> name,
                                        const AccessorInfo&) {
  return v8::Handle<Value>();
}


static v8::Handle<Value> NoBlockGetterI(uint32_t index,
                                        const AccessorInfo&) {
  return v8::Handle<Value>();
}


// Named deleter: intercepts only "foo" and refuses the deletion.
static v8::Handle<v8::Boolean> PDeleter(Local<String> name,
                                        const AccessorInfo&) {
  if (!name->Equals(v8_str("foo"))) {
    return v8::Handle<v8::Boolean>();  // not intercepted
  }
  return v8::False();  // intercepted, and don't delete the property
}


// Indexed deleter: intercepts only index 2 and refuses the deletion.
static v8::Handle<v8::Boolean> IDeleter(uint32_t index, const AccessorInfo&) {
  if (index != 2) {
    return v8::Handle<v8::Boolean>();  // not intercepted
  }
  return v8::False();  // intercepted, and don't delete the property
}


// Intercepted properties ("foo", index 2) resist `delete`; the rest
// delete normally.
THREADED_TEST(Deleter) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
  obj->SetNamedPropertyHandler(NoBlockGetterX, NULL, NULL, PDeleter, NULL);
  obj->SetIndexedPropertyHandler(NoBlockGetterI, NULL, NULL, IDeleter, NULL);
  LocalContext context;
  context->Global()->Set(v8_str("k"), obj->NewInstance());
  CompileRun(
    "k.foo = 'foo';"
    "k.bar = 'bar';"
    "k[2] = 2;"
    "k[4] = 4;");
  CHECK(v8_compile("delete k.foo")->Run()->IsFalse());
  CHECK(v8_compile("delete k.bar")->Run()->IsTrue());
  CHECK_EQ(v8_compile("k.foo")->Run(), v8_str("foo"));
  CHECK(v8_compile("k.bar")->Run()->IsUndefined());
  CHECK(v8_compile("delete k[2]")->Run()->IsFalse());
  CHECK(v8_compile("delete k[4]")->Run()->IsTrue());
  CHECK_EQ(v8_compile("k[2]")->Run(), v8_num(2));
  CHECK(v8_compile("k[4]")->Run()->IsUndefined());
}


// Named getter: intercepts exactly the names the NamedEnum enumerator
// reports, so the enumerated properties are gettable.
static v8::Handle<Value> GetK(Local<String> name, const AccessorInfo&) {
  ApiTestFuzzer::Fuzz();
  if (name->Equals(v8_str("foo")) ||
      name->Equals(v8_str("bar")) ||
      name->Equals(v8_str("baz"))) {
    return v8::Undefined();
  }
  return v8::Handle<Value>();
}


// Indexed getter matching the IndexedEnum enumerator (indices 0 and 1).
static v8::Handle<Value> IndexedGetK(uint32_t index, const AccessorInfo&) {
  ApiTestFuzzer::Fuzz();
  if (index == 0 || index == 1) return v8::Undefined();
  return v8::Handle<Value>();
}


// Named enumerator: reports foo, bar, baz.
static v8::Handle<v8::Array> NamedEnum(const AccessorInfo&) {
  ApiTestFuzzer::Fuzz();
  v8::Handle<v8::Array> result = v8::Array::New(3);
  result->Set(v8::Integer::New(0), v8_str("foo"));
  result->Set(v8::Integer::New(1), v8_str("bar"));
  result->Set(v8::Integer::New(2), v8_str("baz"));
  return result;
}


// Indexed enumerator: reports indices 0 and 1.
static v8::Handle<v8::Array> IndexedEnum(const AccessorInfo&) {
  ApiTestFuzzer::Fuzz();
  v8::Handle<v8::Array> result = v8::Array::New(2);
  result->Set(v8::Integer::New(0), v8_str("0"));
  result->Set(v8::Integer::New(1), v8_str("1"));
  return result;
}


THREADED_TEST(Enumerators) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
  obj->SetNamedPropertyHandler(GetK, NULL, NULL, NULL, NamedEnum);
  obj->SetIndexedPropertyHandler(IndexedGetK, NULL, NULL, NULL, IndexedEnum);
  LocalContext context;
  context->Global()->Set(v8_str("k"), obj->NewInstance());
  // Mix of real array indices, named properties, and numeric-looking names
  // that are NOT valid array indices (>= 2^32 - 1).
  v8::Handle<v8::Array> result = v8::Handle<v8::Array>::Cast(CompileRun(
    "k[10] = 0;"
    "k.a = 0;"
    "k[5] = 0;"
    "k.b = 0;"
    "k[4294967295] = 0;"
    "k.c = 0;"
    "k[4294967296] = 0;"
    "k.d = 0;"
    "k[140000] = 0;"
    "k.e = 0;"
    "k[30000000000] = 0;"
    "k.f = 0;"
    "var result = [];"
    "for (var prop in k) {"
    " result.push(prop);"
    "}"
    "result"));
  // Check that we get all the property names returned including the
  // ones from the enumerators in the right order: indexed properties
  // in numerical order, indexed interceptor properties, named
  // properties in insertion order, named interceptor properties.
  // This order is not mandated by the spec, so this test is just
  // documenting our behavior.
  CHECK_EQ(17, result->Length());
  // Indexed properties in numerical order.
  CHECK_EQ(v8_str("5"), result->Get(v8::Integer::New(0)));
  CHECK_EQ(v8_str("10"), result->Get(v8::Integer::New(1)));
  CHECK_EQ(v8_str("140000"), result->Get(v8::Integer::New(2)));
  CHECK_EQ(v8_str("4294967295"), result->Get(v8::Integer::New(3)));
  // Indexed interceptor properties in the order they are returned
  // from the enumerator interceptor.
  CHECK_EQ(v8_str("0"), result->Get(v8::Integer::New(4)));
  CHECK_EQ(v8_str("1"), result->Get(v8::Integer::New(5)));
  // Named properties in insertion order.
  CHECK_EQ(v8_str("a"), result->Get(v8::Integer::New(6)));
  CHECK_EQ(v8_str("b"), result->Get(v8::Integer::New(7)));
  CHECK_EQ(v8_str("c"), result->Get(v8::Integer::New(8)));
  CHECK_EQ(v8_str("4294967296"), result->Get(v8::Integer::New(9)));
  CHECK_EQ(v8_str("d"), result->Get(v8::Integer::New(10)));
  CHECK_EQ(v8_str("e"), result->Get(v8::Integer::New(11)));
  CHECK_EQ(v8_str("30000000000"), result->Get(v8::Integer::New(12)));
  CHECK_EQ(v8_str("f"), result->Get(v8::Integer::New(13)));
  // Named interceptor properties.
  CHECK_EQ(v8_str("foo"), result->Get(v8::Integer::New(14)));
  CHECK_EQ(v8_str("bar"), result->Get(v8::Integer::New(15)));
  CHECK_EQ(v8_str("baz"), result->Get(v8::Integer::New(16)));
}


int p_getter_count;
int p_getter_count2;


// Accessor getter used by RunHolderTest: the Holder() must always be the
// template instance o1 while This() is whichever prototype-chain receiver
// (o1..o4) the property was read through.
static v8::Handle<Value> PGetter(Local<String> name,
                                 const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  p_getter_count++;
  v8::Handle<v8::Object> global = Context::GetCurrent()->Global();
  CHECK_EQ(info.Holder(), global->Get(v8_str("o1")));
  if (name->Equals(v8_str("p1"))) {
    CHECK_EQ(info.This(), global->Get(v8_str("o1")));
  } else if (name->Equals(v8_str("p2"))) {
    CHECK_EQ(info.This(), global->Get(v8_str("o2")));
  } else if (name->Equals(v8_str("p3"))) {
    CHECK_EQ(info.This(), global->Get(v8_str("o3")));
  } else if (name->Equals(v8_str("p4"))) {
    CHECK_EQ(info.This(), global->Get(v8_str("o4")));
  }
  return v8::Undefined();
}


// Builds the o1 <- o2 <- o3 <- o4 prototype chain and reads p1..p4 through
// it, 10 times each (so a counting getter ends at 40).
static void RunHolderTest(v8::Handle<v8::ObjectTemplate> obj) {
  ApiTestFuzzer::Fuzz();
  LocalContext context;
  context->Global()->Set(v8_str("o1"), obj->NewInstance());
  CompileRun(
    "o1.__proto__ = { };"
    "var o2 = { __proto__: o1 };"
    "var o3 = { __proto__: o2 };"
    "var o4 = { __proto__: o3 };"
    "for (var i = 0; i < 10; i++) o4.p4;"
    "for (var i = 0; i < 10; i++) o3.p3;"
    "for (var i = 0; i < 10; i++) o2.p2;"
    "for (var i = 0; i < 10; i++) o1.p1;");
}


// Same holder/receiver checks as PGetter, but counting separately so the
// interceptor variant of the test can assert its own total.
static v8::Handle<Value> PGetter2(Local<String> name,
                                  const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  p_getter_count2++;
  v8::Handle<v8::Object> global = Context::GetCurrent()->Global();
  CHECK_EQ(info.Holder(), global->Get(v8_str("o1")));
  if (name->Equals(v8_str("p1"))) {
    CHECK_EQ(info.This(), global->Get(v8_str("o1")));
  } else if (name->Equals(v8_str("p2"))) {
    CHECK_EQ(info.This(), global->Get(v8_str("o2")));
  } else if (name->Equals(v8_str("p3"))) {
    CHECK_EQ(info.This(), global->Get(v8_str("o3")));
  } else if (name->Equals(v8_str("p4"))) {
    CHECK_EQ(info.This(), global->Get(v8_str("o4")));
  }
  return v8::Undefined();
}


// Holder semantics for accessors installed with SetAccessor.
THREADED_TEST(GetterHolders) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
  obj->SetAccessor(v8_str("p1"), PGetter);
  obj->SetAccessor(v8_str("p2"), PGetter);
  obj->SetAccessor(v8_str("p3"), PGetter);
  obj->SetAccessor(v8_str("p4"), PGetter);
  p_getter_count = 0;
  RunHolderTest(obj);
  CHECK_EQ(40, p_getter_count);
}


// Holder semantics for a named-property interceptor.
THREADED_TEST(PreInterceptorHolders) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
  obj->SetNamedPropertyHandler(PGetter2);
  p_getter_count2 = 0;
  RunHolderTest(obj);
  CHECK_EQ(40, p_getter_count2);
}


// Instances of the same template must be distinct objects that share a
// prototype.
THREADED_TEST(ObjectInstantiation) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetAccessor(v8_str("t"), PGetter2);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  for (int i = 0; i < 100; i++) {
    v8::HandleScope inner_scope;
    v8::Handle<v8::Object> obj = templ->NewInstance();
    CHECK_NE(obj, context->Global()->Get(v8_str("o")));
    context->Global()->Set(v8_str("o2"), obj);
    v8::Handle<Value> value =
        Script::Compile(v8_str("o.__proto__ === o2.__proto__"))->Run();
    CHECK_EQ(v8::True(), value);
    context->Global()->Set(v8_str("o"), obj);
  }
}


// WriteAscii contract: NUL-terminates only when the buffer has room for
// the full remainder of the string (buf is pre-filled with 0x1 to detect
// exactly which bytes were written).
THREADED_TEST(StringWrite) {
  v8::HandleScope scope;
  v8::Handle<String> str = v8_str("abcde");

  char buf[100];
  int len;

  // Unbounded write: full string plus terminating NUL.
  memset(buf, 0x1, sizeof(buf));
  len = str->WriteAscii(buf);
  CHECK_EQ(len, 5);
  CHECK_EQ(strncmp("abcde\0", buf, 6), 0);

  // Length shorter than the string: no NUL appended.
  memset(buf, 0x1, sizeof(buf));
  len = str->WriteAscii(buf, 0, 4);
  CHECK_EQ(len, 4);
  CHECK_EQ(strncmp("abcd\1", buf, 5), 0);

  memset(buf, 0x1, sizeof(buf));
  len = str->WriteAscii(buf, 0, 5);
  CHECK_EQ(len, 5);
  CHECK_EQ(strncmp("abcde\1", buf, 6), 0);

  memset(buf, 0x1, sizeof(buf));
  len = str->WriteAscii(buf, 0, 6);
  CHECK_EQ(len, 5);
  CHECK_EQ(strncmp("abcde\0", buf, 6), 0);

  // Start offset 4 with length -1 (rest of string): writes "e" plus NUL.
  memset(buf, 0x1, sizeof(buf));
  len = str->WriteAscii(buf, 4, -1);
  CHECK_EQ(len, 1);
  CHECK_EQ(strncmp("e\0", buf, 2), 0);

  memset(buf, 0x1, sizeof(buf));
  len = str->WriteAscii(buf, 4, 6);
  CHECK_EQ(len, 1);
  CHECK_EQ(strncmp("e\0", buf, 2), 0);
  // Exact-length write of the tail: no room for a NUL.
  memset(buf, 0x1, sizeof(buf));
  len = str->WriteAscii(buf, 4, 1);
  CHECK_EQ(len, 1);
  CHECK_EQ(strncmp("e\1", buf, 2), 0);
}


// ToArrayIndex: valid for decimal strings and non-negative numbers up to
// 2^32 - 1; empty handle otherwise.
THREADED_TEST(ToArrayIndex) {
  v8::HandleScope scope;
  LocalContext context;

  v8::Handle<String> str = v8_str("42");
  v8::Handle<v8::Uint32> index = str->ToArrayIndex();
  CHECK(!index.IsEmpty());
  CHECK_EQ(42.0, index->Uint32Value());
  str = v8_str("42asdf");
  index = str->ToArrayIndex();
  CHECK(index.IsEmpty());
  str = v8_str("-42");
  index = str->ToArrayIndex();
  CHECK(index.IsEmpty());
  str = v8_str("4294967295");
  index = str->ToArrayIndex();
  CHECK(!index.IsEmpty());
  CHECK_EQ(4294967295.0, index->Uint32Value());
  v8::Handle<v8::Number> num = v8::Number::New(1);
  index = num->ToArrayIndex();
  CHECK(!index.IsEmpty());
  CHECK_EQ(1.0, index->Uint32Value());
  num = v8::Number::New(-1);
  index = num->ToArrayIndex();
  CHECK(index.IsEmpty());
  v8::Handle<v8::Object> obj = v8::Object::New();
  index = obj->ToArrayIndex();
  CHECK(index.IsEmpty());
}


// Each Exception factory must produce an object whose "message" property
// is the string passed in.
THREADED_TEST(ErrorConstruction) {
  v8::HandleScope scope;
  LocalContext context;

  v8::Handle<String> foo = v8_str("foo");
  v8::Handle<String> message = v8_str("message");
  v8::Handle<Value> range_error = v8::Exception::RangeError(foo);
  CHECK(range_error->IsObject());
  v8::Handle<v8::Object> range_obj(v8::Handle<v8::Object>::Cast(range_error));
  CHECK(v8::Handle<v8::Object>::Cast(range_error)->Get(message)->Equals(foo));
  v8::Handle<Value> reference_error = v8::Exception::ReferenceError(foo);
  CHECK(reference_error->IsObject());
  CHECK(
      v8::Handle<v8::Object>::Cast(reference_error)->Get(message)->Equals(foo));
  v8::Handle<Value> syntax_error = v8::Exception::SyntaxError(foo);
  CHECK(syntax_error->IsObject());
  CHECK(v8::Handle<v8::Object>::Cast(syntax_error)->Get(message)->Equals(foo));
  v8::Handle<Value> type_error = v8::Exception::TypeError(foo);
  CHECK(type_error->IsObject());
  CHECK(v8::Handle<v8::Object>::Cast(type_error)->Get(message)->Equals(foo));
  v8::Handle<Value> error = v8::Exception::Error(foo);
  CHECK(error->IsObject());
  CHECK(v8::Handle<v8::Object>::Cast(error)->Get(message)->Equals(foo));
}


// Constant getter paired with a setter that deletes before re-setting.
static v8::Handle<Value> YGetter(Local<String> name,
                                 const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  return v8_num(10);
}


// Deleting the property from within the setter must not break the
// subsequent Set on the same receiver.
static void YSetter(Local<String> name,
                    Local<Value> value,
                    const AccessorInfo& info) {
  if (info.This()->Has(name)) {
    info.This()->Delete(name);
  }
  info.This()->Set(name, value);
}


THREADED_TEST(DeleteAccessor) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
  obj->SetAccessor(v8_str("y"), YGetter, YSetter);
  LocalContext context;
  v8::Handle<v8::Object> holder = obj->NewInstance();
  context->Global()->Set(v8_str("holder"), holder);
  v8::Handle<Value> result =
      CompileRun("holder.y = 11; holder.y = 12; holder.y");
  CHECK_EQ(12, result->Uint32Value());
}


// TypeSwitch::match returns 1 + the index of the first template the object
// is an instance of, or 0 for no match; repeated to exercise IC caching.
THREADED_TEST(TypeSwitch) {
  v8::HandleScope scope;
  v8::Handle<v8::FunctionTemplate> templ1 = v8::FunctionTemplate::New();
  v8::Handle<v8::FunctionTemplate> templ2 = v8::FunctionTemplate::New();
  v8::Handle<v8::FunctionTemplate> templ3 = v8::FunctionTemplate::New();
  v8::Handle<v8::FunctionTemplate> templs[3] = { templ1, templ2, templ3 };
  v8::Handle<v8::TypeSwitch> type_switch = v8::TypeSwitch::New(3, templs);
  LocalContext context;
  v8::Handle<v8::Object> obj0 = v8::Object::New();
  v8::Handle<v8::Object> obj1 = templ1->GetFunction()->NewInstance();
  v8::Handle<v8::Object> obj2 = templ2->GetFunction()->NewInstance();
  v8::Handle<v8::Object> obj3 = templ3->GetFunction()->NewInstance();
  for (int i = 0; i < 10; i++) {
    CHECK_EQ(0, type_switch->match(obj0));
    CHECK_EQ(1, type_switch->match(obj1));
    CHECK_EQ(2, type_switch->match(obj2));
    CHECK_EQ(3, type_switch->match(obj3));
    CHECK_EQ(3, type_switch->match(obj3));
    CHECK_EQ(2, type_switch->match(obj2));
    CHECK_EQ(1, type_switch->match(obj1));
    CHECK_EQ(0, type_switch->match(obj0));
  }
}


// For use within the TestSecurityHandler() test.
static bool g_security_callback_result = false; static bool NamedSecurityTestCallback(Local<v8::Object> global, Local<Value> name, v8::AccessType type, Local<Value> data) { // Always allow read access. if (type == v8::ACCESS_GET) return true; // Sometimes allow other access. return g_security_callback_result; } static bool IndexedSecurityTestCallback(Local<v8::Object> global, uint32_t key, v8::AccessType type, Local<Value> data) { // Always allow read access. if (type == v8::ACCESS_GET) return true; // Sometimes allow other access. return g_security_callback_result; } static int trouble_nesting = 0; static v8::Handle<Value> TroubleCallback(const v8::Arguments& args) { ApiTestFuzzer::Fuzz(); trouble_nesting++; // Call a JS function that throws an uncaught exception. Local<v8::Object> arg_this = Context::GetCurrent()->Global(); Local<Value> trouble_callee = (trouble_nesting == 3) ? arg_this->Get(v8_str("trouble_callee")) : arg_this->Get(v8_str("trouble_caller")); CHECK(trouble_callee->IsFunction()); return Function::Cast(*trouble_callee)->Call(arg_this, 0, NULL); } static int report_count = 0; static void ApiUncaughtExceptionTestListener(v8::Handle<v8::Message>, v8::Handle<Value>) { report_count++; } // Counts uncaught exceptions, but other tests running in parallel // also have uncaught exceptions. 
TEST(ApiUncaughtException) {
  report_count = 0;
  v8::HandleScope scope;
  LocalContext env;
  v8::V8::AddMessageListener(ApiUncaughtExceptionTestListener);

  Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(TroubleCallback);
  v8::Local<v8::Object> global = env->Global();
  global->Set(v8_str("trouble"), fun->GetFunction());

  // trouble_callee throws (null.foo); trouble_caller re-enters native code
  // via trouble(), which recurses until depth 3 calls the thrower.
  Script::Compile(v8_str("function trouble_callee() {"
                         " var x = null;"
                         " return x.foo;"
                         "};"
                         "function trouble_caller() {"
                         " trouble();"
                         "};"))->Run();
  Local<Value> trouble = global->Get(v8_str("trouble"));
  CHECK(trouble->IsFunction());
  Local<Value> trouble_callee = global->Get(v8_str("trouble_callee"));
  CHECK(trouble_callee->IsFunction());
  Local<Value> trouble_caller = global->Get(v8_str("trouble_caller"));
  CHECK(trouble_caller->IsFunction());
  Function::Cast(*trouble_caller)->Call(global, 0, NULL);
  // Exactly one report despite the nested native/JS frames.
  CHECK_EQ(1, report_count);
  v8::V8::RemoveMessageListeners(ApiUncaughtExceptionTestListener);
}


static const char* script_resource_name = "ExceptionInNativeScript.js";

// Verifies the message carries the resource name, line number, and source
// line of the throw site inside the named script.
static void ExceptionInNativeScriptTestListener(v8::Handle<v8::Message> message,
                                                v8::Handle<Value>) {
  v8::Handle<v8::Value> name_val = message->GetScriptResourceName();
  CHECK(!name_val.IsEmpty() && name_val->IsString());
  v8::String::AsciiValue name(message->GetScriptResourceName());
  CHECK_EQ(script_resource_name, *name);
  CHECK_EQ(3, message->GetLineNumber());
  v8::String::AsciiValue source_line(message->GetSourceLine());
  CHECK_EQ(" new o.foo();", *source_line);
}


TEST(ExceptionInNativeScript) {
  v8::HandleScope scope;
  LocalContext env;
  v8::V8::AddMessageListener(ExceptionInNativeScriptTestListener);

  Local<v8::FunctionTemplate> fun = v8::FunctionTemplate::New(TroubleCallback);
  v8::Local<v8::Object> global = env->Global();
  global->Set(v8_str("trouble"), fun->GetFunction());

  // `new o.foo()` on line 3 throws; the script is compiled with an
  // explicit resource name so the listener can check it.
  Script::Compile(v8_str("function trouble() {\n"
                         " var o = {};\n"
                         " new o.foo();\n"
                         "};"),
                  v8::String::New(script_resource_name))->Run();
  Local<Value> trouble = global->Get(v8_str("trouble"));
  CHECK(trouble->IsFunction());
  Function::Cast(*trouble)->Call(global, 0, NULL);
  v8::V8::RemoveMessageListeners(ExceptionInNativeScriptTestListener);
}


// A compile error must be catchable via TryCatch.
TEST(CompilationErrorUsingTryCatchHandler) {
  v8::HandleScope scope;
  LocalContext env;
  v8::TryCatch try_catch;
  Script::Compile(v8_str("This doesn't &*&@#$&*^ compile."));
  CHECK_NE(NULL, *try_catch.Exception());
  CHECK(try_catch.HasCaught());
}


// try/finally interaction with TryCatch: catch swallows, finally doesn't,
// and `return` in finally cancels the pending exception.
TEST(TryCatchFinallyUsingTryCatchHandler) {
  v8::HandleScope scope;
  LocalContext env;
  v8::TryCatch try_catch;
  Script::Compile(v8_str("try { throw ''; } catch (e) {}"))->Run();
  CHECK(!try_catch.HasCaught());
  Script::Compile(v8_str("try { throw ''; } finally {}"))->Run();
  CHECK(try_catch.HasCaught());
  try_catch.Reset();
  Script::Compile(v8_str("(function() {"
                         "try { throw ''; } finally { return; }"
                         "})()"))->Run();
  CHECK(!try_catch.HasCaught());
  Script::Compile(v8_str("(function()"
                         " { try { throw ''; } finally { throw 0; }"
                         "})()"))->Run();
  CHECK(try_catch.HasCaught());
}


// SecurityHandler can't be run twice
TEST(SecurityHandler) {
  v8::HandleScope scope0;
  v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();
  global_template->SetAccessCheckCallbacks(NamedSecurityTestCallback,
                                           IndexedSecurityTestCallback);
  // Create an environment
  v8::Persistent<Context> context0 = Context::New(NULL, global_template);
  context0->Enter();

  v8::Handle<v8::Object> global0 = context0->Global();
  v8::Handle<Script> script0 = v8_compile("foo = 111");
  script0->Run();
  global0->Set(v8_str("0"), v8_num(999));
  v8::Handle<Value> foo0 = global0->Get(v8_str("foo"));
  CHECK_EQ(111, foo0->Int32Value());
  v8::Handle<Value> z0 = global0->Get(v8_str("0"));
  CHECK_EQ(999, z0->Int32Value());

  // Create another environment, should fail security checks.
  v8::HandleScope scope1;

  v8::Persistent<Context> context1 = Context::New(NULL, global_template);
  context1->Enter();

  v8::Handle<v8::Object> global1 = context1->Global();
  global1->Set(v8_str("othercontext"), global0);
  // This set will fail the security check.
  v8::Handle<Script> script1 =
      v8_compile("othercontext.foo = 222; othercontext[0] = 888;");
  script1->Run();
  // This read will pass the security check.
  v8::Handle<Value> foo1 = global0->Get(v8_str("foo"));
  CHECK_EQ(111, foo1->Int32Value());
  // This read will pass the security check.
  v8::Handle<Value> z1 = global0->Get(v8_str("0"));
  CHECK_EQ(999, z1->Int32Value());

  // Create another environment, should pass security checks.
  {
    g_security_callback_result = true;  // allow security handler to pass.
    v8::HandleScope scope2;
    LocalContext context2;
    v8::Handle<v8::Object> global2 = context2->Global();
    global2->Set(v8_str("othercontext"), global0);
    v8::Handle<Script> script2 =
        v8_compile("othercontext.foo = 333; othercontext[0] = 888;");
    script2->Run();
    // With the flag set, the cross-context writes took effect.
    v8::Handle<Value> foo2 = global0->Get(v8_str("foo"));
    CHECK_EQ(333, foo2->Int32Value());
    v8::Handle<Value> z2 = global0->Get(v8_str("0"));
    CHECK_EQ(888, z2->Int32Value());
  }

  context1->Exit();
  context1.Dispose();

  context0->Exit();
  context0.Dispose();
}


// Same security token => cross-context calls succeed; different tokens =>
// accessing the other context's globals throws.
THREADED_TEST(SecurityChecks) {
  v8::HandleScope handle_scope;
  LocalContext env1;
  v8::Persistent<Context> env2 = Context::New();

  Local<Value> foo = v8_str("foo");
  Local<Value> bar = v8_str("bar");

  // Set to the same domain.
  env1->SetSecurityToken(foo);

  // Create a function in env1.
  Script::Compile(v8_str("spy=function(){return spy;}"))->Run();
  Local<Value> spy = env1->Global()->Get(v8_str("spy"));
  CHECK(spy->IsFunction());

  // Create another function accessing global objects.
  Script::Compile(v8_str("spy2=function(){return new this.Array();}"))->Run();
  Local<Value> spy2 = env1->Global()->Get(v8_str("spy2"));
  CHECK(spy2->IsFunction());

  // Switch to env2 in the same domain and invoke spy on env2.
  {
    env2->SetSecurityToken(foo);
    // Enter env2
    Context::Scope scope_env2(env2);
    Local<Value> result = Function::Cast(*spy)->Call(env2->Global(), 0, NULL);
    CHECK(result->IsFunction());
  }

  {
    env2->SetSecurityToken(bar);
    Context::Scope scope_env2(env2);

    // Call cross_domain_call, it should throw an exception
    v8::TryCatch try_catch;
    Function::Cast(*spy2)->Call(env2->Global(), 0, NULL);
    CHECK(try_catch.HasCaught());
  }

  env2.Dispose();
}


// Regression test case for issue 1183439.
THREADED_TEST(SecurityChecksForPrototypeChain) {
  v8::HandleScope scope;
  LocalContext current;
  v8::Persistent<Context> other = Context::New();

  // Change context to be able to get to the Object function in the
  // other context without hitting the security checks.
  v8::Local<Value> other_object;
  {
    Context::Scope scope(other);
    other_object = other->Global()->Get(v8_str("Object"));
    other->Global()->Set(v8_num(42), v8_num(87));
  }

  current->Global()->Set(v8_str("other"), other->Global());
  CHECK(v8_compile("other")->Run()->Equals(other->Global()));

  // Make sure the security check fails here and we get an undefined
  // result instead of getting the Object function. Repeat in a loop
  // to make sure to exercise the IC code.
  v8::Local<Script> access_other0 = v8_compile("other.Object");
  v8::Local<Script> access_other1 = v8_compile("other[42]");
  for (int i = 0; i < 5; i++) {
    CHECK(!access_other0->Run()->Equals(other_object));
    CHECK(access_other0->Run()->IsUndefined());
    CHECK(!access_other1->Run()->Equals(v8_num(87)));
    CHECK(access_other1->Run()->IsUndefined());
  }

  // Create an object that has 'other' in its prototype chain and make
  // sure we cannot access the Object function indirectly through
  // that. Repeat in a loop to make sure to exercise the IC code.
  v8_compile("function F() { };"
             "F.prototype = other;"
             "var f = new F();")->Run();
  v8::Local<Script> access_f0 = v8_compile("f.Object");
  v8::Local<Script> access_f1 = v8_compile("f[42]");
  for (int j = 0; j < 5; j++) {
    CHECK(!access_f0->Run()->Equals(other_object));
    CHECK(access_f0->Run()->IsUndefined());
    CHECK(!access_f1->Run()->Equals(v8_num(87)));
    CHECK(access_f1->Run()->IsUndefined());
  }

  // Now it gets hairy: Set the prototype for the other global object
  // to be the current global object.  The prototype chain for 'f' now
  // goes through 'other' but ends up in the current global object.
  {
    Context::Scope scope(other);
    other->Global()->Set(v8_str("__proto__"), current->Global());
  }
  // Set a named and an index property on the current global
  // object. To force the lookup to go through the other global object,
  // the properties must not exist in the other global object.
  current->Global()->Set(v8_str("foo"), v8_num(100));
  current->Global()->Set(v8_num(99), v8_num(101));
  // Try to read the properties from f and make sure that the access
  // gets stopped by the security checks on the other global object.
  Local<Script> access_f2 = v8_compile("f.foo");
  Local<Script> access_f3 = v8_compile("f[99]");
  for (int k = 0; k < 5; k++) {
    CHECK(!access_f2->Run()->Equals(v8_num(100)));
    CHECK(access_f2->Run()->IsUndefined());
    CHECK(!access_f3->Run()->Equals(v8_num(101)));
    CHECK(access_f3->Run()->IsUndefined());
  }
  other.Dispose();
}


// A cross-domain `delete` must fail and leave the property intact.
THREADED_TEST(CrossDomainDelete) {
  v8::HandleScope handle_scope;
  LocalContext env1;
  v8::Persistent<Context> env2 = Context::New();

  Local<Value> foo = v8_str("foo");
  Local<Value> bar = v8_str("bar");

  // Set to the same domain.
  env1->SetSecurityToken(foo);
  env2->SetSecurityToken(foo);

  env1->Global()->Set(v8_str("prop"), v8_num(3));
  env2->Global()->Set(v8_str("env1"), env1->Global());

  // Change env2 to a different domain and delete env1.prop.
  env2->SetSecurityToken(bar);
  {
    Context::Scope scope_env2(env2);
    Local<Value> result =
        Script::Compile(v8_str("delete env1.prop"))->Run();
    CHECK(result->IsFalse());
  }

  // Check that env1.prop still exists.
  Local<Value> v = env1->Global()->Get(v8_str("prop"));
  CHECK(v->IsNumber());
  CHECK_EQ(3, v->Int32Value());

  env2.Dispose();
}


// propertyIsEnumerable across contexts: true for same domain, false once
// the domains diverge.
THREADED_TEST(CrossDomainIsPropertyEnumerable) {
  v8::HandleScope handle_scope;
  LocalContext env1;
  v8::Persistent<Context> env2 = Context::New();

  Local<Value> foo = v8_str("foo");
  Local<Value> bar = v8_str("bar");

  // Set to the same domain.
  env1->SetSecurityToken(foo);
  env2->SetSecurityToken(foo);

  env1->Global()->Set(v8_str("prop"), v8_num(3));
  env2->Global()->Set(v8_str("env1"), env1->Global());

  // env1.prop is enumerable in env2.
  Local<String> test = v8_str("propertyIsEnumerable.call(env1, 'prop')");
  {
    Context::Scope scope_env2(env2);
    Local<Value> result = Script::Compile(test)->Run();
    CHECK(result->IsTrue());
  }

  // Change env2 to a different domain and test again.
  env2->SetSecurityToken(bar);
  {
    Context::Scope scope_env2(env2);
    Local<Value> result = Script::Compile(test)->Run();
    CHECK(result->IsFalse());
  }

  env2.Dispose();
}


THREADED_TEST(CrossDomainForIn) {
  v8::HandleScope handle_scope;
  LocalContext env1;
  v8::Persistent<Context> env2 = Context::New();

  Local<Value> foo = v8_str("foo");
  Local<Value> bar = v8_str("bar");

  // Set to the same domain.
  env1->SetSecurityToken(foo);
  env2->SetSecurityToken(foo);

  env1->Global()->Set(v8_str("prop"), v8_num(3));
  env2->Global()->Set(v8_str("env1"), env1->Global());

  // Change env2 to a different domain and set env1's global object
  // as the __proto__ of an object in env2 and enumerate properties
  // in for-in. It shouldn't enumerate properties on env1's global
  // object.
  env2->SetSecurityToken(bar);
  {
    Context::Scope scope_env2(env2);
    Local<Value> result =
        CompileRun("(function(){var obj = {'__proto__':env1};"
                   "for (var p in obj)"
                   " if (p == 'prop') return false;"
                   "return true;})()");
    CHECK(result->IsTrue());
  }
  env2.Dispose();
}


// Detaching env2's global and transplanting it into a new context (env3):
// the old global keeps its identity but loses env2's properties, and env1
// keeps working against the detached global via getProp.
TEST(ContextDetachGlobal) {
  v8::HandleScope handle_scope;
  LocalContext env1;
  v8::Persistent<Context> env2 = Context::New();

  Local<v8::Object> global1 = env1->Global();

  Local<Value> foo = v8_str("foo");

  // Set to the same domain.
  env1->SetSecurityToken(foo);
  env2->SetSecurityToken(foo);

  // Enter env2
  env2->Enter();

  // Create a function in env1
  Local<v8::Object> global2 = env2->Global();
  global2->Set(v8_str("prop"), v8::Integer::New(1));
  CompileRun("function getProp() {return prop;}");

  env1->Global()->Set(v8_str("getProp"),
                      global2->Get(v8_str("getProp")));

  // Detach env1's global, and reuse the global object of env1
  env2->Exit();
  env2->DetachGlobal();
  // env2 has a new global object.
  CHECK(!env2->Global()->Equals(global2));

  v8::Persistent<Context> env3 =
      Context::New(0, v8::Handle<v8::ObjectTemplate>(), global2);
  env3->SetSecurityToken(v8_str("bar"));
  env3->Enter();

  Local<v8::Object> global3 = env3->Global();
  // Same JS object as before, but with a fresh property set.
  CHECK_EQ(global2, global3);
  CHECK(global3->Get(v8_str("prop"))->IsUndefined());
  CHECK(global3->Get(v8_str("getProp"))->IsUndefined());
  global3->Set(v8_str("prop"), v8::Integer::New(-1));
  global3->Set(v8_str("prop2"), v8::Integer::New(2));
  env3->Exit();

  // Call getProp in env1, and it should return the value 1
  {
    Local<Value> get_prop = global1->Get(v8_str("getProp"));
    CHECK(get_prop->IsFunction());
    v8::TryCatch try_catch;
    Local<Value> r = Function::Cast(*get_prop)->Call(global1, 0, NULL);
    CHECK(!try_catch.HasCaught());
    CHECK_EQ(1, r->Int32Value());
  }

  // Check that env3 is not accessible from env1
  {
    Local<Value> r = global3->Get(v8_str("prop2"));
    CHECK(r->IsUndefined());
  }

  env2.Dispose();
  env3.Dispose();
}


// Access blockers: only grant access when the requesting context's own
// global is the object being accessed, i.e. block all cross-context use.
static bool NamedAccessBlocker(Local<v8::Object> global,
                               Local<Value> name,
                               v8::AccessType type,
                               Local<Value> data) {
  return Context::GetCurrent()->Global()->Equals(global);
}


static bool IndexedAccessBlocker(Local<v8::Object> global,
                                 uint32_t key,
                                 v8::AccessType type,
                                 Local<Value> data) {
  return Context::GetCurrent()->Global()->Equals(global);
}


// Backing store for the ALL_CAN_READ/ALL_CAN_WRITE accessor pair below.
static int g_echo_value = -1;
static v8::Handle<Value> EchoGetter(Local<String> name,
                                    const AccessorInfo& info) {
  return v8_num(g_echo_value);
}


static void EchoSetter(Local<String> name,
                       Local<Value> value,
                       const AccessorInfo&) {
  if (value->IsNumber())
    g_echo_value = value->Int32Value();
}


// Accessors for the blocked property: reaching either one is a test
// failure, since the access blockers must stop the lookup first.
static v8::Handle<Value> UnreachableGetter(Local<String> name,
                                           const AccessorInfo& info) {
  CHECK(false);  // This function should not be called.
  return v8::Undefined();
}


static void UnreachableSetter(Local<String>, Local<Value>,
                              const AccessorInfo&) {
  CHECK(false);  // This function should not be called.
}


THREADED_TEST(AccessControl) {
  v8::HandleScope handle_scope;
  v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New();

  global_template->SetAccessCheckCallbacks(NamedAccessBlocker,
                                           IndexedAccessBlocker);

  // Add an accessor accessible by cross-domain JS code.
  global_template->SetAccessor(
      v8_str("accessible_prop"),
      EchoGetter, EchoSetter,
      v8::Handle<Value>(),
      v8::AccessControl(v8::ALL_CAN_READ | v8::ALL_CAN_WRITE));

  // Add an accessor that is not accessible by cross-domain JS code.
  global_template->SetAccessor(v8_str("blocked_prop"),
                               UnreachableGetter, UnreachableSetter,
                               v8::Handle<Value>(),
                               v8::DEFAULT);

  // Create an environment
  v8::Persistent<Context> context0 = Context::New(NULL, global_template);
  context0->Enter();

  v8::Handle<v8::Object> global0 = context0->Global();

  v8::HandleScope scope1;

  v8::Persistent<Context> context1 = Context::New();
  context1->Enter();

  v8::Handle<v8::Object> global1 = context1->Global();
  global1->Set(v8_str("other"), global0);

  v8::Handle<Value> value;

  // Access blocked property
  value = v8_compile("other.blocked_prop = 1")->Run();
  value = v8_compile("other.blocked_prop")->Run();
  CHECK(value->IsUndefined());

  value =
      v8_compile("propertyIsEnumerable.call(other, 'blocked_prop')")->Run();
  CHECK(value->IsFalse());

  // Access accessible property
  value = v8_compile("other.accessible_prop = 3")->Run();
  CHECK(value->IsNumber());
  CHECK_EQ(3, value->Int32Value());

  value = v8_compile("other.accessible_prop")->Run();
  CHECK(value->IsNumber());
  CHECK_EQ(3, value->Int32Value());

  value =
      v8_compile("propertyIsEnumerable.call(other, 'accessible_prop')")->Run();
  CHECK(value->IsTrue());

  // Enumeration doesn't enumerate accessors from inaccessible objects in
  // the prototype chain even if the accessors are in themselves accessible.
Local<Value> result = CompileRun("(function(){var obj = {'__proto__':other};" "for (var p in obj)" " if (p == 'accessible_prop' || p == 'blocked_prop') {" " return false;" " }" "return true;})()"); CHECK(result->IsTrue()); context1->Exit(); context0->Exit(); context1.Dispose(); context0.Dispose(); } static bool GetOwnPropertyNamesNamedBlocker(Local<v8::Object> global, Local<Value> name, v8::AccessType type, Local<Value> data) { return false; } static bool GetOwnPropertyNamesIndexedBlocker(Local<v8::Object> global, uint32_t key, v8::AccessType type, Local<Value> data) { return false; } THREADED_TEST(AccessControlGetOwnPropertyNames) { v8::HandleScope handle_scope; v8::Handle<v8::ObjectTemplate> obj_template = v8::ObjectTemplate::New(); obj_template->Set(v8_str("x"), v8::Integer::New(42)); obj_template->SetAccessCheckCallbacks(GetOwnPropertyNamesNamedBlocker, GetOwnPropertyNamesIndexedBlocker); // Create an environment v8::Persistent<Context> context0 = Context::New(NULL, obj_template); context0->Enter(); v8::Handle<v8::Object> global0 = context0->Global(); v8::HandleScope scope1; v8::Persistent<Context> context1 = Context::New(); context1->Enter(); v8::Handle<v8::Object> global1 = context1->Global(); global1->Set(v8_str("other"), global0); global1->Set(v8_str("object"), obj_template->NewInstance()); v8::Handle<Value> value; // Attempt to get the property names of the other global object and // of an object that requires access checks. Accessing the other // global object should be blocked by access checks on the global // proxy object. Accessing the object that requires access checks // is blocked by the access checks on the object itself. 
value = CompileRun("Object.getOwnPropertyNames(other).length == 0"); CHECK(value->IsTrue()); value = CompileRun("Object.getOwnPropertyNames(object).length == 0"); CHECK(value->IsTrue()); context1->Exit(); context0->Exit(); context1.Dispose(); context0.Dispose(); } static v8::Handle<Value> ConstTenGetter(Local<String> name, const AccessorInfo& info) { return v8_num(10); } THREADED_TEST(CrossDomainAccessors) { v8::HandleScope handle_scope; v8::Handle<v8::FunctionTemplate> func_template = v8::FunctionTemplate::New(); v8::Handle<v8::ObjectTemplate> global_template = func_template->InstanceTemplate(); v8::Handle<v8::ObjectTemplate> proto_template = func_template->PrototypeTemplate(); // Add an accessor to proto that's accessible by cross-domain JS code. proto_template->SetAccessor(v8_str("accessible"), ConstTenGetter, 0, v8::Handle<Value>(), v8::ALL_CAN_READ); // Add an accessor that is not accessible by cross-domain JS code. global_template->SetAccessor(v8_str("unreachable"), UnreachableGetter, 0, v8::Handle<Value>(), v8::DEFAULT); v8::Persistent<Context> context0 = Context::New(NULL, global_template); context0->Enter(); Local<v8::Object> global = context0->Global(); // Add a normal property that shadows 'accessible' global->Set(v8_str("accessible"), v8_num(11)); // Enter a new context. 
v8::HandleScope scope1; v8::Persistent<Context> context1 = Context::New(); context1->Enter(); v8::Handle<v8::Object> global1 = context1->Global(); global1->Set(v8_str("other"), global); // Should return 10, instead of 11 v8::Handle<Value> value = v8_compile("other.accessible")->Run(); CHECK(value->IsNumber()); CHECK_EQ(10, value->Int32Value()); value = v8_compile("other.unreachable")->Run(); CHECK(value->IsUndefined()); context1->Exit(); context0->Exit(); context1.Dispose(); context0.Dispose(); } static int named_access_count = 0; static int indexed_access_count = 0; static bool NamedAccessCounter(Local<v8::Object> global, Local<Value> name, v8::AccessType type, Local<Value> data) { named_access_count++; return true; } static bool IndexedAccessCounter(Local<v8::Object> global, uint32_t key, v8::AccessType type, Local<Value> data) { indexed_access_count++; return true; } // This one is too easily disturbed by other tests. TEST(AccessControlIC) { named_access_count = 0; indexed_access_count = 0; v8::HandleScope handle_scope; // Create an environment. v8::Persistent<Context> context0 = Context::New(); context0->Enter(); // Create an object that requires access-check functions to be // called for cross-domain access. v8::Handle<v8::ObjectTemplate> object_template = v8::ObjectTemplate::New(); object_template->SetAccessCheckCallbacks(NamedAccessCounter, IndexedAccessCounter); Local<v8::Object> object = object_template->NewInstance(); v8::HandleScope scope1; // Create another environment. v8::Persistent<Context> context1 = Context::New(); context1->Enter(); // Make easy access to the object from the other environment. v8::Handle<v8::Object> global1 = context1->Global(); global1->Set(v8_str("obj"), object); v8::Handle<Value> value; // Check that the named access-control function is called every time. 
CompileRun("function testProp(obj) {" " for (var i = 0; i < 10; i++) obj.prop = 1;" " for (var j = 0; j < 10; j++) obj.prop;" " return obj.prop" "}"); value = CompileRun("testProp(obj)"); CHECK(value->IsNumber()); CHECK_EQ(1, value->Int32Value()); CHECK_EQ(21, named_access_count); // Check that the named access-control function is called every time. CompileRun("var p = 'prop';" "function testKeyed(obj) {" " for (var i = 0; i < 10; i++) obj[p] = 1;" " for (var j = 0; j < 10; j++) obj[p];" " return obj[p];" "}"); // Use obj which requires access checks. No inline caching is used // in that case. value = CompileRun("testKeyed(obj)"); CHECK(value->IsNumber()); CHECK_EQ(1, value->Int32Value()); CHECK_EQ(42, named_access_count); // Force the inline caches into generic state and try again. CompileRun("testKeyed({ a: 0 })"); CompileRun("testKeyed({ b: 0 })"); value = CompileRun("testKeyed(obj)"); CHECK(value->IsNumber()); CHECK_EQ(1, value->Int32Value()); CHECK_EQ(63, named_access_count); // Check that the indexed access-control function is called every time. CompileRun("function testIndexed(obj) {" " for (var i = 0; i < 10; i++) obj[0] = 1;" " for (var j = 0; j < 10; j++) obj[0];" " return obj[0]" "}"); value = CompileRun("testIndexed(obj)"); CHECK(value->IsNumber()); CHECK_EQ(1, value->Int32Value()); CHECK_EQ(21, indexed_access_count); // Force the inline caches into generic state. CompileRun("testIndexed(new Array(1))"); // Test that the indexed access check is called. value = CompileRun("testIndexed(obj)"); CHECK(value->IsNumber()); CHECK_EQ(1, value->Int32Value()); CHECK_EQ(42, indexed_access_count); // Check that the named access check is called when invoking // functions on an object that requires access checks. CompileRun("obj.f = function() {}"); CompileRun("function testCallNormal(obj) {" " for (var i = 0; i < 10; i++) obj.f();" "}"); CompileRun("testCallNormal(obj)"); CHECK_EQ(74, named_access_count); // Force obj into slow case. 
value = CompileRun("delete obj.prop"); CHECK(value->BooleanValue()); // Force inline caches into dictionary probing mode. CompileRun("var o = { x: 0 }; delete o.x; testProp(o);"); // Test that the named access check is called. value = CompileRun("testProp(obj);"); CHECK(value->IsNumber()); CHECK_EQ(1, value->Int32Value()); CHECK_EQ(96, named_access_count); // Force the call inline cache into dictionary probing mode. CompileRun("o.f = function() {}; testCallNormal(o)"); // Test that the named access check is still called for each // invocation of the function. value = CompileRun("testCallNormal(obj)"); CHECK_EQ(106, named_access_count); context1->Exit(); context0->Exit(); context1.Dispose(); context0.Dispose(); } static bool NamedAccessFlatten(Local<v8::Object> global, Local<Value> name, v8::AccessType type, Local<Value> data) { char buf[100]; int len; CHECK(name->IsString()); memset(buf, 0x1, sizeof(buf)); len = Local<String>::Cast(name)->WriteAscii(buf); CHECK_EQ(4, len); uint16_t buf2[100]; memset(buf, 0x1, sizeof(buf)); len = Local<String>::Cast(name)->Write(buf2); CHECK_EQ(4, len); return true; } static bool IndexedAccessFlatten(Local<v8::Object> global, uint32_t key, v8::AccessType type, Local<Value> data) { return true; } // Regression test. In access checks, operations that may cause // garbage collection are not allowed. It used to be the case that // using the Write operation on a string could cause a garbage // collection due to flattening of the string. This is no longer the // case. THREADED_TEST(AccessControlFlatten) { named_access_count = 0; indexed_access_count = 0; v8::HandleScope handle_scope; // Create an environment. v8::Persistent<Context> context0 = Context::New(); context0->Enter(); // Create an object that requires access-check functions to be // called for cross-domain access. 
v8::Handle<v8::ObjectTemplate> object_template = v8::ObjectTemplate::New(); object_template->SetAccessCheckCallbacks(NamedAccessFlatten, IndexedAccessFlatten); Local<v8::Object> object = object_template->NewInstance(); v8::HandleScope scope1; // Create another environment. v8::Persistent<Context> context1 = Context::New(); context1->Enter(); // Make easy access to the object from the other environment. v8::Handle<v8::Object> global1 = context1->Global(); global1->Set(v8_str("obj"), object); v8::Handle<Value> value; value = v8_compile("var p = 'as' + 'df';")->Run(); value = v8_compile("obj[p];")->Run(); context1->Exit(); context0->Exit(); context1.Dispose(); context0.Dispose(); } static v8::Handle<Value> AccessControlNamedGetter( Local<String>, const AccessorInfo&) { return v8::Integer::New(42); } static v8::Handle<Value> AccessControlNamedSetter( Local<String>, Local<Value> value, const AccessorInfo&) { return value; } static v8::Handle<Value> AccessControlIndexedGetter( uint32_t index, const AccessorInfo& info) { return v8_num(42); } static v8::Handle<Value> AccessControlIndexedSetter( uint32_t, Local<Value> value, const AccessorInfo&) { return value; } THREADED_TEST(AccessControlInterceptorIC) { named_access_count = 0; indexed_access_count = 0; v8::HandleScope handle_scope; // Create an environment. v8::Persistent<Context> context0 = Context::New(); context0->Enter(); // Create an object that requires access-check functions to be // called for cross-domain access. The object also has interceptors // interceptor. 
v8::Handle<v8::ObjectTemplate> object_template = v8::ObjectTemplate::New(); object_template->SetAccessCheckCallbacks(NamedAccessCounter, IndexedAccessCounter); object_template->SetNamedPropertyHandler(AccessControlNamedGetter, AccessControlNamedSetter); object_template->SetIndexedPropertyHandler(AccessControlIndexedGetter, AccessControlIndexedSetter); Local<v8::Object> object = object_template->NewInstance(); v8::HandleScope scope1; // Create another environment. v8::Persistent<Context> context1 = Context::New(); context1->Enter(); // Make easy access to the object from the other environment. v8::Handle<v8::Object> global1 = context1->Global(); global1->Set(v8_str("obj"), object); v8::Handle<Value> value; // Check that the named access-control function is called every time // eventhough there is an interceptor on the object. value = v8_compile("for (var i = 0; i < 10; i++) obj.x = 1;")->Run(); value = v8_compile("for (var i = 0; i < 10; i++) obj.x;" "obj.x")->Run(); CHECK(value->IsNumber()); CHECK_EQ(42, value->Int32Value()); CHECK_EQ(21, named_access_count); value = v8_compile("var p = 'x';")->Run(); value = v8_compile("for (var i = 0; i < 10; i++) obj[p] = 1;")->Run(); value = v8_compile("for (var i = 0; i < 10; i++) obj[p];" "obj[p]")->Run(); CHECK(value->IsNumber()); CHECK_EQ(42, value->Int32Value()); CHECK_EQ(42, named_access_count); // Check that the indexed access-control function is called every // time eventhough there is an interceptor on the object. 
value = v8_compile("for (var i = 0; i < 10; i++) obj[0] = 1;")->Run(); value = v8_compile("for (var i = 0; i < 10; i++) obj[0];" "obj[0]")->Run(); CHECK(value->IsNumber()); CHECK_EQ(42, value->Int32Value()); CHECK_EQ(21, indexed_access_count); context1->Exit(); context0->Exit(); context1.Dispose(); context0.Dispose(); } THREADED_TEST(Version) { v8::V8::GetVersion(); } static v8::Handle<Value> InstanceFunctionCallback(const v8::Arguments& args) { ApiTestFuzzer::Fuzz(); return v8_num(12); } THREADED_TEST(InstanceProperties) { v8::HandleScope handle_scope; LocalContext context; Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(); Local<ObjectTemplate> instance = t->InstanceTemplate(); instance->Set(v8_str("x"), v8_num(42)); instance->Set(v8_str("f"), v8::FunctionTemplate::New(InstanceFunctionCallback)); Local<Value> o = t->GetFunction()->NewInstance(); context->Global()->Set(v8_str("i"), o); Local<Value> value = Script::Compile(v8_str("i.x"))->Run(); CHECK_EQ(42, value->Int32Value()); value = Script::Compile(v8_str("i.f()"))->Run(); CHECK_EQ(12, value->Int32Value()); } static v8::Handle<Value> GlobalObjectInstancePropertiesGet(Local<String> key, const AccessorInfo&) { ApiTestFuzzer::Fuzz(); return v8::Handle<Value>(); } THREADED_TEST(GlobalObjectInstanceProperties) { v8::HandleScope handle_scope; Local<Value> global_object; Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(); t->InstanceTemplate()->SetNamedPropertyHandler( GlobalObjectInstancePropertiesGet); Local<ObjectTemplate> instance_template = t->InstanceTemplate(); instance_template->Set(v8_str("x"), v8_num(42)); instance_template->Set(v8_str("f"), v8::FunctionTemplate::New(InstanceFunctionCallback)); { LocalContext env(NULL, instance_template); // Hold on to the global object so it can be used again in another // environment initialization. 
global_object = env->Global(); Local<Value> value = Script::Compile(v8_str("x"))->Run(); CHECK_EQ(42, value->Int32Value()); value = Script::Compile(v8_str("f()"))->Run(); CHECK_EQ(12, value->Int32Value()); } { // Create new environment reusing the global object. LocalContext env(NULL, instance_template, global_object); Local<Value> value = Script::Compile(v8_str("x"))->Run(); CHECK_EQ(42, value->Int32Value()); value = Script::Compile(v8_str("f()"))->Run(); CHECK_EQ(12, value->Int32Value()); } } static v8::Handle<Value> ShadowFunctionCallback(const v8::Arguments& args) { ApiTestFuzzer::Fuzz(); return v8_num(42); } static int shadow_y; static int shadow_y_setter_call_count; static int shadow_y_getter_call_count; static void ShadowYSetter(Local<String>, Local<Value>, const AccessorInfo&) { shadow_y_setter_call_count++; shadow_y = 42; } static v8::Handle<Value> ShadowYGetter(Local<String> name, const AccessorInfo& info) { ApiTestFuzzer::Fuzz(); shadow_y_getter_call_count++; return v8_num(shadow_y); } static v8::Handle<Value> ShadowIndexedGet(uint32_t index, const AccessorInfo& info) { return v8::Handle<Value>(); } static v8::Handle<Value> ShadowNamedGet(Local<String> key, const AccessorInfo&) { return v8::Handle<Value>(); } THREADED_TEST(ShadowObject) { shadow_y = shadow_y_setter_call_count = shadow_y_getter_call_count = 0; v8::HandleScope handle_scope; Local<ObjectTemplate> global_template = v8::ObjectTemplate::New(); LocalContext context(NULL, global_template); Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(); t->InstanceTemplate()->SetNamedPropertyHandler(ShadowNamedGet); t->InstanceTemplate()->SetIndexedPropertyHandler(ShadowIndexedGet); Local<ObjectTemplate> proto = t->PrototypeTemplate(); Local<ObjectTemplate> instance = t->InstanceTemplate(); // Only allow calls of f on instances of t. 
Local<v8::Signature> signature = v8::Signature::New(t); proto->Set(v8_str("f"), v8::FunctionTemplate::New(ShadowFunctionCallback, Local<Value>(), signature)); proto->Set(v8_str("x"), v8_num(12)); instance->SetAccessor(v8_str("y"), ShadowYGetter, ShadowYSetter); Local<Value> o = t->GetFunction()->NewInstance(); context->Global()->Set(v8_str("__proto__"), o); Local<Value> value = Script::Compile(v8_str("propertyIsEnumerable(0)"))->Run(); CHECK(value->IsBoolean()); CHECK(!value->BooleanValue()); value = Script::Compile(v8_str("x"))->Run(); CHECK_EQ(12, value->Int32Value()); value = Script::Compile(v8_str("f()"))->Run(); CHECK_EQ(42, value->Int32Value()); Script::Compile(v8_str("y = 42"))->Run(); CHECK_EQ(1, shadow_y_setter_call_count); value = Script::Compile(v8_str("y"))->Run(); CHECK_EQ(1, shadow_y_getter_call_count); CHECK_EQ(42, value->Int32Value()); } THREADED_TEST(HiddenPrototype) { v8::HandleScope handle_scope; LocalContext context; Local<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(); t0->InstanceTemplate()->Set(v8_str("x"), v8_num(0)); Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(); t1->SetHiddenPrototype(true); t1->InstanceTemplate()->Set(v8_str("y"), v8_num(1)); Local<v8::FunctionTemplate> t2 = v8::FunctionTemplate::New(); t2->SetHiddenPrototype(true); t2->InstanceTemplate()->Set(v8_str("z"), v8_num(2)); Local<v8::FunctionTemplate> t3 = v8::FunctionTemplate::New(); t3->InstanceTemplate()->Set(v8_str("u"), v8_num(3)); Local<v8::Object> o0 = t0->GetFunction()->NewInstance(); Local<v8::Object> o1 = t1->GetFunction()->NewInstance(); Local<v8::Object> o2 = t2->GetFunction()->NewInstance(); Local<v8::Object> o3 = t3->GetFunction()->NewInstance(); // Setting the prototype on an object skips hidden prototypes. 
CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value()); o0->Set(v8_str("__proto__"), o1); CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value()); CHECK_EQ(1, o0->Get(v8_str("y"))->Int32Value()); o0->Set(v8_str("__proto__"), o2); CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value()); CHECK_EQ(1, o0->Get(v8_str("y"))->Int32Value()); CHECK_EQ(2, o0->Get(v8_str("z"))->Int32Value()); o0->Set(v8_str("__proto__"), o3); CHECK_EQ(0, o0->Get(v8_str("x"))->Int32Value()); CHECK_EQ(1, o0->Get(v8_str("y"))->Int32Value()); CHECK_EQ(2, o0->Get(v8_str("z"))->Int32Value()); CHECK_EQ(3, o0->Get(v8_str("u"))->Int32Value()); // Getting the prototype of o0 should get the first visible one // which is o3. Therefore, z should not be defined on the prototype // object. Local<Value> proto = o0->Get(v8_str("__proto__")); CHECK(proto->IsObject()); CHECK(Local<v8::Object>::Cast(proto)->Get(v8_str("z"))->IsUndefined()); } THREADED_TEST(GetterSetterExceptions) { v8::HandleScope handle_scope; LocalContext context; CompileRun( "function Foo() { };" "function Throw() { throw 5; };" "var x = { };" "x.__defineSetter__('set', Throw);" "x.__defineGetter__('get', Throw);"); Local<v8::Object> x = Local<v8::Object>::Cast(context->Global()->Get(v8_str("x"))); v8::TryCatch try_catch; x->Set(v8_str("set"), v8::Integer::New(8)); x->Get(v8_str("get")); x->Set(v8_str("set"), v8::Integer::New(8)); x->Get(v8_str("get")); x->Set(v8_str("set"), v8::Integer::New(8)); x->Get(v8_str("get")); x->Set(v8_str("set"), v8::Integer::New(8)); x->Get(v8_str("get")); } THREADED_TEST(Constructor) { v8::HandleScope handle_scope; LocalContext context; Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(); templ->SetClassName(v8_str("Fun")); Local<Function> cons = templ->GetFunction(); context->Global()->Set(v8_str("Fun"), cons); Local<v8::Object> inst = cons->NewInstance(); i::Handle<i::JSObject> obj = v8::Utils::OpenHandle(*inst); Local<Value> value = CompileRun("(new Fun()).constructor === Fun"); CHECK(value->BooleanValue()); } 
// A TypeError thrown for a missing method on a templated object must
// describe the receiver as '#<a Fun>' (class name), not '[object Fun]'.
THREADED_TEST(FunctionDescriptorException) {
  v8::HandleScope handle_scope;
  LocalContext context;
  Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
  templ->SetClassName(v8_str("Fun"));
  Local<Function> cons = templ->GetFunction();
  context->Global()->Set(v8_str("Fun"), cons);
  Local<Value> value = CompileRun(
    "function test() {"
    " try {"
    " (new Fun()).blah()"
    " } catch (e) {"
    " var str = String(e);"
    " if (str.indexOf('TypeError') == -1) return 1;"
    " if (str.indexOf('[object Fun]') != -1) return 2;"
    " if (str.indexOf('#<a Fun>') == -1) return 3;"
    " return 0;"
    " }"
    " return 4;"
    "}"
    "test();");
  CHECK_EQ(0, value->Int32Value());
}


// 'eval' reached through a with-scope must be resolved dynamically: the
// global eval sees the caller's locals, and a user-defined x.eval shadows
// the builtin.
THREADED_TEST(EvalAliasedDynamic) {
  v8::HandleScope scope;
  LocalContext current;

  // Tests where aliased eval can only be resolved dynamically.
  Local<Script> script =
      Script::Compile(v8_str("function f(x) { "
                             " var foo = 2;"
                             " with (x) { return eval('foo'); }"
                             "}"
                             "foo = 0;"
                             "result1 = f(new Object());"
                             "result2 = f(this);"
                             "var x = new Object();"
                             "x.eval = function(x) { return 1; };"
                             "result3 = f(x);"));
  script->Run();
  CHECK_EQ(2, current->Global()->Get(v8_str("result1"))->Int32Value());
  CHECK_EQ(0, current->Global()->Get(v8_str("result2"))->Int32Value());
  CHECK_EQ(1, current->Global()->Get(v8_str("result3"))->Int32Value());

  // With the global object in the with-scope, the aliased eval lookup
  // throws (caught here rather than propagated).
  v8::TryCatch try_catch;
  script =
      Script::Compile(v8_str("function f(x) { "
                             " var bar = 2;"
                             " with (x) { return eval('bar'); }"
                             "}"
                             "f(this)"));
  script->Run();
  CHECK(try_catch.HasCaught());
  try_catch.Reset();
}


// other.eval(...) runs in the OTHER context: it sees and creates that
// context's globals, never the caller's locals or globals.
THREADED_TEST(CrossEval) {
  v8::HandleScope scope;
  LocalContext other;
  LocalContext current;

  Local<String> token = v8_str("<security token>");
  other->SetSecurityToken(token);
  current->SetSecurityToken(token);

  // Setup reference from current to other.
  current->Global()->Set(v8_str("other"), other->Global());

  // Check that new variables are introduced in other context.
  Local<Script> script =
      Script::Compile(v8_str("other.eval('var foo = 1234')"));
  script->Run();
  Local<Value> foo = other->Global()->Get(v8_str("foo"));
  CHECK_EQ(1234, foo->Int32Value());
  CHECK(!current->Global()->Has(v8_str("foo")));

  // Check that writing to non-existing properties introduces them in
  // the other context.
  script = Script::Compile(v8_str("other.eval('na = 1234')"));
  script->Run();
  CHECK_EQ(1234, other->Global()->Get(v8_str("na"))->Int32Value());
  CHECK(!current->Global()->Has(v8_str("na")));

  // Check that global variables in current context are not visible in other
  // context.
  v8::TryCatch try_catch;
  script = Script::Compile(v8_str("var bar = 42; other.eval('bar');"));
  Local<Value> result = script->Run();
  CHECK(try_catch.HasCaught());
  try_catch.Reset();

  // Check that local variables in current context are not visible in other
  // context.
  script =
      Script::Compile(v8_str("(function() { "
                             " var baz = 87;"
                             " return other.eval('baz');"
                             "})();"));
  result = script->Run();
  CHECK(try_catch.HasCaught());
  try_catch.Reset();

  // Check that global variables in the other environment are visible
  // when evaluating code.
  other->Global()->Set(v8_str("bis"), v8_num(1234));
  script = Script::Compile(v8_str("other.eval('bis')"));
  CHECK_EQ(1234, script->Run()->Int32Value());
  CHECK(!try_catch.HasCaught());

  // Check that the 'this' pointer points to the global object evaluating
  // code.
  other->Global()->Set(v8_str("t"), other->Global());
  script = Script::Compile(v8_str("other.eval('this == t')"));
  result = script->Run();
  CHECK(result->IsTrue());
  CHECK(!try_catch.HasCaught());

  // Check that variables introduced in with-statement are not visible in
  // other context.
  script = Script::Compile(v8_str("with({x:2}){other.eval('x')}"));
  result = script->Run();
  CHECK(try_catch.HasCaught());
  try_catch.Reset();

  // Check that you cannot use 'eval.call' with another object than the
  // current global object.
  script =
      Script::Compile(v8_str("other.y = 1; eval.call(other, 'y')"));
  result = script->Run();
  CHECK(try_catch.HasCaught());
}


// Test that calling eval in a context which has been detached from
// its global throws an exception. This behavior is consistent with
// other JavaScript implementations.
THREADED_TEST(EvalInDetachedGlobal) {
  v8::HandleScope scope;

  v8::Persistent<Context> context0 = Context::New();
  v8::Persistent<Context> context1 = Context::New();

  // Setup function in context0 that uses eval from context0.
  context0->Enter();
  v8::Handle<v8::Value> fun =
      CompileRun("var x = 42;"
                 "(function() {"
                 " var e = eval;"
                 " return function(s) { return e(s); }"
                 "})()");
  context0->Exit();

  // Put the function into context1 and call it before and after
  // detaching the global. Before detaching, the call succeeds and
  // after detaching an exception is thrown.
  context1->Enter();
  context1->Global()->Set(v8_str("fun"), fun);
  v8::Handle<v8::Value> x_value = CompileRun("fun('x')");
  CHECK_EQ(42, x_value->Int32Value());
  context0->DetachGlobal();
  v8::TryCatch catcher;
  x_value = CompileRun("fun('x')");
  CHECK(x_value.IsEmpty());
  CHECK(catcher.HasCaught());
  context1->Exit();

  context1.Dispose();
  context0.Dispose();
}


// Cross-context eval of 'new Date(42)' forces lazy loading of builtins in
// the other context; it must produce a working Date there.
THREADED_TEST(CrossLazyLoad) {
  v8::HandleScope scope;
  LocalContext other;
  LocalContext current;

  Local<String> token = v8_str("<security token>");
  other->SetSecurityToken(token);
  current->SetSecurityToken(token);

  // Setup reference from current to other.
  current->Global()->Set(v8_str("other"), other->Global());

  // Trigger lazy loading in other context.
  Local<Script> script =
      Script::Compile(v8_str("other.eval('new Date(42)')"));
  Local<Value> value = script->Run();
  CHECK_EQ(42.0, value->NumberValue());
}


// Call-as-function handler: in a construct call with an int32 argument it
// returns the negated argument; otherwise it echoes the first argument.
static v8::Handle<Value> call_as_function(const v8::Arguments& args) {
  ApiTestFuzzer::Fuzz();
  if (args.IsConstructCall()) {
    if (args[0]->IsInt32()) {
      return v8_num(-args[0]->Int32Value());
    }
  }

  return args[0];
}


// Test that a call handler can be set for objects which will allow
// non-function objects created through the API to be called as
// functions.
THREADED_TEST(CallAsFunction) {
  v8::HandleScope scope;
  LocalContext context;

  Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New();
  Local<ObjectTemplate> instance_template = t->InstanceTemplate();
  instance_template->SetCallAsFunctionHandler(call_as_function);
  Local<v8::Object> instance = t->GetFunction()->NewInstance();
  context->Global()->Set(v8_str("obj"), instance);
  v8::TryCatch try_catch;
  Local<Value> value;
  CHECK(!try_catch.HasCaught());

  value = CompileRun("obj(42)");
  CHECK(!try_catch.HasCaught());
  CHECK_EQ(42, value->Int32Value());

  value = CompileRun("(function(o){return o(49)})(obj)");
  CHECK(!try_catch.HasCaught());
  CHECK_EQ(49, value->Int32Value());

  // test special case of call as function
  value = CompileRun("[obj]['0'](45)");
  CHECK(!try_catch.HasCaught());
  CHECK_EQ(45, value->Int32Value());

  value = CompileRun("obj.call = Function.prototype.call;"
                     "obj.call(null, 87)");
  CHECK(!try_catch.HasCaught());
  CHECK_EQ(87, value->Int32Value());

  // Regression tests for bug #1116356: Calling call through call/apply
  // must work for non-function receivers.
  const char* apply_99 = "Function.prototype.call.apply(obj, [this, 99])";
  value = CompileRun(apply_99);
  CHECK(!try_catch.HasCaught());
  CHECK_EQ(99, value->Int32Value());

  const char* call_17 = "Function.prototype.call.call(obj, this, 17)";
  value = CompileRun(call_17);
  CHECK(!try_catch.HasCaught());
  CHECK_EQ(17, value->Int32Value());

  // Check that the call-as-function handler can be called through
  // new.
  value = CompileRun("new obj(43)");
  CHECK(!try_catch.HasCaught());
  CHECK_EQ(-43, value->Int32Value());
}


// Returns the number of live handles in the current thread's handle
// scopes; used by HandleIteration below.
static int CountHandles() {
  return v8::HandleScope::NumberOfHandles();
}


// Allocates 'iterations' handles in a fresh scope at each of 'depth'
// recursion levels, then reports the total live handle count at the
// deepest level.
static int Recurse(int depth, int iterations) {
  v8::HandleScope scope;
  if (depth == 0) return CountHandles();
  for (int i = 0; i < iterations; i++) {
    Local<v8::Number> n = v8::Integer::New(42);
  }
  return Recurse(depth - 1, iterations);
}


// HandleScope bookkeeping: counts grow one-by-one inside a scope, nested
// scopes are discarded on exit, and nothing leaks across scope teardown.
THREADED_TEST(HandleIteration) {
  static const int kIterations = 500;
  static const int kNesting = 200;
  CHECK_EQ(0, CountHandles());
  {
    v8::HandleScope scope1;
    CHECK_EQ(0, CountHandles());
    for (int i = 0; i < kIterations; i++) {
      Local<v8::Number> n = v8::Integer::New(42);
      CHECK_EQ(i + 1, CountHandles());
    }

    CHECK_EQ(kIterations, CountHandles());
    {
      v8::HandleScope scope2;
      for (int j = 0; j < kIterations; j++) {
        Local<v8::Number> n = v8::Integer::New(42);
        CHECK_EQ(j + 1 + kIterations, CountHandles());
      }
    }
    CHECK_EQ(kIterations, CountHandles());
  }
  CHECK_EQ(0, CountHandles());
  CHECK_EQ(kNesting * kIterations, Recurse(kNesting, kIterations));
}


// Named interceptor that handles nothing; hasOwnProperty must therefore
// report only real own properties.
static v8::Handle<Value> InterceptorHasOwnPropertyGetter(
    Local<String> name,
    const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  return v8::Handle<Value>();
}


// hasOwnProperty on an object with a pass-through named interceptor:
// false before a real property is added, true after, and false again on a
// fresh instance.
THREADED_TEST(InterceptorHasOwnProperty) {
  v8::HandleScope scope;
  LocalContext context;
  Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
  Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
  instance_templ->SetNamedPropertyHandler(InterceptorHasOwnPropertyGetter);
  Local<Function> function = fun_templ->GetFunction();
  context->Global()->Set(v8_str("constructor"), function);
  v8::Handle<Value> value = CompileRun(
      "var o = new constructor();"
      "o.hasOwnProperty('ostehaps');");
  CHECK_EQ(false, value->BooleanValue());
  value = CompileRun(
      "o.ostehaps = 42;"
      "o.hasOwnProperty('ostehaps');");
  CHECK_EQ(true, value->BooleanValue());
  value = CompileRun(
      "var p = new constructor();"
      "p.hasOwnProperty('ostehaps');");
  CHECK_EQ(false, value->BooleanValue());
}


// Like InterceptorHasOwnPropertyGetter, but triggers a full GC from
// inside the interceptor to stress hasOwnProperty under collection.
static v8::Handle<Value> InterceptorHasOwnPropertyGetterGC(
    Local<String> name,
    const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  i::Heap::CollectAllGarbage(false);
  return v8::Handle<Value>();
}


// hasOwnProperty must stay correct even when the named interceptor causes
// a garbage collection while the lookup is in progress.
THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
  v8::HandleScope scope;
  LocalContext context;
  Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
  Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
  instance_templ->SetNamedPropertyHandler(InterceptorHasOwnPropertyGetterGC);
  Local<Function> function = fun_templ->GetFunction();
  context->Global()->Set(v8_str("constructor"), function);
  // Let's first make some stuff so we can be sure to get a good GC.
  CompileRun(
      "function makestr(size) {"
      " switch (size) {"
      " case 1: return 'f';"
      " case 2: return 'fo';"
      " case 3: return 'foo';"
      " }"
      " return makestr(size >> 1) + makestr((size + 1) >> 1);"
      "}"
      "var x = makestr(12345);"
      "x = makestr(31415);"
      "x = makestr(23456);");
  v8::Handle<Value> value = CompileRun(
      "var o = new constructor();"
      "o.__proto__ = new String(x);"
      "o.hasOwnProperty('ostehaps');");
  CHECK_EQ(false, value->BooleanValue());
}


// Signature of a named-property interceptor getter, for parameterizing
// the CheckInterceptorLoadIC driver below.
typedef v8::Handle<Value> (*NamedPropertyGetter)(Local<String> property,
                                                 const AccessorInfo& info);


// Shared driver for the InterceptorLoadIC* tests: installs 'getter' as
// the named interceptor on a fresh object 'o', runs 'source', and checks
// the script's final value against 'expected'.
static void CheckInterceptorLoadIC(NamedPropertyGetter getter,
                                   const char* source,
                                   int expected) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(getter);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(source);
  CHECK_EQ(expected, value->Int32Value());
}


// Interceptor getter used by InterceptorLoadIC: only 'x' is ever
// requested, and it always produces 42.
static v8::Handle<Value> InterceptorLoadICGetter(Local<String> name,
                                                 const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(v8_str("x")->Equals(name));
  return v8::Integer::New(42);
}


// This test should hit the load IC for the interceptor case.
THREADED_TEST(InterceptorLoadIC) { CheckInterceptorLoadIC(InterceptorLoadICGetter, "var result = 0;" "for (var i = 0; i < 1000; i++) {" " result = o.x;" "}", 42); } // Below go several tests which verify that JITing for various // configurations of interceptor and explicit fields works fine // (those cases are special cased to get better performance). static v8::Handle<Value> InterceptorLoadXICGetter(Local<String> name, const AccessorInfo& info) { ApiTestFuzzer::Fuzz(); return v8_str("x")->Equals(name) ? v8::Integer::New(42) : v8::Handle<v8::Value>(); } THREADED_TEST(InterceptorLoadICWithFieldOnHolder) { CheckInterceptorLoadIC(InterceptorLoadXICGetter, "var result = 0;" "o.y = 239;" "for (var i = 0; i < 1000; i++) {" " result = o.y;" "}", 239); } THREADED_TEST(InterceptorLoadICWithSubstitutedProto) { CheckInterceptorLoadIC(InterceptorLoadXICGetter, "var result = 0;" "o.__proto__ = { 'y': 239 };" "for (var i = 0; i < 1000; i++) {" " result = o.y + o.x;" "}", 239 + 42); } THREADED_TEST(InterceptorLoadICWithPropertyOnProto) { CheckInterceptorLoadIC(InterceptorLoadXICGetter, "var result = 0;" "o.__proto__.y = 239;" "for (var i = 0; i < 1000; i++) {" " result = o.y + o.x;" "}", 239 + 42); } THREADED_TEST(InterceptorLoadICUndefined) { CheckInterceptorLoadIC(InterceptorLoadXICGetter, "var result = 0;" "for (var i = 0; i < 1000; i++) {" " result = (o.y == undefined) ? 239 : 42;" "}", 239); } THREADED_TEST(InterceptorLoadICWithOverride) { CheckInterceptorLoadIC(InterceptorLoadXICGetter, "fst = new Object(); fst.__proto__ = o;" "snd = new Object(); snd.__proto__ = fst;" "var result1 = 0;" "for (var i = 0; i < 1000; i++) {" " result1 = snd.x;" "}" "fst.x = 239;" "var result = 0;" "for (var i = 0; i < 1000; i++) {" " result = snd.x;" "}" "result + result1", 239 + 42); } // Test the case when we stored field into // a stub, but interceptor produced value on its own. 
// The stub caches a field load from proto, but the interceptor still owns
// "x" and must supply 42 on every read.
THREADED_TEST(InterceptorLoadICFieldNotNeeded) {
  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
    "proto = new Object();"
    "o.__proto__ = proto;"
    "proto.x = 239;"
    "for (var i = 0; i < 1000; i++) {"
    " o.x;"  // Now it should be ICed and keep a reference to x defined on proto
    "}"
    "var result = 0;"
    "for (var i = 0; i < 1000; i++) {"
    " result += o.x;"
    "}"
    "result;",
    42 * 1000);
}


// Test the case when we stored field into
// a stub, but it got invalidated later on.
THREADED_TEST(InterceptorLoadICInvalidatedField) {
  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
    "proto1 = new Object();"
    "proto2 = new Object();"
    "o.__proto__ = proto1;"
    "proto1.__proto__ = proto2;"
    "proto2.y = 239;"
    "for (var i = 0; i < 1000; i++) {"
    " o.y;"  // Now it should be ICed and keep a reference to y defined on proto2
    "}"
    "proto1.y = 42;"  // shadows proto2.y, invalidating the cached field load
    "var result = 0;"
    "for (var i = 0; i < 1000; i++) {"
    " result += o.y;"
    "}"
    "result;",
    42 * 1000);
}


// Test the case when we stored field into
// a stub, but it got invalidated later on due to override on
// global object which is between interceptor and fields' holders.
THREADED_TEST(InterceptorLoadICInvalidatedFieldViaGlobal) {
  CheckInterceptorLoadIC(InterceptorLoadXICGetter,
    "o.__proto__ = this;"  // set a global to be a proto of o.
    "this.__proto__.y = 239;"
    "for (var i = 0; i < 10; i++) {"
    " if (o.y != 239) throw 'oops: ' + o.y;"
    // Now it should be ICed and keep a reference to y defined on field_holder.
    "}"
    "this.y = 42;"  // Assign on a global.
    "var result = 0;"
    "for (var i = 0; i < 10; i++) {"
    " result += o.y;"
    "}"
    "result;",
    42 * 10);
}


// Accessor getter used below: always produces the number 239.
static v8::Handle<Value> Return239(Local<String> name, const AccessorInfo&) {
  ApiTestFuzzer::Fuzz();
  return v8_num(239);
}


// Accessor setter used below: force-sets the property directly on the
// receiver, bypassing accessors/interceptors.
static void SetOnThis(Local<String> name,
                      Local<Value> value,
                      const AccessorInfo& info) {
  info.This()->ForceSet(name, value);
}


// A native accessor ("y" -> 239) on the holder must win over the declining
// interceptor once the load IC is warm.
THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
  templ->SetAccessor(v8_str("y"), Return239);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      " result = o.y;"
      "}");
  CHECK_EQ(239, value->Int32Value());
}


// Same, but the accessor lives on an object inserted as o's prototype.
THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
  v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
  templ_p->SetAccessor(v8_str("y"), Return239);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ_o->NewInstance());
  context->Global()->Set(v8_str("p"), templ_p->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "o.__proto__ = p;"
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      " result = o.x + o.y;"
      "}");
  CHECK_EQ(239 + 42, value->Int32Value());
}


// Warm a callback-backed load through a prototype chain, then shadow it on
// an intermediate object; the cached load must pick up the override.
THREADED_TEST(InterceptorLoadICForCallbackWithOverride) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
  templ->SetAccessor(v8_str("y"), Return239);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "fst = new Object(); fst.__proto__ = o;"
      "snd = new Object(); snd.__proto__ = fst;"
      "var result1 = 0;"
      "for (var i = 0; i < 7; i++) {"
      " result1 = snd.x;"
      "}"
      "fst.x = 239;"
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      " result = snd.x;"
      "}"
      "result + result1");
  CHECK_EQ(239 + 42, value->Int32Value());
}


// Test the case when we stored callback into
// a stub, but interceptor produced value on its own.
THREADED_TEST(InterceptorLoadICCallbackNotNeeded) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New();
  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
  v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New();
  templ_p->SetAccessor(v8_str("y"), Return239);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ_o->NewInstance());
  context->Global()->Set(v8_str("p"), templ_p->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "o.__proto__ = p;"
      "for (var i = 0; i < 7; i++) {"
      " o.x;"  // Now it should be ICed and keep a reference to x defined on p
      "}"
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      " result += o.x;"
      "}"
      "result");
  CHECK_EQ(42 * 7, value->Int32Value());
}


// Test the case when we stored callback into
// a stub, but it got invalidated later on.
THREADED_TEST(InterceptorLoadICInvalidatedCallback) { v8::HandleScope scope; v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(); templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter); v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(); templ_p->SetAccessor(v8_str("y"), Return239, SetOnThis); LocalContext context; context->Global()->Set(v8_str("o"), templ_o->NewInstance()); context->Global()->Set(v8_str("p"), templ_p->NewInstance()); v8::Handle<Value> value = CompileRun( "inbetween = new Object();" "o.__proto__ = inbetween;" "inbetween.__proto__ = p;" "for (var i = 0; i < 10; i++) {" " o.y;" // Now it should be ICed and keep a reference to y defined on p "}" "inbetween.y = 42;" "var result = 0;" "for (var i = 0; i < 10; i++) {" " result += o.y;" "}" "result"); CHECK_EQ(42 * 10, value->Int32Value()); } // Test the case when we stored callback into // a stub, but it got invalidated later on due to override on // global object which is between interceptor and callbacks' holders. 
THREADED_TEST(InterceptorLoadICInvalidatedCallbackViaGlobal) { v8::HandleScope scope; v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(); templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter); v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(); templ_p->SetAccessor(v8_str("y"), Return239, SetOnThis); LocalContext context; context->Global()->Set(v8_str("o"), templ_o->NewInstance()); context->Global()->Set(v8_str("p"), templ_p->NewInstance()); v8::Handle<Value> value = CompileRun( "o.__proto__ = this;" "this.__proto__ = p;" "for (var i = 0; i < 10; i++) {" " if (o.y != 239) throw 'oops: ' + o.y;" // Now it should be ICed and keep a reference to y defined on p "}" "this.y = 42;" "var result = 0;" "for (var i = 0; i < 10; i++) {" " result += o.y;" "}" "result"); CHECK_EQ(42 * 10, value->Int32Value()); } static v8::Handle<Value> InterceptorLoadICGetter0(Local<String> name, const AccessorInfo& info) { ApiTestFuzzer::Fuzz(); CHECK(v8_str("x")->Equals(name)); return v8::Integer::New(0); } THREADED_TEST(InterceptorReturningZero) { CheckInterceptorLoadIC(InterceptorLoadICGetter0, "o.x == undefined ? 1 : 0", 0); } static v8::Handle<Value> InterceptorStoreICSetter( Local<String> key, Local<Value> value, const AccessorInfo&) { CHECK(v8_str("x")->Equals(key)); CHECK_EQ(42, value->Int32Value()); return value; } // This test should hit the store IC for the interceptor case. 
THREADED_TEST(InterceptorStoreIC) { v8::HandleScope scope; v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); templ->SetNamedPropertyHandler(InterceptorLoadICGetter, InterceptorStoreICSetter); LocalContext context; context->Global()->Set(v8_str("o"), templ->NewInstance()); v8::Handle<Value> value = CompileRun( "for (var i = 0; i < 1000; i++) {" " o.x = 42;" "}"); } THREADED_TEST(InterceptorStoreICWithNoSetter) { v8::HandleScope scope; v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); templ->SetNamedPropertyHandler(InterceptorLoadXICGetter); LocalContext context; context->Global()->Set(v8_str("o"), templ->NewInstance()); v8::Handle<Value> value = CompileRun( "for (var i = 0; i < 1000; i++) {" " o.y = 239;" "}" "42 + o.y"); CHECK_EQ(239 + 42, value->Int32Value()); } v8::Handle<Value> call_ic_function; v8::Handle<Value> call_ic_function2; v8::Handle<Value> call_ic_function3; static v8::Handle<Value> InterceptorCallICGetter(Local<String> name, const AccessorInfo& info) { ApiTestFuzzer::Fuzz(); CHECK(v8_str("x")->Equals(name)); return call_ic_function; } // This test should hit the call IC for the interceptor case. THREADED_TEST(InterceptorCallIC) { v8::HandleScope scope; v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); templ->SetNamedPropertyHandler(InterceptorCallICGetter); LocalContext context; context->Global()->Set(v8_str("o"), templ->NewInstance()); call_ic_function = v8_compile("function f(x) { return x + 1; }; f")->Run(); v8::Handle<Value> value = CompileRun( "var result = 0;" "for (var i = 0; i < 1000; i++) {" " result = o.x(41);" "}"); CHECK_EQ(42, value->Int32Value()); } // This test checks that if interceptor doesn't provide // a value, we can fetch regular value. 
// If the interceptor declines, a function stored as a real property must
// be found and called through the call IC.
THREADED_TEST(InterceptorCallICSeesOthers) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(NoBlockGetterX);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "o.x = function f(x) { return x + 1; };"
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      " result = o.x(41);"
      "}");
  CHECK_EQ(42, value->Int32Value());
}


// Function handed back by InterceptorCallICGetter4; set by the test below.
static v8::Handle<Value> call_ic_function4;

// Interceptor that supplies call_ic_function4 for "x" (asserts the name).
static v8::Handle<Value> InterceptorCallICGetter4(Local<String> name,
                                                   const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  CHECK(v8_str("x")->Equals(name));
  return call_ic_function4;
}


// This test checks that if interceptor provides a function,
// even if we cached shadowed variant, interceptor's function
// is invoked
THREADED_TEST(InterceptorCallICCacheableNotNeeded) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(InterceptorCallICGetter4);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  call_ic_function4 =
      v8_compile("function f(x) { return x - 1; }; f")->Run();
  v8::Handle<Value> value = CompileRun(
      "o.__proto__.x = function(x) { return x + 1; };"
      "var result = 0;"
      "for (var i = 0; i < 1000; i++) {"
      " result = o.x(42);"
      "}");
  CHECK_EQ(41, value->Int32Value());
}


// Test the case when we stored cacheable lookup into
// a stub, but it got invalidated later on
THREADED_TEST(InterceptorCallICInvalidatedCacheable) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(NoBlockGetterX);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "proto1 = new Object();"
      "proto2 = new Object();"
      "o.__proto__ = proto1;"
      "proto1.__proto__ = proto2;"
      "proto2.y = function(x) { return x + 1; };"
      // Invoke it many times to compile a stub
      "for (var i = 0; i < 7; i++) {"
      " o.y(42);"
      "}"
      "proto1.y = function(x) { return x - 1; };"  // shadows proto2.y
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      " result += o.y(42);"
      "}");
  CHECK_EQ(41 * 7, value->Int32Value());
}


// Function handed back by InterceptorCallICGetter5; set by the test below.
static v8::Handle<Value> call_ic_function5;

// Interceptor that supplies call_ic_function5 for "x" and declines
// everything else.
static v8::Handle<Value> InterceptorCallICGetter5(Local<String> name,
                                                   const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  if (v8_str("x")->Equals(name))
    return call_ic_function5;
  else
    return Local<Value>();
}


// This test checks that if interceptor doesn't provide a function,
// cached constant function is used
THREADED_TEST(InterceptorCallICConstantFunctionUsed) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(NoBlockGetterX);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "function inc(x) { return x + 1; };"
      "inc(1);"
      "o.x = inc;"
      "var result = 0;"
      "for (var i = 0; i < 1000; i++) {"
      " result = o.x(42);"
      "}");
  CHECK_EQ(43, value->Int32Value());
}


// This test checks that if interceptor provides a function,
// even if we cached constant function, interceptor's function
// is invoked
THREADED_TEST(InterceptorCallICConstantFunctionNotNeeded) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(InterceptorCallICGetter5);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  call_ic_function5 =
      v8_compile("function f(x) { return x - 1; }; f")->Run();
  v8::Handle<Value> value = CompileRun(
      "function inc(x) { return x + 1; };"
      "inc(1);"
      "o.x = inc;"
      "var result = 0;"
      "for (var i = 0; i < 1000; i++) {"
      " result = o.x(42);"
      "}");
  CHECK_EQ(41, value->Int32Value());
}


// Test the case when we stored constant function into
// a stub, but it got invalidated later on
THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(NoBlockGetterX);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "function inc(x) { return x + 1; };"
      "inc(1);"
      "proto1 = new Object();"
      "proto2 = new Object();"
      "o.__proto__ = proto1;"
      "proto1.__proto__ = proto2;"
      "proto2.y = inc;"
      // Invoke it many times to compile a stub
      "for (var i = 0; i < 7; i++) {"
      " o.y(42);"
      "}"
      "proto1.y = function(x) { return x - 1; };"  // shadows proto2.y
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      " result += o.y(42);"
      "}");
  CHECK_EQ(41 * 7, value->Int32Value());
}


// Test the case when we stored constant function into
// a stub, but it got invalidated later on due to override on
// global object which is between interceptor and constant function' holders.
THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(NoBlockGetterX);
  LocalContext context;
  context->Global()->Set(v8_str("o"), templ->NewInstance());
  v8::Handle<Value> value = CompileRun(
      "function inc(x) { return x + 1; };"
      "inc(1);"
      "o.__proto__ = this;"
      "this.__proto__.y = inc;"
      // Invoke it many times to compile a stub
      "for (var i = 0; i < 7; i++) {"
      " if (o.y(42) != 43) throw 'oops: ' + o.y(42);"
      "}"
      "this.y = function(x) { return x - 1; };"  // shadows the cached inc
      "var result = 0;"
      "for (var i = 0; i < 7; i++) {"
      " result += o.y(42);"
      "}");
  CHECK_EQ(41 * 7, value->Int32Value());
}


// Test the case when actual function to call sits on global object.
THREADED_TEST(InterceptorCallICCachedFromGlobal) { v8::HandleScope scope; v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(); templ_o->SetNamedPropertyHandler(NoBlockGetterX); LocalContext context; context->Global()->Set(v8_str("o"), templ_o->NewInstance()); v8::Handle<Value> value = CompileRun( "try {" " o.__proto__ = this;" " for (var i = 0; i < 10; i++) {" " var v = o.parseFloat('239');" " if (v != 239) throw v;" // Now it should be ICed and keep a reference to parseFloat. " }" " var result = 0;" " for (var i = 0; i < 10; i++) {" " result += o.parseFloat('239');" " }" " result" "} catch(e) {" " e" "};"); CHECK_EQ(239 * 10, value->Int32Value()); } static int interceptor_call_count = 0; static v8::Handle<Value> InterceptorICRefErrorGetter(Local<String> name, const AccessorInfo& info) { ApiTestFuzzer::Fuzz(); if (v8_str("x")->Equals(name) && interceptor_call_count++ < 20) { return call_ic_function2; } return v8::Handle<Value>(); } // This test should hit load and call ICs for the interceptor case. // Once in a while, the interceptor will reply that a property was not // found in which case we should get a reference error. 
THREADED_TEST(InterceptorICReferenceErrors) { v8::HandleScope scope; v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); templ->SetNamedPropertyHandler(InterceptorICRefErrorGetter); LocalContext context(0, templ, v8::Handle<Value>()); call_ic_function2 = v8_compile("function h(x) { return x; }; h")->Run(); v8::Handle<Value> value = CompileRun( "function f() {" " for (var i = 0; i < 1000; i++) {" " try { x; } catch(e) { return true; }" " }" " return false;" "};" "f();"); CHECK_EQ(true, value->BooleanValue()); interceptor_call_count = 0; value = CompileRun( "function g() {" " for (var i = 0; i < 1000; i++) {" " try { x(42); } catch(e) { return true; }" " }" " return false;" "};" "g();"); CHECK_EQ(true, value->BooleanValue()); } static int interceptor_ic_exception_get_count = 0; static v8::Handle<Value> InterceptorICExceptionGetter( Local<String> name, const AccessorInfo& info) { ApiTestFuzzer::Fuzz(); if (v8_str("x")->Equals(name) && ++interceptor_ic_exception_get_count < 20) { return call_ic_function3; } if (interceptor_ic_exception_get_count == 20) { return v8::ThrowException(v8_num(42)); } // Do not handle get for properties other than x. return v8::Handle<Value>(); } // Test interceptor load/call IC where the interceptor throws an // exception once in a while. 
THREADED_TEST(InterceptorICGetterExceptions) { interceptor_ic_exception_get_count = 0; v8::HandleScope scope; v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); templ->SetNamedPropertyHandler(InterceptorICExceptionGetter); LocalContext context(0, templ, v8::Handle<Value>()); call_ic_function3 = v8_compile("function h(x) { return x; }; h")->Run(); v8::Handle<Value> value = CompileRun( "function f() {" " for (var i = 0; i < 100; i++) {" " try { x; } catch(e) { return true; }" " }" " return false;" "};" "f();"); CHECK_EQ(true, value->BooleanValue()); interceptor_ic_exception_get_count = 0; value = CompileRun( "function f() {" " for (var i = 0; i < 100; i++) {" " try { x(42); } catch(e) { return true; }" " }" " return false;" "};" "f();"); CHECK_EQ(true, value->BooleanValue()); } static int interceptor_ic_exception_set_count = 0; static v8::Handle<Value> InterceptorICExceptionSetter( Local<String> key, Local<Value> value, const AccessorInfo&) { ApiTestFuzzer::Fuzz(); if (++interceptor_ic_exception_set_count > 20) { return v8::ThrowException(v8_num(42)); } // Do not actually handle setting. return v8::Handle<Value>(); } // Test interceptor store IC where the interceptor throws an exception // once in a while. THREADED_TEST(InterceptorICSetterExceptions) { interceptor_ic_exception_set_count = 0; v8::HandleScope scope; v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(); templ->SetNamedPropertyHandler(0, InterceptorICExceptionSetter); LocalContext context(0, templ, v8::Handle<Value>()); v8::Handle<Value> value = CompileRun( "function f() {" " for (var i = 0; i < 100; i++) {" " try { x = 42; } catch(e) { return true; }" " }" " return false;" "};" "f();"); CHECK_EQ(true, value->BooleanValue()); } // Test that we ignore null interceptors. 
// A NULL named interceptor must be ignored: plain property access works.
THREADED_TEST(NullNamedInterceptor) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetNamedPropertyHandler(0);
  LocalContext context;
  templ->Set("x", v8_num(42));
  v8::Handle<v8::Object> obj = templ->NewInstance();
  context->Global()->Set(v8_str("obj"), obj);
  v8::Handle<Value> value = CompileRun("obj.x");
  CHECK(value->IsInt32());
  CHECK_EQ(42, value->Int32Value());
}


// Test that we ignore null interceptors.
THREADED_TEST(NullIndexedInterceptor) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetIndexedPropertyHandler(0);
  LocalContext context;
  templ->Set("42", v8_num(42));
  v8::Handle<v8::Object> obj = templ->NewInstance();
  context->Global()->Set(v8_str("obj"), obj);
  v8::Handle<Value> value = CompileRun("obj[42]");
  CHECK(value->IsInt32());
  CHECK_EQ(42, value->Int32Value());
}


// Accessor getters used by the Overriding test: the parent answers 1,
// the child answers 42.
static v8::Handle<Value> ParentGetter(Local<String> name,
                                      const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  return v8_num(1);
}

static v8::Handle<Value> ChildGetter(Local<String> name,
                                     const AccessorInfo& info) {
  ApiTestFuzzer::Fuzz();
  return v8_num(42);
}


// Verifies accessor overriding rules across an inherited template chain:
// child overrides parent, last-added accessor wins, and a ReadOnly
// accessor on the prototype can be shadowed while a ReadOnly accessor on
// the instance with a NULL setter cannot.
THREADED_TEST(Overriding) {
  v8::HandleScope scope;
  LocalContext context;
  // Parent template.
  Local<v8::FunctionTemplate> parent_templ = v8::FunctionTemplate::New();
  Local<ObjectTemplate> parent_instance_templ =
      parent_templ->InstanceTemplate();
  parent_instance_templ->SetAccessor(v8_str("f"), ParentGetter);
  // Template that inherits from the parent template.
  Local<v8::FunctionTemplate> child_templ = v8::FunctionTemplate::New();
  Local<ObjectTemplate> child_instance_templ =
      child_templ->InstanceTemplate();
  child_templ->Inherit(parent_templ);
  // Override 'f'.  The child version of 'f' should get called for child
  // instances.
  child_instance_templ->SetAccessor(v8_str("f"), ChildGetter);
  // Add 'g' twice.  The 'g' added last should get called for instances.
  child_instance_templ->SetAccessor(v8_str("g"), ParentGetter);
  child_instance_templ->SetAccessor(v8_str("g"), ChildGetter);
  // Add 'h' as an accessor to the proto template with ReadOnly attributes
  // so 'h' can be shadowed on the instance object.
  Local<ObjectTemplate> child_proto_templ = child_templ->PrototypeTemplate();
  child_proto_templ->SetAccessor(v8_str("h"), ParentGetter, 0,
      v8::Handle<Value>(), v8::DEFAULT, v8::ReadOnly);
  // Add 'i' as an accessor to the instance template with ReadOnly attributes
  // but the attribute does not have effect because it is duplicated with
  // NULL setter.
  child_instance_templ->SetAccessor(v8_str("i"), ChildGetter, 0,
      v8::Handle<Value>(), v8::DEFAULT, v8::ReadOnly);
  // Instantiate the child template.
  Local<v8::Object> instance = child_templ->GetFunction()->NewInstance();
  // Check that the child function overrides the parent one.
  context->Global()->Set(v8_str("o"), instance);
  Local<Value> value = v8_compile("o.f")->Run();
  // Check that the 'g' that was added last is hit.
  CHECK_EQ(42, value->Int32Value());
  value = v8_compile("o.g")->Run();
  CHECK_EQ(42, value->Int32Value());
  // Check 'h' can be shadowed.
  value = v8_compile("o.h = 3; o.h")->Run();
  CHECK_EQ(3, value->Int32Value());
  // Check 'i' cannot be shadowed or changed.
  value = v8_compile("o.i = 3; o.i")->Run();
  CHECK_EQ(42, value->Int32Value());
}


// Call handler that reports whether it was invoked via `new`.
static v8::Handle<Value> IsConstructHandler(const v8::Arguments& args) {
  ApiTestFuzzer::Fuzz();
  if (args.IsConstructCall()) {
    return v8::Boolean::New(true);
  }
  return v8::Boolean::New(false);
}


// Arguments::IsConstructCall must be false for `f()` and true for `new f()`.
THREADED_TEST(IsConstructCall) {
  v8::HandleScope scope;
  // Function template with call handler.
  Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
  templ->SetCallHandler(IsConstructHandler);
  LocalContext context;
  context->Global()->Set(v8_str("f"), templ->GetFunction());
  Local<Value> value = v8_compile("f()")->Run();
  CHECK(!value->BooleanValue());
  value = v8_compile("new f()")->Run();
  CHECK(value->BooleanValue());
}


// ObjectProtoToString must use the original Object.prototype.toString
// semantics (with the template's class name) even after scripts replace
// Object.prototype.toString.
THREADED_TEST(ObjectProtoToString) {
  v8::HandleScope scope;
  Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
  templ->SetClassName(v8_str("MyClass"));
  LocalContext context;
  Local<String> customized_tostring = v8_str("customized toString");
  // Replace Object.prototype.toString
  v8_compile("Object.prototype.toString = function() {"
      " return 'customized toString';"
      "}")->Run();
  // Normal ToString call should call replaced Object.prototype.toString
  Local<v8::Object> instance = templ->GetFunction()->NewInstance();
  Local<String> value = instance->ToString();
  CHECK(value->IsString() && value->Equals(customized_tostring));
  // ObjectProtoToString should not call replace toString function.
  value = instance->ObjectProtoToString();
  CHECK(value->IsString() && value->Equals(v8_str("[object MyClass]")));
  // Check global
  value = context->Global()->ObjectProtoToString();
  CHECK(value->IsString() && value->Equals(v8_str("[object global]")));
  // Check ordinary object
  Local<Value> object = v8_compile("new Object()")->Run();
  value = Local<v8::Object>::Cast(object)->ObjectProtoToString();
  CHECK(value->IsString() && value->Equals(v8_str("[object Object]")));
}


// Static state backing the thread-fuzzing machinery declared elsewhere in
// this file.
bool ApiTestFuzzer::fuzzing_ = false;
v8::internal::Semaphore* ApiTestFuzzer::all_tests_done_=
  v8::internal::OS::CreateSemaphore(0);
int ApiTestFuzzer::active_tests_;
int ApiTestFuzzer::tests_being_run_;
int ApiTestFuzzer::current_;


// We are in a callback and want to switch to another thread (if we
// are currently running the thread fuzzing test).
// Yields control to another fuzzed test thread, if fuzzing is active.
void ApiTestFuzzer::Fuzz() {
  if (!fuzzing_) return;
  ApiTestFuzzer* test = RegisterThreadedTest::nth(current_)->fuzzer_;
  test->ContextSwitch();
}


// Let the next thread go.  Since it is also waiting on the V8 lock it may
// not start immediately.
bool ApiTestFuzzer::NextThread() {
  int test_position = GetNextTestNumber();
  const char* test_name = RegisterThreadedTest::nth(current_)->name();
  if (test_position == current_) {
    if (kLogThreading)
      printf("Stay with %s\n", test_name);
    return false;
  }
  if (kLogThreading) {
    printf("Switch from %s to %s\n",
           test_name,
           RegisterThreadedTest::nth(test_position)->name());
  }
  current_ = test_position;
  RegisterThreadedTest::nth(current_)->fuzzer_->gate_->Signal();
  return true;
}


// Thread body: waits for its turn, runs its test under the V8 lock, then
// either signals overall completion or hands off to the next test.
void ApiTestFuzzer::Run() {
  // When it is our turn...
  gate_->Wait();
  {
    // ... get the V8 lock and start running the test.
    v8::Locker locker;
    CallTest();
  }
  // This test finished.
  active_ = false;
  active_tests_--;
  // If it was the last then signal that fact.
  if (active_tests_ == 0) {
    all_tests_done_->Signal();
  } else {
    // Otherwise select a new test and start that.
    NextThread();
  }
}


// Pseudo-random state used to pick the next test thread.
static unsigned linear_congruential_generator;


// Registers and starts a fuzzer thread for each test in the chosen half
// of the registered test list.
void ApiTestFuzzer::Setup(PartOfTest part) {
  linear_congruential_generator = i::FLAG_testing_prng_seed;
  fuzzing_ = true;
  int start = (part == FIRST_PART) ? 0 : (RegisterThreadedTest::count() >> 1);
  int end = (part == FIRST_PART) ? (RegisterThreadedTest::count() >> 1)
                                 : RegisterThreadedTest::count();
  active_tests_ = tests_being_run_ = end - start;
  for (int i = 0; i < tests_being_run_; i++) {
    RegisterThreadedTest::nth(i)->fuzzer_ = new ApiTestFuzzer(i + start);
  }
  for (int i = 0; i < active_tests_; i++) {
    RegisterThreadedTest::nth(i)->fuzzer_->Start();
  }
}


// Invokes the registered test callback with the given index.
static void CallTestNumber(int test_number) {
  (RegisterThreadedTest::nth(test_number)->callback())();
}


// Kicks off the first fuzzed test and blocks until all have finished.
void ApiTestFuzzer::RunAllTests() {
  // Set off the first test.
  current_ = -1;
  NextThread();
  // Wait till they are all done.
  all_tests_done_->Wait();
}


// Picks the next still-active test index using a linear congruential
// pseudo-random sequence.
int ApiTestFuzzer::GetNextTestNumber() {
  int next_test;
  do {
    next_test = (linear_congruential_generator >> 16) % tests_being_run_;
    linear_congruential_generator *= 1664525u;
    linear_congruential_generator += 1013904223u;
  } while (!RegisterThreadedTest::nth(next_test)->fuzzer_->active_);
  return next_test;
}


// Hands the V8 lock to another test thread and waits to be resumed.
void ApiTestFuzzer::ContextSwitch() {
  // If the new thread is the same as the current thread there is nothing to do.
  if (NextThread()) {
    // Now it can start.
    v8::Unlocker unlocker;
    // Wait till someone starts us again.
    gate_->Wait();
    // And we're off.
  }
}


// Stops fuzzing and joins all fuzzer threads.
void ApiTestFuzzer::TearDown() {
  fuzzing_ = false;
  for (int i = 0; i < RegisterThreadedTest::count(); i++) {
    ApiTestFuzzer *fuzzer = RegisterThreadedTest::nth(i)->fuzzer_;
    if (fuzzer != NULL) fuzzer->Join();
  }
}


// Lets not be needlessly self-referential.
TEST(Threading) {
  ApiTestFuzzer::Setup(ApiTestFuzzer::FIRST_PART);
  ApiTestFuzzer::RunAllTests();
  ApiTestFuzzer::TearDown();
}

TEST(Threading2) {
  ApiTestFuzzer::Setup(ApiTestFuzzer::SECOND_PART);
  ApiTestFuzzer::RunAllTests();
  ApiTestFuzzer::TearDown();
}


// Runs this fuzzer's test, with optional logging around it.
void ApiTestFuzzer::CallTest() {
  if (kLogThreading)
    printf("Start test %d\n", test_number_);
  CallTestNumber(test_number_);
  if (kLogThreading)
    printf("End test %d\n", test_number_);
}


// Callback that unlocks V8, re-locks in a nested Locker, catches a JS
// throw, and re-throws the caught exception to the outer context.
static v8::Handle<Value> ThrowInJS(const v8::Arguments& args) {
  CHECK(v8::Locker::IsLocked());
  ApiTestFuzzer::Fuzz();
  v8::Unlocker unlocker;
  const char* code = "throw 7;";
  {
    v8::Locker nested_locker;
    v8::HandleScope scope;
    v8::Handle<Value> exception;
    {
      v8::TryCatch try_catch;
      v8::Handle<Value> value = CompileRun(code);
      CHECK(value.IsEmpty());
      CHECK(try_catch.HasCaught());
      // Make sure to wrap the exception in a new handle because
      // the handle returned from the TryCatch is destroyed
      // when the TryCatch is destroyed.
      exception = Local<Value>::New(try_catch.Exception());
    }
    return v8::ThrowException(exception);
  }
}


// Like ThrowInJS, but lets the JS exception propagate uncaught out of the
// nested locker instead of re-throwing it explicitly.
static v8::Handle<Value> ThrowInJSNoCatch(const v8::Arguments& args) {
  CHECK(v8::Locker::IsLocked());
  ApiTestFuzzer::Fuzz();
  v8::Unlocker unlocker;
  const char* code = "throw 7;";
  {
    v8::Locker nested_locker;
    v8::HandleScope scope;
    v8::Handle<Value> value = CompileRun(code);
    CHECK(value.IsEmpty());
    return v8_str("foo");
  }
}


// These are locking tests that don't need to be run again
// as part of the locking aggregation tests.
TEST(NestedLockers) {
  v8::Locker locker;
  CHECK(v8::Locker::IsLocked());
  v8::HandleScope scope;
  LocalContext env;
  Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(ThrowInJS);
  Local<Function> fun = fun_templ->GetFunction();
  env->Global()->Set(v8_str("throw_in_js"), fun);
  Local<Script> script = v8_compile("(function () {"
      " try {"
      " throw_in_js();"
      " return 42;"
      " } catch (e) {"
      " return e * 13;"
      " }"
      "})();");
  CHECK_EQ(91, script->Run()->Int32Value());
}


// These are locking tests that don't need to be run again
// as part of the locking aggregation tests.
// Same nested-locker scenario as the NestedLockers test above, but the
// native callback (ThrowInJSNoCatch) does not wrap the inner throw in a
// v8::TryCatch; the exception must still reach the outer script's catch
// clause, yielding 7 * 13 == 91.
TEST(NestedLockersNoTryCatch) {
  v8::Locker locker;
  v8::HandleScope scope;
  LocalContext env;
  Local<v8::FunctionTemplate> fun_templ =
      v8::FunctionTemplate::New(ThrowInJSNoCatch);
  Local<Function> fun = fun_templ->GetFunction();
  env->Global()->Set(v8_str("throw_in_js"), fun);
  Local<Script> script = v8_compile("(function () {"
                                    " try {"
                                    " throw_in_js();"
                                    " return 42;"
                                    " } catch (e) {"
                                    " return e * 13;"
                                    " }"
                                    "})();");
  CHECK_EQ(91, script->Run()->Int32Value());
}


// Taking the VM lock twice on the same thread must be harmless: the
// inner Locker is a no-op and IsLocked() still reports the lock held.
THREADED_TEST(RecursiveLocking) {
  v8::Locker locker;
  {
    v8::Locker locker2;
    CHECK(v8::Locker::IsLocked());
  }
}


// Native callback that releases the V8 lock for the duration of the
// Unlocker's scope; the lock is reacquired when the callback returns.
static v8::Handle<Value> UnlockForAMoment(const v8::Arguments& args) {
  ApiTestFuzzer::Fuzz();
  v8::Unlocker unlocker;
  return v8::Undefined();
}


// Script execution must survive a native callback that unlocks and
// relocks the VM.  The whole sequence runs twice to verify the second
// lock/unlock cycle behaves exactly like the first.
THREADED_TEST(LockUnlockLock) {
  {
    v8::Locker locker;
    v8::HandleScope scope;
    LocalContext env;
    Local<v8::FunctionTemplate> fun_templ =
        v8::FunctionTemplate::New(UnlockForAMoment);
    Local<Function> fun = fun_templ->GetFunction();
    env->Global()->Set(v8_str("unlock_for_a_moment"), fun);
    Local<Script> script = v8_compile("(function () {"
                                      " unlock_for_a_moment();"
                                      " return 42;"
                                      "})();");
    CHECK_EQ(42, script->Run()->Int32Value());
  }
  {
    v8::Locker locker;
    v8::HandleScope scope;
    LocalContext env;
    Local<v8::FunctionTemplate> fun_templ =
        v8::FunctionTemplate::New(UnlockForAMoment);
    Local<Function> fun = fun_templ->GetFunction();
    env->Global()->Set(v8_str("unlock_for_a_moment"), fun);
    Local<Script> script = v8_compile("(function () {"
                                      " unlock_for_a_moment();"
                                      " return 42;"
                                      "})();");
    CHECK_EQ(42, script->Run()->Int32Value());
  }
}


// Walks the whole heap and counts live JSGlobalObject instances.
static int GetGlobalObjectsCount() {
  int count = 0;
  v8::internal::HeapIterator it;
  for (i::HeapObject* object = it.next(); object != NULL; object = it.next())
    if (object->IsJSGlobalObject()) count++;
  return count;
}


// Returns how many global objects survive a full garbage collection.
static int GetSurvivingGlobalObjectsCount() {
  // We need to collect all garbage twice to be sure that everything
  // has been collected.  This is because inline caches are cleared in
  // the first garbage collection but some of the maps have already
  // been marked at that point.  Therefore some of the maps are not
  // collected until the second garbage collection.
  v8::internal::Heap::CollectAllGarbage(false);
  v8::internal::Heap::CollectAllGarbage(false);
  int count = GetGlobalObjectsCount();
#ifdef DEBUG
  if (count > 0) v8::internal::Heap::TracePathToGlobal();
#endif
  return count;
}


// Creating and dropping contexts — empty, after running "Date", after
// compiling a regexp literal, and with the gc extension loaded — must
// not leave additional global objects alive.
TEST(DontLeakGlobalObjects) {
  // Regression test for issues 1139850 and 1174891.
  v8::V8::Initialize();

  int count = GetSurvivingGlobalObjectsCount();

  for (int i = 0; i < 5; i++) {
    { v8::HandleScope scope;
      LocalContext context;
    }
    CHECK_EQ(count, GetSurvivingGlobalObjectsCount());

    { v8::HandleScope scope;
      LocalContext context;
      v8_compile("Date")->Run();
    }
    CHECK_EQ(count, GetSurvivingGlobalObjectsCount());

    { v8::HandleScope scope;
      LocalContext context;
      v8_compile("/aaa/")->Run();
    }
    CHECK_EQ(count, GetSurvivingGlobalObjectsCount());

    { v8::HandleScope scope;
      const char* extension_list[] = { "v8/gc" };
      v8::ExtensionConfiguration extensions(1, extension_list);
      LocalContext context(&extensions);
      v8_compile("gc();")->Run();
    }
    CHECK_EQ(count, GetSurvivingGlobalObjectsCount());
  }
}


// Shared state for NewPersistentHandleFromWeakCallback below.
v8::Persistent<v8::Object> some_object;
v8::Persistent<v8::Object> bad_handle;

// Weak callback that allocates a fresh persistent handle while global
// handles are being processed after a GC.
void NewPersistentHandleCallback(v8::Persistent<v8::Value>, void*) {
  v8::HandleScope scope;
  bad_handle = v8::Persistent<v8::Object>::New(some_object);
}


// Creating a new persistent handle from inside a weak callback must not
// hand out a handle slot that the GC is about to reuse.
THREADED_TEST(NewPersistentHandleFromWeakCallback) {
  LocalContext context;

  v8::Persistent<v8::Object> handle1, handle2;
  {
    v8::HandleScope scope;
    some_object = v8::Persistent<v8::Object>::New(v8::Object::New());
    handle1 = v8::Persistent<v8::Object>::New(v8::Object::New());
    handle2 = v8::Persistent<v8::Object>::New(v8::Object::New());
  }
  // Note: order is implementation dependent alas: currently
  // global handle nodes are processed by PostGarbageCollectionProcessing
  // in reverse allocation order, so if second allocated handle is deleted,
  // weak callback of the first handle would be able to 'reallocate' it.
  handle1.MakeWeak(NULL, NewPersistentHandleCallback);
  handle2.Dispose();
  i::Heap::CollectAllGarbage(false);
}


v8::Persistent<v8::Object> to_be_disposed;

// Weak callback that disposes another handle and immediately forces a
// second garbage collection while the first is still finishing up.
void DisposeAndForceGcCallback(v8::Persistent<v8::Value> handle, void*) {
  to_be_disposed.Dispose();
  i::Heap::CollectAllGarbage(false);
}


// A handle disposed during weak-callback processing must not be touched
// by the nested (second-level) GC triggered from that same callback.
THREADED_TEST(DoNotUseDeletedNodesInSecondLevelGc) {
  LocalContext context;

  v8::Persistent<v8::Object> handle1, handle2;
  {
    v8::HandleScope scope;
    handle1 = v8::Persistent<v8::Object>::New(v8::Object::New());
    handle2 = v8::Persistent<v8::Object>::New(v8::Object::New());
  }
  handle1.MakeWeak(NULL, DisposeAndForceGcCallback);
  to_be_disposed = handle2;
  i::Heap::CollectAllGarbage(false);
}

// Weak callback that disposes the handle passed to it.
void DisposingCallback(v8::Persistent<v8::Value> handle, void*) {
  handle.Dispose();
}

// Weak callback that allocates a brand-new persistent handle.
void HandleCreatingCallback(v8::Persistent<v8::Value> handle, void*) {
  v8::HandleScope scope;
  v8::Persistent<v8::Object>::New(v8::Object::New());
}


// Disposing one handle and creating another from two different weak
// callbacks in the same GC cycle must not orphan global handle nodes.
THREADED_TEST(NoGlobalHandlesOrphaningDueToWeakCallback) {
  LocalContext context;

  v8::Persistent<v8::Object> handle1, handle2, handle3;
  {
    v8::HandleScope scope;
    handle3 = v8::Persistent<v8::Object>::New(v8::Object::New());
    handle2 = v8::Persistent<v8::Object>::New(v8::Object::New());
    handle1 = v8::Persistent<v8::Object>::New(v8::Object::New());
  }
  handle2.MakeWeak(NULL, DisposingCallback);
  handle3.MakeWeak(NULL, HandleCreatingCallback);
  i::Heap::CollectAllGarbage(false);
}


// Runs each source in two fresh contexts, twice per context pair, to
// make sure object literals (and the cached code behind them) do not
// leak state across contexts.
THREADED_TEST(CheckForCrossContextObjectLiterals) {
  v8::V8::Initialize();

  const int nof = 2;
  const char* sources[nof] = {
    "try { [ 2, 3, 4 ].forEach(5); } catch(e) { e.toString(); }",
    "Object()"
  };

  for (int i = 0; i < nof; i++) {
    const char* source = sources[i];
    {
      v8::HandleScope scope;
      LocalContext context;
      CompileRun(source);
    }
    {
      v8::HandleScope scope;
      LocalContext context;
      CompileRun(source);
    }
  }
}


// Enters the given context inside a nested HandleScope, creates a value
// there and promotes it into the caller's scope via inner.Close().
static v8::Handle<Value> NestedScope(v8::Persistent<Context> env) {
  v8::HandleScope inner;
  env->Enter();
  v8::Handle<Value> three = v8_num(3);
  v8::Handle<Value> value = inner.Close(three);
  env->Exit();
  return value;
}


// A value closed out of a nested scope must remain usable (here: its
// ToString() is taken) after the nested scope and context are gone.
THREADED_TEST(NestedHandleScopeAndContexts) {
  v8::HandleScope outer;
  v8::Persistent<Context> env = Context::New();
  env->Enter();
  v8::Handle<Value> value = NestedScope(env);
  v8::Handle<String> str = value->ToString();
  env->Exit();
  env.Dispose();
}


// Adjusting the external-memory accounting up and back down by the same
// amount must return to exactly zero.
THREADED_TEST(ExternalAllocatedMemory) {
  v8::HandleScope outer;
  v8::Persistent<Context> env = Context::New();
  const int kSize = 1024*1024;
  CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(kSize), kSize);
  CHECK_EQ(v8::V8::AdjustAmountOfExternalAllocatedMemory(-kSize), 0);
}


// Disposing a context while it is still entered (and exiting it after
// the handle has been disposed and cleared) must not crash.
THREADED_TEST(DisposeEnteredContext) {
  v8::HandleScope scope;
  LocalContext outer;
  {
    v8::Persistent<v8::Context> inner = v8::Context::New();
    inner->Enter();
    inner.Dispose();
    inner.Clear();
    inner->Exit();
  }
}


// Regression test for issue 54, object templates with internal fields
// but no accessors or interceptors did not get their internal field
// count set on instances.
THREADED_TEST(Regress54) {
  v8::HandleScope outer;
  LocalContext context;
  // The template is cached in a function-local static so repeated runs
  // of this threaded test reuse the same persistent template.
  static v8::Persistent<v8::ObjectTemplate> templ;
  if (templ.IsEmpty()) {
    v8::HandleScope inner;
    v8::Handle<v8::ObjectTemplate> local = v8::ObjectTemplate::New();
    local->SetInternalFieldCount(1);
    templ = v8::Persistent<v8::ObjectTemplate>::New(inner.Close(local));
  }
  v8::Handle<v8::Object> result = templ->NewInstance();
  CHECK_EQ(1, result->InternalFieldCount());
}


// If part of the threaded tests, this test makes ThreadingTest fail
// on mac.
TEST(CatchStackOverflow) { v8::HandleScope scope; LocalContext context; v8::TryCatch try_catch; v8::Handle<v8::Script> script = v8::Script::Compile(v8::String::New( "function f() {" " return f();" "}" "" "f();")); v8::Handle<v8::Value> result = script->Run(); CHECK(result.IsEmpty()); } static void CheckTryCatchSourceInfo(v8::Handle<v8::Script> script, const char* resource_name, int line_offset) { v8::HandleScope scope; v8::TryCatch try_catch; v8::Handle<v8::Value> result = script->Run(); CHECK(result.IsEmpty()); CHECK(try_catch.HasCaught()); v8::Handle<v8::Message> message = try_catch.Message(); CHECK(!message.IsEmpty()); CHECK_EQ(10 + line_offset, message->GetLineNumber()); CHECK_EQ(91, message->GetStartPosition()); CHECK_EQ(92, message->GetEndPosition()); CHECK_EQ(2, message->GetStartColumn()); CHECK_EQ(3, message->GetEndColumn()); v8::String::AsciiValue line(message->GetSourceLine()); CHECK_EQ(" throw 'nirk';", *line); v8::String::AsciiValue name(message->GetScriptResourceName()); CHECK_EQ(resource_name, *name); } THREADED_TEST(TryCatchSourceInfo) { v8::HandleScope scope; LocalContext context; v8::Handle<v8::String> source = v8::String::New( "function Foo() {\n" " return Bar();\n" "}\n" "\n" "function Bar() {\n" " return Baz();\n" "}\n" "\n" "function Baz() {\n" " throw 'nirk';\n" "}\n" "\n" "Foo();\n"); const char* resource_name; v8::Handle<v8::Script> script; resource_name = "test.js"; script = v8::Script::Compile(source, v8::String::New(resource_name)); CheckTryCatchSourceInfo(script, resource_name, 0); resource_name = "test1.js"; v8::ScriptOrigin origin1(v8::String::New(resource_name)); script = v8::Script::Compile(source, &origin1); CheckTryCatchSourceInfo(script, resource_name, 0); resource_name = "test2.js"; v8::ScriptOrigin origin2(v8::String::New(resource_name), v8::Integer::New(7)); script = v8::Script::Compile(source, &origin2); CheckTryCatchSourceInfo(script, resource_name, 7); } THREADED_TEST(CompilationCache) { v8::HandleScope scope; LocalContext 
context; v8::Handle<v8::String> source0 = v8::String::New("1234"); v8::Handle<v8::String> source1 = v8::String::New("1234"); v8::Handle<v8::Script> script0 = v8::Script::Compile(source0, v8::String::New("test.js")); v8::Handle<v8::Script> script1 = v8::Script::Compile(source1, v8::String::New("test.js")); v8::Handle<v8::Script> script2 = v8::Script::Compile(source0); // different origin CHECK_EQ(1234, script0->Run()->Int32Value()); CHECK_EQ(1234, script1->Run()->Int32Value()); CHECK_EQ(1234, script2->Run()->Int32Value()); } static v8::Handle<Value> FunctionNameCallback(const v8::Arguments& args) { ApiTestFuzzer::Fuzz(); return v8_num(42); } THREADED_TEST(CallbackFunctionName) { v8::HandleScope scope; LocalContext context; Local<ObjectTemplate> t = ObjectTemplate::New(); t->Set(v8_str("asdf"), v8::FunctionTemplate::New(FunctionNameCallback)); context->Global()->Set(v8_str("obj"), t->NewInstance()); v8::Handle<v8::Value> value = CompileRun("obj.asdf.name"); CHECK(value->IsString()); v8::String::AsciiValue name(value); CHECK_EQ("asdf", *name); } THREADED_TEST(DateAccess) { v8::HandleScope scope; LocalContext context; v8::Handle<v8::Value> date = v8::Date::New(1224744689038.0); CHECK(date->IsDate()); CHECK_EQ(1224744689038.0, v8::Handle<v8::Date>::Cast(date)->NumberValue()); } void CheckProperties(v8::Handle<v8::Value> val, int elmc, const char* elmv[]) { v8::Handle<v8::Object> obj = v8::Handle<v8::Object>::Cast(val); v8::Handle<v8::Array> props = obj->GetPropertyNames(); CHECK_EQ(elmc, props->Length()); for (int i = 0; i < elmc; i++) { v8::String::Utf8Value elm(props->Get(v8::Integer::New(i))); CHECK_EQ(elmv[i], *elm); } } THREADED_TEST(PropertyEnumeration) { v8::HandleScope scope; LocalContext context; v8::Handle<v8::Value> obj = v8::Script::Compile(v8::String::New( "var result = [];" "result[0] = {};" "result[1] = {a: 1, b: 2};" "result[2] = [1, 2, 3];" "var proto = {x: 1, y: 2, z: 3};" "var x = { __proto__: proto, w: 0, z: 1 };" "result[3] = x;" "result;"))->Run(); 
v8::Handle<v8::Array> elms = v8::Handle<v8::Array>::Cast(obj); CHECK_EQ(4, elms->Length()); int elmc0 = 0; const char** elmv0 = NULL; CheckProperties(elms->Get(v8::Integer::New(0)), elmc0, elmv0); int elmc1 = 2; const char* elmv1[] = {"a", "b"}; CheckProperties(elms->Get(v8::Integer::New(1)), elmc1, elmv1); int elmc2 = 3; const char* elmv2[] = {"0", "1", "2"}; CheckProperties(elms->Get(v8::Integer::New(2)), elmc2, elmv2); int elmc3 = 4; const char* elmv3[] = {"w", "z", "x", "y"}; CheckProperties(elms->Get(v8::Integer::New(3)), elmc3, elmv3); } static bool NamedSetAccessBlocker(Local<v8::Object> obj, Local<Value> name, v8::AccessType type, Local<Value> data) { return type != v8::ACCESS_SET; } static bool IndexedSetAccessBlocker(Local<v8::Object> obj, uint32_t key, v8::AccessType type, Local<Value> data) { return type != v8::ACCESS_SET; } THREADED_TEST(DisableAccessChecksWhileConfiguring) { v8::HandleScope scope; LocalContext context; Local<ObjectTemplate> templ = ObjectTemplate::New(); templ->SetAccessCheckCallbacks(NamedSetAccessBlocker, IndexedSetAccessBlocker); templ->Set(v8_str("x"), v8::True()); Local<v8::Object> instance = templ->NewInstance(); context->Global()->Set(v8_str("obj"), instance); Local<Value> value = CompileRun("obj.x"); CHECK(value->BooleanValue()); } static bool NamedGetAccessBlocker(Local<v8::Object> obj, Local<Value> name, v8::AccessType type, Local<Value> data) { return false; } static bool IndexedGetAccessBlocker(Local<v8::Object> obj, uint32_t key, v8::AccessType type, Local<Value> data) { return false; } THREADED_TEST(AccessChecksReenabledCorrectly) { v8::HandleScope scope; LocalContext context; Local<ObjectTemplate> templ = ObjectTemplate::New(); templ->SetAccessCheckCallbacks(NamedGetAccessBlocker, IndexedGetAccessBlocker); templ->Set(v8_str("a"), v8_str("a")); // Add more than 8 (see kMaxFastProperties) properties // so that the constructor will force copying map. // Cannot sprintf, gcc complains unsafety. 
char buf[4]; for (char i = '0'; i <= '9' ; i++) { buf[0] = i; for (char j = '0'; j <= '9'; j++) { buf[1] = j; for (char k = '0'; k <= '9'; k++) { buf[2] = k; buf[3] = 0; templ->Set(v8_str(buf), v8::Number::New(k)); } } } Local<v8::Object> instance_1 = templ->NewInstance(); context->Global()->Set(v8_str("obj_1"), instance_1); Local<Value> value_1 = CompileRun("obj_1.a"); CHECK(value_1->IsUndefined()); Local<v8::Object> instance_2 = templ->NewInstance(); context->Global()->Set(v8_str("obj_2"), instance_2); Local<Value> value_2 = CompileRun("obj_2.a"); CHECK(value_2->IsUndefined()); } // This tests that access check information remains on the global // object template when creating contexts. THREADED_TEST(AccessControlRepeatedContextCreation) { v8::HandleScope handle_scope; v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New(); global_template->SetAccessCheckCallbacks(NamedSetAccessBlocker, IndexedSetAccessBlocker); i::Handle<i::ObjectTemplateInfo> internal_template = v8::Utils::OpenHandle(*global_template); CHECK(!internal_template->constructor()->IsUndefined()); i::Handle<i::FunctionTemplateInfo> constructor( i::FunctionTemplateInfo::cast(internal_template->constructor())); CHECK(!constructor->access_check_info()->IsUndefined()); v8::Persistent<Context> context0 = Context::New(NULL, global_template); CHECK(!constructor->access_check_info()->IsUndefined()); } THREADED_TEST(TurnOnAccessCheck) { v8::HandleScope handle_scope; // Create an environment with access check to the global object disabled by // default. v8::Handle<v8::ObjectTemplate> global_template = v8::ObjectTemplate::New(); global_template->SetAccessCheckCallbacks(NamedGetAccessBlocker, IndexedGetAccessBlocker, v8::Handle<v8::Value>(), false); v8::Persistent<Context> context = Context::New(NULL, global_template); Context::Scope context_scope(context); // Set up a property and a number of functions. 
context->Global()->Set(v8_str("a"), v8_num(1)); CompileRun("function f1() {return a;}" "function f2() {return a;}" "function g1() {return h();}" "function g2() {return h();}" "function h() {return 1;}"); Local<Function> f1 = Local<Function>::Cast(context->Global()->Get(v8_str("f1"))); Local<Function> f2 = Local<Function>::Cast(context->Global()->Get(v8_str("f2"))); Local<Function> g1 = Local<Function>::Cast(context->Global()->Get(v8_str("g1"))); Local<Function> g2 = Local<Function>::Cast(context->Global()->Get(v8_str("g2"))); Local<Function> h = Local<Function>::Cast(context->Global()->Get(v8_str("h"))); // Get the global object. v8::Handle<v8::Object> global = context->Global(); // Call f1 one time and f2 a number of times. This will ensure that f1 still // uses the runtime system to retreive property a whereas f2 uses global load // inline cache. CHECK(f1->Call(global, 0, NULL)->Equals(v8_num(1))); for (int i = 0; i < 4; i++) { CHECK(f2->Call(global, 0, NULL)->Equals(v8_num(1))); } // Same for g1 and g2. CHECK(g1->Call(global, 0, NULL)->Equals(v8_num(1))); for (int i = 0; i < 4; i++) { CHECK(g2->Call(global, 0, NULL)->Equals(v8_num(1))); } // Detach the global and turn on access check. context->DetachGlobal(); context->Global()->TurnOnAccessCheck(); // Failing access check to property get results in undefined. CHECK(f1->Call(global, 0, NULL)->IsUndefined()); CHECK(f2->Call(global, 0, NULL)->IsUndefined()); // Failing access check to function call results in exception. CHECK(g1->Call(global, 0, NULL).IsEmpty()); CHECK(g2->Call(global, 0, NULL).IsEmpty()); // No failing access check when just returning a constant. CHECK(h->Call(global, 0, NULL)->Equals(v8_num(1))); } // This test verifies that pre-compilation (aka preparsing) can be called // without initializing the whole VM. Thus we cannot run this test in a // multi-threaded setup. TEST(PreCompile) { // TODO(155): This test would break without the initialization of V8. 
This is // a workaround for now to make this test not fail. v8::V8::Initialize(); const char *script = "function foo(a) { return a+1; }"; v8::ScriptData *sd = v8::ScriptData::PreCompile(script, i::StrLength(script)); CHECK_NE(sd->Length(), 0); CHECK_NE(sd->Data(), NULL); CHECK(!sd->HasError()); delete sd; } TEST(PreCompileWithError) { v8::V8::Initialize(); const char *script = "function foo(a) { return 1 * * 2; }"; v8::ScriptData *sd = v8::ScriptData::PreCompile(script, i::StrLength(script)); CHECK(sd->HasError()); delete sd; } TEST(Regress31661) { v8::V8::Initialize(); const char *script = " The Definintive Guide"; v8::ScriptData *sd = v8::ScriptData::PreCompile(script, i::StrLength(script)); CHECK(sd->HasError()); delete sd; } // This tests that we do not allow dictionary load/call inline caches // to use functions that have not yet been compiled. The potential // problem of loading a function that has not yet been compiled can // arise because we share code between contexts via the compilation // cache. THREADED_TEST(DictionaryICLoadedFunction) { v8::HandleScope scope; // Test LoadIC. for (int i = 0; i < 2; i++) { LocalContext context; context->Global()->Set(v8_str("tmp"), v8::True()); context->Global()->Delete(v8_str("tmp")); CompileRun("for (var j = 0; j < 10; j++) new RegExp('');"); } // Test CallIC. for (int i = 0; i < 2; i++) { LocalContext context; context->Global()->Set(v8_str("tmp"), v8::True()); context->Global()->Delete(v8_str("tmp")); CompileRun("for (var j = 0; j < 10; j++) RegExp('')"); } } // Test that cross-context new calls use the context of the callee to // create the new JavaScript object. THREADED_TEST(CrossContextNew) { v8::HandleScope scope; v8::Persistent<Context> context0 = Context::New(); v8::Persistent<Context> context1 = Context::New(); // Allow cross-domain access. 
Local<String> token = v8_str("<security token>"); context0->SetSecurityToken(token); context1->SetSecurityToken(token); // Set an 'x' property on the Object prototype and define a // constructor function in context0. context0->Enter(); CompileRun("Object.prototype.x = 42; function C() {};"); context0->Exit(); // Call the constructor function from context0 and check that the // result has the 'x' property. context1->Enter(); context1->Global()->Set(v8_str("other"), context0->Global()); Local<Value> value = CompileRun("var instance = new other.C(); instance.x"); CHECK(value->IsInt32()); CHECK_EQ(42, value->Int32Value()); context1->Exit(); // Dispose the contexts to allow them to be garbage collected. context0.Dispose(); context1.Dispose(); } class RegExpInterruptTest { public: RegExpInterruptTest() : block_(NULL) {} ~RegExpInterruptTest() { delete block_; } void RunTest() { block_ = i::OS::CreateSemaphore(0); gc_count_ = 0; gc_during_regexp_ = 0; regexp_success_ = false; gc_success_ = false; GCThread gc_thread(this); gc_thread.Start(); v8::Locker::StartPreemption(1); LongRunningRegExp(); { v8::Unlocker unlock; gc_thread.Join(); } v8::Locker::StopPreemption(); CHECK(regexp_success_); CHECK(gc_success_); } private: // Number of garbage collections required. static const int kRequiredGCs = 5; class GCThread : public i::Thread { public: explicit GCThread(RegExpInterruptTest* test) : test_(test) {} virtual void Run() { test_->CollectGarbage(); } private: RegExpInterruptTest* test_; }; void CollectGarbage() { block_->Wait(); while (gc_during_regexp_ < kRequiredGCs) { { v8::Locker lock; // TODO(lrn): Perhaps create some garbage before collecting. i::Heap::CollectAllGarbage(false); gc_count_++; } i::OS::Sleep(1); } gc_success_ = true; } void LongRunningRegExp() { block_->Signal(); // Enable garbage collection thread on next preemption. int rounds = 0; while (gc_during_regexp_ < kRequiredGCs) { int gc_before = gc_count_; { // Match 15-30 "a"'s against 14 and a "b". 
const char* c_source = "/a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaa/" ".exec('aaaaaaaaaaaaaaab') === null"; Local<String> source = String::New(c_source); Local<Script> script = Script::Compile(source); Local<Value> result = script->Run(); if (!result->BooleanValue()) { gc_during_regexp_ = kRequiredGCs; // Allow gc thread to exit. return; } } { // Match 15-30 "a"'s against 15 and a "b". const char* c_source = "/a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaa/" ".exec('aaaaaaaaaaaaaaaab')[0] === 'aaaaaaaaaaaaaaaa'"; Local<String> source = String::New(c_source); Local<Script> script = Script::Compile(source); Local<Value> result = script->Run(); if (!result->BooleanValue()) { gc_during_regexp_ = kRequiredGCs; return; } } int gc_after = gc_count_; gc_during_regexp_ += gc_after - gc_before; rounds++; i::OS::Sleep(1); } regexp_success_ = true; } i::Semaphore* block_; int gc_count_; int gc_during_regexp_; bool regexp_success_; bool gc_success_; }; // Test that a regular expression execution can be interrupted and // survive a garbage collection. TEST(RegExpInterruption) { v8::Locker lock; v8::V8::Initialize(); v8::HandleScope scope; Local<Context> local_env; { LocalContext env; local_env = env.local(); } // Local context should still be live. CHECK(!local_env.IsEmpty()); local_env->Enter(); // Should complete without problems. RegExpInterruptTest().RunTest(); local_env->Exit(); } class ApplyInterruptTest { public: ApplyInterruptTest() : block_(NULL) {} ~ApplyInterruptTest() { delete block_; } void RunTest() { block_ = i::OS::CreateSemaphore(0); gc_count_ = 0; gc_during_apply_ = 0; apply_success_ = false; gc_success_ = false; GCThread gc_thread(this); gc_thread.Start(); v8::Locker::StartPreemption(1); LongRunningApply(); { v8::Unlocker unlock; gc_thread.Join(); } v8::Locker::StopPreemption(); CHECK(apply_success_); CHECK(gc_success_); } private: // Number of garbage collections required. 
static const int kRequiredGCs = 2; class GCThread : public i::Thread { public: explicit GCThread(ApplyInterruptTest* test) : test_(test) {} virtual void Run() { test_->CollectGarbage(); } private: ApplyInterruptTest* test_; }; void CollectGarbage() { block_->Wait(); while (gc_during_apply_ < kRequiredGCs) { { v8::Locker lock; i::Heap::CollectAllGarbage(false); gc_count_++; } i::OS::Sleep(1); } gc_success_ = true; } void LongRunningApply() { block_->Signal(); int rounds = 0; while (gc_during_apply_ < kRequiredGCs) { int gc_before = gc_count_; { const char* c_source = "function do_very_little(bar) {" " this.foo = bar;" "}" "for (var i = 0; i < 100000; i++) {" " do_very_little.apply(this, ['bar']);" "}"; Local<String> source = String::New(c_source); Local<Script> script = Script::Compile(source); Local<Value> result = script->Run(); // Check that no exception was thrown. CHECK(!result.IsEmpty()); } int gc_after = gc_count_; gc_during_apply_ += gc_after - gc_before; rounds++; } apply_success_ = true; } i::Semaphore* block_; int gc_count_; int gc_during_apply_; bool apply_success_; bool gc_success_; }; // Test that nothing bad happens if we get a preemption just when we were // about to do an apply(). TEST(ApplyInterruption) { v8::Locker lock; v8::V8::Initialize(); v8::HandleScope scope; Local<Context> local_env; { LocalContext env; local_env = env.local(); } // Local context should still be live. CHECK(!local_env.IsEmpty()); local_env->Enter(); // Should complete without problems. ApplyInterruptTest().RunTest(); local_env->Exit(); } // Verify that we can clone an object TEST(ObjectClone) { v8::HandleScope scope; LocalContext env; const char* sample = "var rv = {};" \ "rv.alpha = 'hello';" \ "rv.beta = 123;" \ "rv;"; // Create an object, verify basics. 
Local<Value> val = CompileRun(sample); CHECK(val->IsObject()); Local<v8::Object> obj = Local<v8::Object>::Cast(val); obj->Set(v8_str("gamma"), v8_str("cloneme")); CHECK_EQ(v8_str("hello"), obj->Get(v8_str("alpha"))); CHECK_EQ(v8::Integer::New(123), obj->Get(v8_str("beta"))); CHECK_EQ(v8_str("cloneme"), obj->Get(v8_str("gamma"))); // Clone it. Local<v8::Object> clone = obj->Clone(); CHECK_EQ(v8_str("hello"), clone->Get(v8_str("alpha"))); CHECK_EQ(v8::Integer::New(123), clone->Get(v8_str("beta"))); CHECK_EQ(v8_str("cloneme"), clone->Get(v8_str("gamma"))); // Set a property on the clone, verify each object. clone->Set(v8_str("beta"), v8::Integer::New(456)); CHECK_EQ(v8::Integer::New(123), obj->Get(v8_str("beta"))); CHECK_EQ(v8::Integer::New(456), clone->Get(v8_str("beta"))); } class AsciiVectorResource : public v8::String::ExternalAsciiStringResource { public: explicit AsciiVectorResource(i::Vector<const char> vector) : data_(vector) {} virtual ~AsciiVectorResource() {} virtual size_t length() const { return data_.length(); } virtual const char* data() const { return data_.start(); } private: i::Vector<const char> data_; }; class UC16VectorResource : public v8::String::ExternalStringResource { public: explicit UC16VectorResource(i::Vector<const i::uc16> vector) : data_(vector) {} virtual ~UC16VectorResource() {} virtual size_t length() const { return data_.length(); } virtual const i::uc16* data() const { return data_.start(); } private: i::Vector<const i::uc16> data_; }; static void MorphAString(i::String* string, AsciiVectorResource* ascii_resource, UC16VectorResource* uc16_resource) { CHECK(i::StringShape(string).IsExternal()); if (string->IsAsciiRepresentation()) { // Check old map is not symbol or long. CHECK(string->map() == i::Heap::external_ascii_string_map()); // Morph external string to be TwoByte string. 
string->set_map(i::Heap::external_string_map()); i::ExternalTwoByteString* morphed = i::ExternalTwoByteString::cast(string); morphed->set_resource(uc16_resource); } else { // Check old map is not symbol or long. CHECK(string->map() == i::Heap::external_string_map()); // Morph external string to be ASCII string. string->set_map(i::Heap::external_ascii_string_map()); i::ExternalAsciiString* morphed = i::ExternalAsciiString::cast(string); morphed->set_resource(ascii_resource); } } // Test that we can still flatten a string if the components it is built up // from have been turned into 16 bit strings in the mean time. THREADED_TEST(MorphCompositeStringTest) { const char* c_string = "Now is the time for all good men" " to come to the aid of the party"; uint16_t* two_byte_string = AsciiToTwoByteString(c_string); { v8::HandleScope scope; LocalContext env; AsciiVectorResource ascii_resource( i::Vector<const char>(c_string, i::StrLength(c_string))); UC16VectorResource uc16_resource( i::Vector<const uint16_t>(two_byte_string, i::StrLength(c_string))); Local<String> lhs(v8::Utils::ToLocal( i::Factory::NewExternalStringFromAscii(&ascii_resource))); Local<String> rhs(v8::Utils::ToLocal( i::Factory::NewExternalStringFromAscii(&ascii_resource))); env->Global()->Set(v8_str("lhs"), lhs); env->Global()->Set(v8_str("rhs"), rhs); CompileRun( "var cons = lhs + rhs;" "var slice = lhs.substring(1, lhs.length - 1);" "var slice_on_cons = (lhs + rhs).substring(1, lhs.length *2 - 1);"); MorphAString(*v8::Utils::OpenHandle(*lhs), &ascii_resource, &uc16_resource); MorphAString(*v8::Utils::OpenHandle(*rhs), &ascii_resource, &uc16_resource); // Now do some stuff to make sure the strings are flattened, etc. 
  // (Tail of a string flattening/slicing test whose start is above this
  // chunk: verifies that regexp-triggered flattening left cons/sliced
  // strings with the expected contents.)
  CompileRun("/[^a-z]/.test(cons);"
             "/[^a-z]/.test(slice);"
             "/[^a-z]/.test(slice_on_cons);");
  const char* expected_cons =
      "Now is the time for all good men to come to the aid of the party"
      "Now is the time for all good men to come to the aid of the party";
  const char* expected_slice =
      "ow is the time for all good men to come to the aid of the part";
  const char* expected_slice_on_cons =
      "ow is the time for all good men to come to the aid of the party"
      "Now is the time for all good men to come to the aid of the part";
  CHECK_EQ(String::New(expected_cons), env->Global()->Get(v8_str("cons")));
  CHECK_EQ(String::New(expected_slice), env->Global()->Get(v8_str("slice")));
  CHECK_EQ(String::New(expected_slice_on_cons),
           env->Global()->Get(v8_str("slice_on_cons")));
  }
}


// Compile a few short sources wrapped as external two-byte strings.
// Regression coverage for the scanner (see r2703 note below).
TEST(CompileExternalTwoByteSource) {
  v8::HandleScope scope;
  LocalContext context;

  // This is a very short list of sources, which currently is to check for a
  // regression caused by r2703.
  const char* ascii_sources[] = {
    "0.5",
    "-0.5",   // This mainly testes PushBack in the Scanner.
    "--0.5",  // This mainly testes PushBack in the Scanner.
    NULL
  };

  // Compile the sources as external two byte strings.
  for (int i = 0; ascii_sources[i] != NULL; i++) {
    // NOTE(review): two_byte_string is heap-allocated by AsciiToTwoByteString
    // and never freed here -- looks like a leak; confirm whether anything
    // takes ownership before adding an i::DeleteArray.
    uint16_t* two_byte_string = AsciiToTwoByteString(ascii_sources[i]);
    UC16VectorResource uc16_resource(
        i::Vector<const uint16_t>(two_byte_string,
                                  i::StrLength(ascii_sources[i])));
    v8::Local<v8::String> source = v8::String::NewExternal(&uc16_resource);
    v8::Script::Compile(source);
  }
}


// Harness that runs a long regexp on one thread while a second thread
// repeatedly morphs the subject string between ascii and two-byte
// external representations, relying on Locker preemption to interleave.
class RegExpStringModificationTest {
 public:
  RegExpStringModificationTest()
      : block_(i::OS::CreateSemaphore(0)),
        morphs_(0),
        morphs_during_regexp_(0),
        ascii_resource_(i::Vector<const char>("aaaaaaaaaaaaaab", 15)),
        uc16_resource_(i::Vector<const uint16_t>(two_byte_content_, 15)) {}
  ~RegExpStringModificationTest() { delete block_; }
  void RunTest() {
    regexp_success_ = false;
    morph_success_ = false;

    // Initialize the contents of two_byte_content_ to be a uc16 representation
    // of "aaaaaaaaaaaaaab".
    for (int i = 0; i < 14; i++) {
      two_byte_content_[i] = 'a';
    }
    two_byte_content_[14] = 'b';

    // Create the input string for the regexp - the one we are going to change
    // properties of.
    input_ = i::Factory::NewExternalStringFromAscii(&ascii_resource_);

    // Inject the input as a global variable.
    i::Handle<i::String> input_name =
        i::Factory::NewStringFromAscii(i::Vector<const char>("input", 5));
    i::Top::global_context()->global()->SetProperty(*input_name, *input_, NONE);

    // Start the morphing thread and enable preemption so the two threads
    // can interleave while LongRunningRegExp() holds the Locker.
    MorphThread morph_thread(this);
    morph_thread.Start();
    v8::Locker::StartPreemption(1);
    LongRunningRegExp();
    {
      // Release the V8 lock while joining so the morph thread can finish.
      v8::Unlocker unlock;
      morph_thread.Join();
    }
    v8::Locker::StopPreemption();
    CHECK(regexp_success_);
    CHECK(morph_success_);
  }
 private:
  // Number of string modifications required.
  static const int kRequiredModifications = 5;
  // Upper bound on morph attempts so the test terminates even if the
  // regexp thread never observes a modification.
  static const int kMaxModifications = 100;

  // Worker thread: repeatedly morphs the subject string (see MorphString).
  class MorphThread : public i::Thread {
   public:
    explicit MorphThread(RegExpStringModificationTest* test)
        : test_(test) {}
    virtual void Run() {
      test_->MorphString();
    }
   private:
    RegExpStringModificationTest* test_;
  };

  void MorphString() {
    // Wait until the regexp thread signals it is about to start matching.
    block_->Wait();
    while (morphs_during_regexp_ < kRequiredModifications &&
           morphs_ < kMaxModifications) {
      {
        v8::Locker lock;
        // Swap string between ascii and two-byte representation.
        i::String* string = *input_;
        MorphAString(string, &ascii_resource_, &uc16_resource_);
        morphs_++;
      }
      i::OS::Sleep(1);
    }
    morph_success_ = true;
  }

  void LongRunningRegExp() {
    block_->Signal();  // Enable morphing thread on next preemption.
    while (morphs_during_regexp_ < kRequiredModifications &&
           morphs_ < kMaxModifications) {
      int morphs_before = morphs_;
      {
        // Match 15-30 "a"'s against 14 and a "b".
        const char* c_source =
            "/a?a?a?a?a?a?a?a?a?a?a?a?a?a?aaaaaaaaaaaaaaaa/"
            ".exec(input) === null";
        Local<String> source = String::New(c_source);
        Local<Script> script = Script::Compile(source);
        Local<Value> result = script->Run();
        CHECK(result->IsTrue());
      }
      // Count only the morphs that happened while the regexp was running.
      int morphs_after = morphs_;
      morphs_during_regexp_ += morphs_after - morphs_before;
    }
    regexp_success_ = true;
  }

  i::uc16 two_byte_content_[15];   // backing store for the two-byte shape
  i::Semaphore* block_;            // start rendezvous between the threads
  int morphs_;                     // total morphs performed
  int morphs_during_regexp_;       // morphs observed while regexp ran
  bool regexp_success_;
  bool morph_success_;
  i::Handle<i::String> input_;     // the string being morphed
  AsciiVectorResource ascii_resource_;
  UC16VectorResource uc16_resource_;
};


// Test that a regular expression execution can be interrupted and
// the string changed without failing.
TEST(RegExpStringModification) {
  v8::Locker lock;
  v8::V8::Initialize();
  v8::HandleScope scope;
  Local<Context> local_env;
  {
    LocalContext env;
    local_env = env.local();
  }

  // Local context should still be live.
  CHECK(!local_env.IsEmpty());
  local_env->Enter();

  // Should complete without problems.
  RegExpStringModificationTest().RunTest();

  local_env->Exit();
}


// Test that we can set a property on the global object even if there
// is a read-only property in the prototype chain.
TEST(ReadOnlyPropertyInGlobalProto) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
  LocalContext context(0, templ);
  v8::Handle<v8::Object> global = context->Global();
  v8::Handle<v8::Object> global_proto =
      v8::Handle<v8::Object>::Cast(global->Get(v8_str("__proto__")));
  global_proto->Set(v8_str("x"), v8::Integer::New(0), v8::ReadOnly);
  global_proto->Set(v8_str("y"), v8::Integer::New(0), v8::ReadOnly);
  // Check without 'eval' or 'with'.
  v8::Handle<v8::Value> res =
      CompileRun("function f() { x = 42; return x; }; f()");
  // Check with 'eval'.
  res = CompileRun("function f() { eval('1'); y = 42; return y; }; f()");
  CHECK_EQ(v8::Integer::New(42), res);
  // Check with 'with'.
  res = CompileRun("function f() { with (this) { y = 42 }; return y; }; f()");
  CHECK_EQ(v8::Integer::New(42), res);
}

// Shared invocation counters for the ForceSet* tests below; each test
// resets them so the exact CHECK_EQ counts hold.
static int force_set_set_count = 0;
static int force_set_get_count = 0;
// When true the getter returns an empty handle, i.e. "not intercepted".
bool pass_on_get = false;

static v8::Handle<v8::Value> ForceSetGetter(v8::Local<v8::String> name,
                                            const v8::AccessorInfo& info) {
  force_set_get_count++;
  if (pass_on_get) {
    return v8::Handle<v8::Value>();
  } else {
    return v8::Int32::New(3);
  }
}

// Accessor setter: counts the call but stores nothing.
static void ForceSetSetter(v8::Local<v8::String> name,
                           v8::Local<v8::Value> value,
                           const v8::AccessorInfo& info) {
  force_set_set_count++;
}

// Named-property interceptor setter: counts the call and claims the set
// was handled (returns a non-empty handle) without storing anything.
static v8::Handle<v8::Value> ForceSetInterceptSetter(
    v8::Local<v8::String> name,
    v8::Local<v8::Value> value,
    const v8::AccessorInfo& info) {
  force_set_set_count++;
  return v8::Undefined();
}

// ForceSet must override both read-only properties and accessors.
TEST(ForceSet) {
  force_set_get_count = 0;
  force_set_set_count = 0;
  pass_on_get = false;

  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
  v8::Handle<v8::String> access_property = v8::String::New("a");
  templ->SetAccessor(access_property, ForceSetGetter, ForceSetSetter);
  LocalContext context(NULL, templ);
  v8::Handle<v8::Object> global = context->Global();

  // Ordinary properties
  v8::Handle<v8::String> simple_property = v8::String::New("p");
  global->Set(simple_property, v8::Int32::New(4), v8::ReadOnly);
  CHECK_EQ(4, global->Get(simple_property)->Int32Value());
  // This should fail because the property is read-only
  global->Set(simple_property, v8::Int32::New(5));
  CHECK_EQ(4, global->Get(simple_property)->Int32Value());
  // This should succeed even though the property is read-only
  global->ForceSet(simple_property, v8::Int32::New(6));
  CHECK_EQ(6, global->Get(simple_property)->Int32Value());

  // Accessors
  CHECK_EQ(0, force_set_set_count);
  CHECK_EQ(0, force_set_get_count);
  CHECK_EQ(3, global->Get(access_property)->Int32Value());
  // Setting the property shouldn't override it, just call the setter
  // which in this case does nothing.
  global->Set(access_property, v8::Int32::New(7));
  CHECK_EQ(3, global->Get(access_property)->Int32Value());
  CHECK_EQ(1, force_set_set_count);
  CHECK_EQ(2, force_set_get_count);
  // Forcing the property to be set should override the accessor without
  // calling it
  global->ForceSet(access_property, v8::Int32::New(8));
  CHECK_EQ(8, global->Get(access_property)->Int32Value());
  CHECK_EQ(1, force_set_set_count);
  CHECK_EQ(2, force_set_get_count);
}

// ForceSet must bypass named-property interceptors and install the
// value as a real local property.
TEST(ForceSetWithInterceptor) {
  force_set_get_count = 0;
  force_set_set_count = 0;
  pass_on_get = false;

  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
  templ->SetNamedPropertyHandler(ForceSetGetter, ForceSetInterceptSetter);
  LocalContext context(NULL, templ);
  v8::Handle<v8::Object> global = context->Global();

  v8::Handle<v8::String> some_property = v8::String::New("a");
  CHECK_EQ(0, force_set_set_count);
  CHECK_EQ(0, force_set_get_count);
  CHECK_EQ(3, global->Get(some_property)->Int32Value());
  // Setting the property shouldn't override it, just call the setter
  // which in this case does nothing.
  global->Set(some_property, v8::Int32::New(7));
  CHECK_EQ(3, global->Get(some_property)->Int32Value());
  CHECK_EQ(1, force_set_set_count);
  CHECK_EQ(2, force_set_get_count);
  // Getting the property when the interceptor returns an empty handle
  // should yield undefined, since the property isn't present on the
  // object itself yet.
  pass_on_get = true;
  CHECK(global->Get(some_property)->IsUndefined());
  CHECK_EQ(1, force_set_set_count);
  CHECK_EQ(3, force_set_get_count);
  // Forcing the property to be set should cause the value to be
  // set locally without calling the interceptor.
  global->ForceSet(some_property, v8::Int32::New(8));
  CHECK_EQ(8, global->Get(some_property)->Int32Value());
  CHECK_EQ(1, force_set_set_count);
  CHECK_EQ(4, force_set_get_count);
  // Reenabling the interceptor should cause it to take precedence over
  // the property
  pass_on_get = false;
  CHECK_EQ(3, global->Get(some_property)->Int32Value());
  CHECK_EQ(1, force_set_set_count);
  CHECK_EQ(5, force_set_get_count);
  // The interceptor should also work for other properties
  CHECK_EQ(3, global->Get(v8::String::New("b"))->Int32Value());
  CHECK_EQ(1, force_set_set_count);
  CHECK_EQ(6, force_set_get_count);
}

// ForceDelete must remove even DontDelete properties.
THREADED_TEST(ForceDelete) {
  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
  LocalContext context(NULL, templ);
  v8::Handle<v8::Object> global = context->Global();

  // Ordinary properties
  v8::Handle<v8::String> simple_property = v8::String::New("p");
  global->Set(simple_property, v8::Int32::New(4), v8::DontDelete);
  CHECK_EQ(4, global->Get(simple_property)->Int32Value());
  // This should fail because the property is dont-delete.
  CHECK(!global->Delete(simple_property));
  CHECK_EQ(4, global->Get(simple_property)->Int32Value());
  // This should succeed even though the property is dont-delete.
  CHECK(global->ForceDelete(simple_property));
  CHECK(global->Get(simple_property)->IsUndefined());
}

// Counter / behavior toggle for the deleter interceptor below.
static int force_delete_interceptor_count = 0;
static bool pass_on_delete = false;

// Deleter interceptor: counts calls; returns empty handle ("not
// intercepted") when pass_on_delete, otherwise claims success.
static v8::Handle<v8::Boolean> ForceDeleteDeleter(
    v8::Local<v8::String> name,
    const v8::AccessorInfo& info) {
  force_delete_interceptor_count++;
  if (pass_on_delete) {
    return v8::Handle<v8::Boolean>();
  } else {
    return v8::True();
  }
}

// ForceDelete must bypass deleter interceptors.
THREADED_TEST(ForceDeleteWithInterceptor) {
  force_delete_interceptor_count = 0;
  pass_on_delete = false;

  v8::HandleScope scope;
  v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New();
  templ->SetNamedPropertyHandler(0, 0, 0, ForceDeleteDeleter);
  LocalContext context(NULL, templ);
  v8::Handle<v8::Object> global = context->Global();

  v8::Handle<v8::String> some_property = v8::String::New("a");
  global->Set(some_property, v8::Integer::New(42), v8::DontDelete);

  // Deleting a property should get intercepted and nothing should
  // happen.
  CHECK_EQ(0, force_delete_interceptor_count);
  CHECK(global->Delete(some_property));
  CHECK_EQ(1, force_delete_interceptor_count);
  CHECK_EQ(42, global->Get(some_property)->Int32Value());
  // Deleting the property when the interceptor returns an empty
  // handle should not delete the property since it is DontDelete.
  pass_on_delete = true;
  CHECK(!global->Delete(some_property));
  CHECK_EQ(2, force_delete_interceptor_count);
  CHECK_EQ(42, global->Get(some_property)->Int32Value());
  // Forcing the property to be deleted should delete the value
  // without calling the interceptor.
  CHECK(global->ForceDelete(some_property));
  CHECK(global->Get(some_property)->IsUndefined());
  CHECK_EQ(2, force_delete_interceptor_count);
}

// Make sure that forcing a delete invalidates any IC stubs, so we
// don't read the hole value.
THREADED_TEST(ForceDeleteIC) {
  v8::HandleScope scope;
  LocalContext context;
  // Create a DontDelete variable on the global object.
  CompileRun("this.__proto__ = { foo: 'horse' };"
             "var foo = 'fish';"
             "function f() { return foo.length; }");
  // Initialize the IC for foo in f.
  CompileRun("for (var i = 0; i < 4; i++) f();");
  // Make sure the value of foo is correct before the deletion.
  CHECK_EQ(4, CompileRun("f()")->Int32Value());
  // Force the deletion of foo.
  CHECK(context->Global()->ForceDelete(v8_str("foo")));
  // Make sure the value for foo is read from the prototype, and that
  // we don't get in trouble with reading the deleted cell value
  // sentinel.
  CHECK_EQ(5, CompileRun("f()")->Int32Value());
}


// Three contexts used by GetCallingContext below.
v8::Persistent<Context> calling_context0;
v8::Persistent<Context> calling_context1;
v8::Persistent<Context> calling_context2;


// Check that the call to the callback is initiated in
// calling_context2, the directly calling context is calling_context1
// and the callback itself is in calling_context0.
static v8::Handle<Value> GetCallingContextCallback(const v8::Arguments& args) {
  ApiTestFuzzer::Fuzz();
  CHECK(Context::GetCurrent() == calling_context0);
  CHECK(Context::GetCalling() == calling_context1);
  CHECK(Context::GetEntered() == calling_context2);
  return v8::Integer::New(42);
}


THREADED_TEST(GetCallingContext) {
  v8::HandleScope scope;

  calling_context0 = Context::New();
  calling_context1 = Context::New();
  calling_context2 = Context::New();

  // Allow cross-domain access.
  Local<String> token = v8_str("<security token>");
  calling_context0->SetSecurityToken(token);
  calling_context1->SetSecurityToken(token);
  calling_context2->SetSecurityToken(token);

  // Create an object with a C++ callback in context0.
  calling_context0->Enter();
  Local<v8::FunctionTemplate> callback_templ =
      v8::FunctionTemplate::New(GetCallingContextCallback);
  calling_context0->Global()->Set(v8_str("callback"),
                                  callback_templ->GetFunction());
  calling_context0->Exit();

  // Expose context0 in context1 and setup a function that calls the
  // callback function.
  calling_context1->Enter();
  calling_context1->Global()->Set(v8_str("context0"),
                                  calling_context0->Global());
  CompileRun("function f() { context0.callback() }");
  calling_context1->Exit();

  // Expose context1 in context2 and call the callback function in
  // context0 indirectly through f in context1.
  calling_context2->Enter();
  calling_context2->Global()->Set(v8_str("context1"),
                                  calling_context1->Global());
  CompileRun("context1.f()");
  calling_context2->Exit();

  // Dispose the contexts to allow them to be garbage collected.
  calling_context0.Dispose();
  calling_context1.Dispose();
  calling_context2.Dispose();
  calling_context0.Clear();
  calling_context1.Clear();
  calling_context2.Clear();
}


// Check that a variable declaration with no explicit initialization
// value does not shadow an existing property in the prototype chain.
//
// This is consistent with Firefox and Safari.
//
// See http://crbug.com/12548.
THREADED_TEST(InitGlobalVarInProtoChain) {
  v8::HandleScope scope;
  LocalContext context;
  // Introduce a variable in the prototype chain.
  CompileRun("__proto__.x = 42");
  v8::Handle<v8::Value> result = CompileRun("var x; x");
  CHECK(!result->IsUndefined());
  CHECK_EQ(42, result->Int32Value());
}


// Regression test for issue 398.
// If a function is added to an object, creating a constant function
// field, and the result is cloned, replacing the constant function on the
// original should not affect the clone.
// See http://code.google.com/p/v8/issues/detail?id=398
THREADED_TEST(ReplaceConstantFunction) {
  v8::HandleScope scope;
  LocalContext context;
  v8::Handle<v8::Object> obj = v8::Object::New();
  v8::Handle<v8::FunctionTemplate> func_templ = v8::FunctionTemplate::New();
  v8::Handle<v8::String> foo_string = v8::String::New("foo");
  obj->Set(foo_string, func_templ->GetFunction());
  v8::Handle<v8::Object> obj_clone = obj->Clone();
  obj_clone->Set(foo_string, v8::String::New("Hello"));
  CHECK(!obj->Get(foo_string)->IsUndefined());
}


// Regression test for http://crbug.com/16276.
// Regression test for http://crbug.com/16276: a dictionary-mode load IC
// must keep working after the global object is detached from its proxy.
THREADED_TEST(Regress16276) {
  v8::HandleScope scope;
  LocalContext context;
  // Force the IC in f to be a dictionary load IC.
  CompileRun("function f(obj) { return obj.x; }\n"
             "var obj = { x: { foo: 42 }, y: 87 };\n"
             "var x = obj.x;\n"
             "delete obj.y;\n"
             "for (var i = 0; i < 5; i++) f(obj);");
  // Detach the global object to make 'this' refer directly to the
  // global object (not the proxy), and make sure that the dictionary
  // load IC doesn't mess up loading directly from the global object.
  context->DetachGlobal();
  CHECK_EQ(42, CompileRun("f(this).foo")->Int32Value());
}


// Exercise pixel-array (clamped byte) elements: reads/writes through the
// API and through JS, clamping/rounding of out-of-range and non-integer
// values, and interaction with prototypes, getters and regular elements.
THREADED_TEST(PixelArray) {
  v8::HandleScope scope;
  LocalContext context;
  const int kElementCount = 260;
  uint8_t* pixel_data = reinterpret_cast<uint8_t*>(malloc(kElementCount));
  i::Handle<i::PixelArray> pixels = i::Factory::NewPixelArray(kElementCount,
                                                              pixel_data);
  i::Heap::CollectAllGarbage(false);  // Force GC to trigger verification.
  for (int i = 0; i < kElementCount; i++) {
    pixels->set(i, i % 256);
  }
  i::Heap::CollectAllGarbage(false);  // Force GC to trigger verification.
  for (int i = 0; i < kElementCount; i++) {
    CHECK_EQ(i % 256, pixels->get(i));
    CHECK_EQ(i % 256, pixel_data[i]);
  }

  v8::Handle<v8::Object> obj = v8::Object::New();
  i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
  // Set the elements to be the pixels.
  // jsobj->set_elements(*pixels);
  obj->SetIndexedPropertiesToPixelData(pixel_data, kElementCount);
  CHECK_EQ(1, i::Smi::cast(jsobj->GetElement(1))->value());
  obj->Set(v8_str("field"), v8::Int32::New(1503));
  context->Global()->Set(v8_str("pixels"), obj);
  v8::Handle<v8::Value> result = CompileRun("pixels.field");
  CHECK_EQ(1503, result->Int32Value());
  result = CompileRun("pixels[1]");
  CHECK_EQ(1, result->Int32Value());

  // Negative values clamp to 0; the expression still yields the raw value.
  result = CompileRun("var sum = 0;"
                      "for (var i = 0; i < 8; i++) {"
                      "  sum += pixels[i] = pixels[i] = -i;"
                      "}"
                      "sum;");
  CHECK_EQ(-28, result->Int32Value());

  result = CompileRun("var sum = 0;"
                      "for (var i = 0; i < 8; i++) {"
                      "  sum += pixels[i] = pixels[i] = 0;"
                      "}"
                      "sum;");
  CHECK_EQ(0, result->Int32Value());

  result = CompileRun("var sum = 0;"
                      "for (var i = 0; i < 8; i++) {"
                      "  sum += pixels[i] = pixels[i] = 255;"
                      "}"
                      "sum;");
  CHECK_EQ(8 * 255, result->Int32Value());

  // Values above 255 clamp to 255 in the store but flow through raw.
  result = CompileRun("var sum = 0;"
                      "for (var i = 0; i < 8; i++) {"
                      "  sum += pixels[i] = pixels[i] = 256 + i;"
                      "}"
                      "sum;");
  CHECK_EQ(2076, result->Int32Value());

  result = CompileRun("var sum = 0;"
                      "for (var i = 0; i < 8; i++) {"
                      "  sum += pixels[i] = pixels[i] = i;"
                      "}"
                      "sum;");
  CHECK_EQ(28, result->Int32Value());

  result = CompileRun("var sum = 0;"
                      "for (var i = 0; i < 8; i++) {"
                      "  sum += pixels[i];"
                      "}"
                      "sum;");
  CHECK_EQ(28, result->Int32Value());

  // Stores through the internal SetElement path clamp the same way.
  i::Handle<i::Smi> value(i::Smi::FromInt(2));
  i::SetElement(jsobj, 1, value);
  CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(1))->value());
  *value.location() = i::Smi::FromInt(256);
  i::SetElement(jsobj, 1, value);
  CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(1))->value());
  *value.location() = i::Smi::FromInt(-1);
  i::SetElement(jsobj, 1, value);
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(1))->value());

  // (i * 65) - 109 produces values below 0 and above 255 -> clamped.
  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[i] = (i * 65) - 109;"
                      "}"
                      "pixels[1] + pixels[6];");
  CHECK_EQ(255, result->Int32Value());
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(0))->value());
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(1))->value());
  CHECK_EQ(21, i::Smi::cast(jsobj->GetElement(2))->value());
  CHECK_EQ(86, i::Smi::cast(jsobj->GetElement(3))->value());
  CHECK_EQ(151, i::Smi::cast(jsobj->GetElement(4))->value());
  CHECK_EQ(216, i::Smi::cast(jsobj->GetElement(5))->value());
  CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(6))->value());
  CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(7))->value());
  result = CompileRun("var sum = 0;"
                      "for (var i = 0; i < 8; i++) {"
                      "  sum += pixels[i];"
                      "}"
                      "sum;");
  CHECK_EQ(984, result->Int32Value());

  // Non-integer values round to nearest (ties to even).
  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[i] = (i * 1.1);"
                      "}"
                      "pixels[1] + pixels[6];");
  CHECK_EQ(8, result->Int32Value());
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(0))->value());
  CHECK_EQ(1, i::Smi::cast(jsobj->GetElement(1))->value());
  CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(2))->value());
  CHECK_EQ(3, i::Smi::cast(jsobj->GetElement(3))->value());
  CHECK_EQ(4, i::Smi::cast(jsobj->GetElement(4))->value());
  CHECK_EQ(6, i::Smi::cast(jsobj->GetElement(5))->value());
  CHECK_EQ(7, i::Smi::cast(jsobj->GetElement(6))->value());
  CHECK_EQ(8, i::Smi::cast(jsobj->GetElement(7))->value());

  // undefined, non-numeric strings, NaN and +/-Infinity convert to
  // clamped numeric values.
  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[7] = undefined;"
                      "}"
                      "pixels[7];");
  CHECK_EQ(0, result->Int32Value());
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(7))->value());

  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[6] = '2.3';"
                      "}"
                      "pixels[6];");
  CHECK_EQ(2, result->Int32Value());
  CHECK_EQ(2, i::Smi::cast(jsobj->GetElement(6))->value());

  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[5] = NaN;"
                      "}"
                      "pixels[5];");
  CHECK_EQ(0, result->Int32Value());
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5))->value());

  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[8] = Infinity;"
                      "}"
                      "pixels[8];");
  CHECK_EQ(255, result->Int32Value());
  CHECK_EQ(255, i::Smi::cast(jsobj->GetElement(8))->value());

  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  pixels[9] = -Infinity;"
                      "}"
                      "pixels[9];");
  CHECK_EQ(0, result->Int32Value());
  CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(9))->value());

  // delete on pixel-array elements is ignored.
  result = CompileRun("pixels[3] = 33;"
                      "delete pixels[3];"
                      "pixels[3];");
  CHECK_EQ(33, result->Int32Value());

  // __defineGetter__ on an element index does not shadow the pixel data.
  result = CompileRun("pixels[0] = 10; pixels[1] = 11;"
                      "pixels[2] = 12; pixels[3] = 13;"
                      "pixels.__defineGetter__('2',"
                      "function() { return 120; });"
                      "pixels[2];");
  CHECK_EQ(12, result->Int32Value());

  result = CompileRun("var js_array = new Array(40);"
                      "js_array[0] = 77;"
                      "js_array;");
  CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());

  // concat through a prototype chain containing a pixel array.
  result = CompileRun("pixels[1] = 23;"
                      "pixels.__proto__ = [];"
                      "js_array.__proto__ = pixels;"
                      "js_array.concat(pixels);");
  CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
  CHECK_EQ(23, v8::Object::Cast(*result)->Get(v8_str("1"))->Int32Value());

  result = CompileRun("pixels[1] = 23;");
  CHECK_EQ(23, result->Int32Value());

  // Test for index greater than 255.  Regression test for:
  // http://code.google.com/p/chromium/issues/detail?id=26337.
  result = CompileRun("pixels[256] = 255;");
  CHECK_EQ(255, result->Int32Value());
  result = CompileRun("var i = 0;"
                      "for (var j = 0; j < 8; j++) { i = pixels[256]; }"
                      "i");
  CHECK_EQ(255, result->Int32Value());

  free(pixel_data);
}


// Generic driver for the External*Array tests below.
// array_type selects the element kind; low/high are the legal boundary
// values for that kind and are exercised through JS stores/loads.
template <class ExternalArrayClass, class ElementType>
static void ExternalArrayTestHelper(v8::ExternalArrayType array_type,
                                    int64_t low,
                                    int64_t high) {
  v8::HandleScope scope;
  LocalContext context;
  const int kElementCount = 40;
  int element_size = 0;
  switch (array_type) {
    case v8::kExternalByteArray:
    case v8::kExternalUnsignedByteArray:
      element_size = 1;
      break;
    case v8::kExternalShortArray:
    case v8::kExternalUnsignedShortArray:
      element_size = 2;
      break;
    case v8::kExternalIntArray:
    case v8::kExternalUnsignedIntArray:
    case v8::kExternalFloatArray:
      element_size = 4;
      break;
    default:
      UNREACHABLE();
      break;
  }
  ElementType* array_data =
      static_cast<ElementType*>(malloc(kElementCount * element_size));
  i::Handle<ExternalArrayClass> array =
      i::Handle<ExternalArrayClass>::cast(
          i::Factory::NewExternalArray(kElementCount, array_type, array_data));
  i::Heap::CollectAllGarbage(false);  // Force GC to trigger verification.
  for (int i = 0; i < kElementCount; i++) {
    array->set(i, static_cast<ElementType>(i));
  }
  i::Heap::CollectAllGarbage(false);  // Force GC to trigger verification.
  for (int i = 0; i < kElementCount; i++) {
    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(array->get(i)));
    CHECK_EQ(static_cast<int64_t>(i), static_cast<int64_t>(array_data[i]));
  }

  v8::Handle<v8::Object> obj = v8::Object::New();
  i::Handle<i::JSObject> jsobj = v8::Utils::OpenHandle(*obj);
  // Set the elements to be the external array.
  obj->SetIndexedPropertiesToExternalArrayData(array_data,
                                               array_type,
                                               kElementCount);
  CHECK_EQ(1, static_cast<int>(jsobj->GetElement(1)->Number()));
  obj->Set(v8_str("field"), v8::Int32::New(1503));
  context->Global()->Set(v8_str("ext_array"), obj);
  v8::Handle<v8::Value> result = CompileRun("ext_array.field");
  CHECK_EQ(1503, result->Int32Value());
  result = CompileRun("ext_array[1]");
  CHECK_EQ(1, result->Int32Value());

  // Check pass through of assigned smis
  result = CompileRun("var sum = 0;"
                      "for (var i = 0; i < 8; i++) {"
                      "  sum += ext_array[i] = ext_array[i] = -i;"
                      "}"
                      "sum;");
  CHECK_EQ(-28, result->Int32Value());

  // Check assigned smis
  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  ext_array[i] = i;"
                      "}"
                      "var sum = 0;"
                      "for (var i = 0; i < 8; i++) {"
                      "  sum += ext_array[i];"
                      "}"
                      "sum;");
  CHECK_EQ(28, result->Int32Value());

  // Check assigned smis in reverse order
  result = CompileRun("for (var i = 8; --i >= 0; ) {"
                      "  ext_array[i] = i;"
                      "}"
                      "var sum = 0;"
                      "for (var i = 0; i < 8; i++) {"
                      "  sum += ext_array[i];"
                      "}"
                      "sum;");
  CHECK_EQ(28, result->Int32Value());

  // Check pass through of assigned HeapNumbers
  result = CompileRun("var sum = 0;"
                      "for (var i = 0; i < 16; i+=2) {"
                      "  sum += ext_array[i] = ext_array[i] = (-i * 0.5);"
                      "}"
                      "sum;");
  CHECK_EQ(-28, result->Int32Value());

  // Check assigned HeapNumbers
  result = CompileRun("for (var i = 0; i < 16; i+=2) {"
                      "  ext_array[i] = (i * 0.5);"
                      "}"
                      "var sum = 0;"
                      "for (var i = 0; i < 16; i+=2) {"
                      "  sum += ext_array[i];"
                      "}"
                      "sum;");
  CHECK_EQ(28, result->Int32Value());

  // Check assigned HeapNumbers in reverse order
  result = CompileRun("for (var i = 14; i >= 0; i-=2) {"
                      "  ext_array[i] = (i * 0.5);"
                      "}"
                      "var sum = 0;"
                      "for (var i = 0; i < 16; i+=2) {"
                      "  sum += ext_array[i];"
                      "}"
                      "sum;");
  CHECK_EQ(28, result->Int32Value());

  i::ScopedVector<char> test_buf(1024);

  // Check legal boundary conditions.
  // The repeated loads and stores ensure the ICs are exercised.
  const char* boundary_program =
      "var res = 0;"
      "for (var i = 0; i < 16; i++) {"
      "  ext_array[i] = %lld;"
      "  if (i > 8) {"
      "    res = ext_array[i];"
      "  }"
      "}"
      "res;";
  i::OS::SNPrintF(test_buf,
                  boundary_program,
                  low);
  result = CompileRun(test_buf.start());
  CHECK_EQ(low, result->IntegerValue());

  i::OS::SNPrintF(test_buf,
                  boundary_program,
                  high);
  result = CompileRun(test_buf.start());
  CHECK_EQ(high, result->IntegerValue());

  // Check misprediction of type in IC.
  result = CompileRun("var tmp_array = ext_array;"
                      "var sum = 0;"
                      "for (var i = 0; i < 8; i++) {"
                      "  tmp_array[i] = i;"
                      "  sum += tmp_array[i];"
                      "  if (i == 4) {"
                      "    tmp_array = {};"
                      "  }"
                      "}"
                      "sum;");
  i::Heap::CollectAllGarbage(false);  // Force GC to trigger verification.
  CHECK_EQ(28, result->Int32Value());

  // Make sure out-of-range loads do not throw.
  i::OS::SNPrintF(test_buf,
                  "var caught_exception = false;"
                  "try {"
                  "  ext_array[%d];"
                  "} catch (e) {"
                  "  caught_exception = true;"
                  "}"
                  "caught_exception;",
                  kElementCount);
  result = CompileRun(test_buf.start());
  CHECK_EQ(false, result->BooleanValue());

  // Make sure out-of-range stores do not throw.
  i::OS::SNPrintF(test_buf,
                  "var caught_exception = false;"
                  "try {"
                  "  ext_array[%d] = 1;"
                  "} catch (e) {"
                  "  caught_exception = true;"
                  "}"
                  "caught_exception;",
                  kElementCount);
  result = CompileRun(test_buf.start());
  CHECK_EQ(false, result->BooleanValue());

  // Check other boundary conditions, values and operations.
  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  ext_array[7] = undefined;"
                      "}"
                      "ext_array[7];");
  CHECK_EQ(0, result->Int32Value());
  CHECK_EQ(0, static_cast<int>(jsobj->GetElement(7)->Number()));

  result = CompileRun("for (var i = 0; i < 8; i++) {"
                      "  ext_array[6] = '2.3';"
                      "}"
                      "ext_array[6];");
  CHECK_EQ(2, result->Int32Value());
  CHECK_EQ(2, static_cast<int>(jsobj->GetElement(6)->Number()));

  if (array_type != v8::kExternalFloatArray) {
    // Though the specification doesn't state it, be explicit about
    // converting NaNs and +/-Infinity to zero.
    result = CompileRun("for (var i = 0; i < 8; i++) {"
                        "  ext_array[i] = 5;"
                        "}"
                        "for (var i = 0; i < 8; i++) {"
                        "  ext_array[i] = NaN;"
                        "}"
                        "ext_array[5];");
    CHECK_EQ(0, result->Int32Value());
    CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5))->value());

    result = CompileRun("for (var i = 0; i < 8; i++) {"
                        "  ext_array[i] = 5;"
                        "}"
                        "for (var i = 0; i < 8; i++) {"
                        "  ext_array[i] = Infinity;"
                        "}"
                        "ext_array[5];");
    CHECK_EQ(0, result->Int32Value());
    CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5))->value());

    result = CompileRun("for (var i = 0; i < 8; i++) {"
                        "  ext_array[i] = 5;"
                        "}"
                        "for (var i = 0; i < 8; i++) {"
                        "  ext_array[i] = -Infinity;"
                        "}"
                        "ext_array[5];");
    CHECK_EQ(0, result->Int32Value());
    CHECK_EQ(0, i::Smi::cast(jsobj->GetElement(5))->value());
  }

  // delete on external-array elements is ignored.
  result = CompileRun("ext_array[3] = 33;"
                      "delete ext_array[3];"
                      "ext_array[3];");
  CHECK_EQ(33, result->Int32Value());

  result = CompileRun("ext_array[0] = 10; ext_array[1] = 11;"
                      "ext_array[2] = 12; ext_array[3] = 13;"
                      "ext_array.__defineGetter__('2',"
                      "function() { return 120; });"
                      "ext_array[2];");
  CHECK_EQ(12, result->Int32Value());

  result = CompileRun("var js_array = new Array(40);"
                      "js_array[0] = 77;"
                      "js_array;");
  CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());

  result = CompileRun("ext_array[1] = 23;"
                      "ext_array.__proto__ = [];"
                      "js_array.__proto__ = ext_array;"
                      "js_array.concat(ext_array);");
  CHECK_EQ(77, v8::Object::Cast(*result)->Get(v8_str("0"))->Int32Value());
  CHECK_EQ(23, v8::Object::Cast(*result)->Get(v8_str("1"))->Int32Value());

  result = CompileRun("ext_array[1] = 23;");
  CHECK_EQ(23, result->Int32Value());

  // Test more complex manipulations which cause eax to contain values
  // that won't be completely overwritten by loads from the arrays.
  // This catches bugs in the instructions used for the KeyedLoadIC
  // for byte and word types.
  {
    const int kXSize = 300;
    const int kYSize = 300;
    const int kLargeElementCount = kXSize * kYSize * 4;
    ElementType* large_array_data =
        static_cast<ElementType*>(malloc(kLargeElementCount * element_size));
    // BUG FIX: this used to pass the small `array_data` buffer
    // (kElementCount elements) while claiming kLargeElementCount length,
    // leaving the external array pointing past its backing store.  The
    // large buffer allocated above is the intended backing store.
    i::Handle<ExternalArrayClass> large_array =
        i::Handle<ExternalArrayClass>::cast(
            i::Factory::NewExternalArray(kLargeElementCount,
                                         array_type,
                                         large_array_data));
    v8::Handle<v8::Object> large_obj = v8::Object::New();
    // Set the elements to be the external array.
    large_obj->SetIndexedPropertiesToExternalArrayData(large_array_data,
                                                       array_type,
                                                       kLargeElementCount);
    context->Global()->Set(v8_str("large_array"), large_obj);
    // Initialize contents of a few rows.
    for (int x = 0; x < 300; x++) {
      int row = 0;
      int offset = row * 300 * 4;
      large_array_data[offset + 4 * x + 0] = (ElementType) 127;
      large_array_data[offset + 4 * x + 1] = (ElementType) 0;
      large_array_data[offset + 4 * x + 2] = (ElementType) 0;
      large_array_data[offset + 4 * x + 3] = (ElementType) 127;
      row = 150;
      offset = row * 300 * 4;
      large_array_data[offset + 4 * x + 0] = (ElementType) 127;
      large_array_data[offset + 4 * x + 1] = (ElementType) 0;
      large_array_data[offset + 4 * x + 2] = (ElementType) 0;
      large_array_data[offset + 4 * x + 3] = (ElementType) 127;
      row = 298;
      offset = row * 300 * 4;
      large_array_data[offset + 4 * x + 0] = (ElementType) 127;
      large_array_data[offset + 4 * x + 1] = (ElementType) 0;
      large_array_data[offset + 4 * x + 2] = (ElementType) 0;
      large_array_data[offset + 4 * x + 3] = (ElementType) 127;
    }
    // The goal of the code below is to make "offset" large enough
    // that the computation of the index (which goes into eax) has
    // high bits set which will not be overwritten by a byte or short
    // load.
    result = CompileRun("var failed = false;"
                        "var offset = 0;"
                        "for (var i = 0; i < 300; i++) {"
                        "  if (large_array[4 * i] != 127 ||"
                        "      large_array[4 * i + 1] != 0 ||"
                        "      large_array[4 * i + 2] != 0 ||"
                        "      large_array[4 * i + 3] != 127) {"
                        "    failed = true;"
                        "  }"
                        "}"
                        "offset = 150 * 300 * 4;"
                        "for (var i = 0; i < 300; i++) {"
                        "  if (large_array[offset + 4 * i] != 127 ||"
                        "      large_array[offset + 4 * i + 1] != 0 ||"
                        "      large_array[offset + 4 * i + 2] != 0 ||"
                        "      large_array[offset + 4 * i + 3] != 127) {"
                        "    failed = true;"
                        "  }"
                        "}"
                        "offset = 298 * 300 * 4;"
                        "for (var i = 0; i < 300; i++) {"
                        "  if (large_array[offset + 4 * i] != 127 ||"
                        "      large_array[offset + 4 * i + 1] != 0 ||"
                        "      large_array[offset + 4 * i + 2] != 0 ||"
                        "      large_array[offset + 4 * i + 3] != 127) {"
                        "    failed = true;"
                        "  }"
                        "}"
                        "!failed;");
    CHECK_EQ(true, result->BooleanValue());
    free(large_array_data);
  }

  free(array_data);
}


THREADED_TEST(ExternalByteArray) {
  ExternalArrayTestHelper<v8::internal::ExternalByteArray, int8_t>(
      v8::kExternalByteArray,
      -128,
      127);
}


THREADED_TEST(ExternalUnsignedByteArray) {
  ExternalArrayTestHelper<v8::internal::ExternalUnsignedByteArray, uint8_t>(
      v8::kExternalUnsignedByteArray,
      0,
      255);
}


THREADED_TEST(ExternalShortArray) {
  ExternalArrayTestHelper<v8::internal::ExternalShortArray, int16_t>(
      v8::kExternalShortArray,
      -32768,
      32767);
}


THREADED_TEST(ExternalUnsignedShortArray) {
  ExternalArrayTestHelper<v8::internal::ExternalUnsignedShortArray, uint16_t>(
      v8::kExternalUnsignedShortArray,
      0,
      65535);
}


THREADED_TEST(ExternalIntArray) {
  ExternalArrayTestHelper<v8::internal::ExternalIntArray, int32_t>(
      v8::kExternalIntArray,
      INT_MIN,   // -2147483648
      INT_MAX);  //  2147483647
}


THREADED_TEST(ExternalUnsignedIntArray) {
  ExternalArrayTestHelper<v8::internal::ExternalUnsignedIntArray, uint32_t>(
      v8::kExternalUnsignedIntArray,
      0,
      UINT_MAX);  // 4294967295
}


THREADED_TEST(ExternalFloatArray) {
  ExternalArrayTestHelper<v8::internal::ExternalFloatArray, float>(
      v8::kExternalFloatArray,
      -500,
      500);
}
// Run all element-kind variants of the external-array helper in one test.
// (THREADED_TEST(X) above defines a TestX() entry point for each kind.)
THREADED_TEST(ExternalArrays) {
  TestExternalByteArray();
  TestExternalUnsignedByteArray();
  TestExternalShortArray();
  TestExternalUnsignedShortArray();
  TestExternalIntArray();
  TestExternalUnsignedIntArray();
  TestExternalFloatArray();
}


// A script compiled with Script::Compile is bound to the context it was
// compiled in; one created with Script::New re-binds to whichever context
// is entered when it runs.
THREADED_TEST(ScriptContextDependence) {
  v8::HandleScope scope;
  LocalContext c1;
  const char *source = "foo";
  v8::Handle<v8::Script> dep = v8::Script::Compile(v8::String::New(source));
  v8::Handle<v8::Script> indep = v8::Script::New(v8::String::New(source));
  c1->Global()->Set(v8::String::New("foo"), v8::Integer::New(100));
  CHECK_EQ(dep->Run()->Int32Value(), 100);
  CHECK_EQ(indep->Run()->Int32Value(), 100);
  LocalContext c2;
  c2->Global()->Set(v8::String::New("foo"), v8::Integer::New(101));
  // dep stays bound to c1; indep follows the currently entered context c2.
  CHECK_EQ(dep->Run()->Int32Value(), 100);
  CHECK_EQ(indep->Run()->Int32Value(), 101);
}


// A thrown exception's stack trace must name the function and the
// resource name given at compile time.
THREADED_TEST(StackTrace) {
  v8::HandleScope scope;
  LocalContext context;
  v8::TryCatch try_catch;
  const char *source = "function foo() { FAIL.FAIL; }; foo();";
  v8::Handle<v8::String> src = v8::String::New(source);
  v8::Handle<v8::String> origin = v8::String::New("stack-trace-test");
  v8::Script::New(src, origin)->Run();
  CHECK(try_catch.HasCaught());
  v8::String::Utf8Value stack(try_catch.StackTrace());
  CHECK(strstr(*stack, "at foo (stack-trace-test") != NULL);
}


// Test that idle notification can be handled and eventually returns true.
THREADED_TEST(IdleNotification) {
  bool rv = false;
  // Give the engine up to 100 idle rounds to report it is done.
  for (int i = 0; i < 100; i++) {
    rv = v8::V8::IdleNotification();
    if (rv)
      break;
  }
  CHECK(rv == true);
}


// Receives the stack limit observed inside a JS->C++ callback; compared
// against the limit installed via SetResourceConstraints below.
static uint32_t* stack_limit;

static v8::Handle<Value> GetStackLimitCallback(const v8::Arguments& args) {
  stack_limit = reinterpret_cast<uint32_t*>(i::StackGuard::climit());
  return v8::Undefined();
}


// Uses the address of a local variable to determine the stack top now.
// Given a size, returns an address that is that far from the current
// top of stack.
static uint32_t* ComputeStackLimit(uint32_t size) {
  // Pointer arithmetic scales by sizeof(uint32_t), so stepping &size down by
  // size / sizeof(size) elements moves (roughly) `size` bytes down the stack.
  uint32_t* answer = &size - (size / sizeof(size));
  // If the size is very large and the stack is very near the bottom of
  // memory then the calculation above may wrap around and give an address
  // that is above the (downwards-growing) stack. In that case we return
  // a very low address.
  if (answer > &size) return reinterpret_cast<uint32_t*>(sizeof(size));
  return answer;
}

// Installs a custom stack limit through ResourceConstraints and verifies that
// the stack guard value observed inside a native callback matches it.
TEST(SetResourceConstraints) {
  static const int K = 1024;
  uint32_t* set_limit = ComputeStackLimit(128 * K);

  // Set stack limit.
  v8::ResourceConstraints constraints;
  constraints.set_stack_limit(set_limit);
  CHECK(v8::SetResourceConstraints(&constraints));

  // Execute a script.
  v8::HandleScope scope;
  LocalContext env;
  Local<v8::FunctionTemplate> fun_templ =
      v8::FunctionTemplate::New(GetStackLimitCallback);
  Local<Function> fun = fun_templ->GetFunction();
  env->Global()->Set(v8_str("get_stack_limit"), fun);
  CompileRun("get_stack_limit();");

  CHECK(stack_limit == set_limit);
}

// Same check as above, but the limit is installed while holding the Locker;
// it must still be in effect in a later, separately locked section.
TEST(SetResourceConstraintsInThread) {
  uint32_t* set_limit;
  {
    v8::Locker locker;
    static const int K = 1024;
    set_limit = ComputeStackLimit(128 * K);

    // Set stack limit.
    v8::ResourceConstraints constraints;
    constraints.set_stack_limit(set_limit);
    CHECK(v8::SetResourceConstraints(&constraints));

    // Execute a script.
    v8::HandleScope scope;
    LocalContext env;
    Local<v8::FunctionTemplate> fun_templ =
        v8::FunctionTemplate::New(GetStackLimitCallback);
    Local<Function> fun = fun_templ->GetFunction();
    env->Global()->Set(v8_str("get_stack_limit"), fun);
    CompileRun("get_stack_limit();");

    CHECK(stack_limit == set_limit);
  }
  {
    v8::Locker locker;
    CHECK(stack_limit == set_limit);
  }
}

// HeapStatistics starts zeroed and is only populated by an explicit
// GetHeapStatistics call, after which both sizes are non-zero.
THREADED_TEST(GetHeapStatistics) {
  v8::HandleScope scope;
  LocalContext c1;
  v8::HeapStatistics heap_statistics;
  CHECK_EQ(static_cast<int>(heap_statistics.total_heap_size()), 0);
  CHECK_EQ(static_cast<int>(heap_statistics.used_heap_size()), 0);
  v8::V8::GetHeapStatistics(&heap_statistics);
  CHECK_NE(static_cast<int>(heap_statistics.total_heap_size()), 0);
  CHECK_NE(static_cast<int>(heap_statistics.used_heap_size()), 0);
}

// Reinterprets a raw 64-bit pattern as an IEEE-754 double via memcpy (no
// strict-aliasing violation).
static double DoubleFromBits(uint64_t value) {
  double target;
#ifdef BIG_ENDIAN_FLOATING_POINT
  const int kIntSize = 4;
  // Somebody swapped the lower and higher half of doubles.
  memcpy(&target, reinterpret_cast<char*>(&value) + kIntSize, kIntSize);
  memcpy(reinterpret_cast<char*>(&target) + kIntSize, &value, kIntSize);
#else
  memcpy(&target, &value, sizeof(target));
#endif
  return target;
}

// Inverse of DoubleFromBits: extracts the raw bit pattern of a double.
static uint64_t DoubleToBits(double value) {
  uint64_t target;
#ifdef BIG_ENDIAN_FLOATING_POINT
  const int kIntSize = 4;
  // Somebody swapped the lower and higher half of doubles.
  memcpy(&target, reinterpret_cast<char*>(&value) + kIntSize, kIntSize);
  memcpy(reinterpret_cast<char*>(&target) + kIntSize, &value, kIntSize);
#else
  memcpy(&target, &value, sizeof(target));
#endif
  return target;
}

// Models the Date time-clip operation: values outside +/-8.64e15 ms become
// NaN, everything else is truncated towards zero.
static double DoubleToDateTime(double input) {
  double date_limit = 864e13;
  if (IsNaN(input) || input < -date_limit || input > date_limit) {
    return i::OS::nan_value();
  }
  return (input < 0) ? -(floor(-input)) : floor(input);
}

// We don't have a consistent way to write 64-bit constants syntactically, so we
// split them into two 32-bit constants and combine them programmatically.
// Builds a double from two 32-bit halves (high word first); see comment above.
static double DoubleFromBits(uint32_t high_bits, uint32_t low_bits) {
  return DoubleFromBits((static_cast<uint64_t>(high_bits) << 32) | low_bits);
}

// V8 must never store a signaling NaN: Number::New and Date::New are expected
// to pass non-NaN values through bit-exactly and to quiet any SNaN input.
THREADED_TEST(QuietSignalingNaNs) {
  v8::HandleScope scope;
  LocalContext context;
  v8::TryCatch try_catch;

  // Special double values.
  double snan = DoubleFromBits(0x7ff00000, 0x00000001);
  double qnan = DoubleFromBits(0x7ff80000, 0x00000000);
  double infinity = DoubleFromBits(0x7ff00000, 0x00000000);
  double max_normal = DoubleFromBits(0x7fefffff, 0xffffffffu);
  double min_normal = DoubleFromBits(0x00100000, 0x00000000);
  double max_denormal = DoubleFromBits(0x000fffff, 0xffffffffu);
  double min_denormal = DoubleFromBits(0x00000000, 0x00000001);

  // Date values are capped at +/-100000000 days (times 864e5 ms per day)
  // on either side of the epoch.
  double date_limit = 864e13;

  // Sweep from +SNaN down to -SNaN, crossing the date limit in both signs.
  double test_values[] = {
      snan, qnan, infinity, max_normal, date_limit + 1, date_limit, min_normal,
      max_denormal, min_denormal, 0, -0, -min_denormal, -max_denormal,
      -min_normal, -date_limit, -date_limit - 1, -max_normal, -infinity,
      -qnan, -snan
  };

  int num_test_values = 20;

  for (int i = 0; i < num_test_values; i++) {
    double test_value = test_values[i];

    // Check that Number::New preserves non-NaNs and quiets SNaNs.
    v8::Handle<v8::Value> number = v8::Number::New(test_value);
    double stored_number = number->NumberValue();
    if (!IsNaN(test_value)) {
      CHECK_EQ(test_value, stored_number);
    } else {
      uint64_t stored_bits = DoubleToBits(stored_number);
      // Check if quiet nan (bits 51..62 all set).
      CHECK_EQ(0xfff, static_cast<int>((stored_bits >> 51) & 0xfff));
    }

    // Check that Date::New preserves non-NaNs in the date range and
    // quiets SNaNs.
    v8::Handle<v8::Value> date = v8::Date::New(test_value);
    double expected_stored_date = DoubleToDateTime(test_value);
    double stored_date = date->NumberValue();
    if (!IsNaN(expected_stored_date)) {
      CHECK_EQ(expected_stored_date, stored_date);
    } else {
      uint64_t stored_bits = DoubleToBits(stored_date);
      // Check if quiet nan (bits 51..62 all set).
      CHECK_EQ(0xfff, static_cast<int>((stored_bits >> 51) & 0xfff));
    }
  }
}

// Converts its first argument to a string inside a local TryCatch and
// re-throws any pending exception so it propagates through this C++ frame.
static v8::Handle<Value> SpaghettiIncident(const v8::Arguments& args) {
  v8::HandleScope scope;
  v8::TryCatch tc;
  // Result intentionally unused; ToString is called for its (possibly
  // throwing) side effects.
  v8::Handle<v8::String> str = args[0]->ToString();
  if (tc.HasCaught())
    return tc.ReThrow();
  return v8::Undefined();
}

// Test that an exception can be propagated down through a spaghetti
// stack using ReThrow.
THREADED_TEST(SpaghettiStackReThrow) {
  v8::HandleScope scope;
  LocalContext context;
  context->Global()->Set(
      v8::String::New("s"),
      v8::FunctionTemplate::New(SpaghettiIncident)->GetFunction());
  v8::TryCatch try_catch;
  // o.toString recursively calls the native s(), 10 frames deep, then throws;
  // the throw must surface through all interleaved JS/C++ frames.
  CompileRun(
      "var i = 0;"
      "var o = {"
      " toString: function () {"
      " if (i == 10) {"
      " throw 'Hey!';"
      " } else {"
      " i++;"
      " return s(o);"
      " }"
      " }"
      "};"
      "s(o);");
  CHECK(try_catch.HasCaught());
  v8::String::Utf8Value value(try_catch.Exception());
  CHECK_EQ(0, strcmp(*value, "Hey!"));
}

// Regression test: entries in the compilation cache must not keep disposed
// global objects alive for more than one GC cycle.
TEST(Regress528) {
  v8::V8::Initialize();

  v8::HandleScope scope;
  v8::Persistent<Context> context;
  v8::Persistent<Context> other_context;
  int gc_count;

  // Create a context used to keep the code from aging in the compilation
  // cache.
  other_context = Context::New();

  // Context-dependent context data creates reference from the compilation
  // cache to the global object.
  const char* source_simple = "1";
  context = Context::New();
  {
    v8::HandleScope scope;

    context->Enter();
    Local<v8::String> obj = v8::String::New("");
    context->SetData(obj);
    CompileRun(source_simple);
    context->Exit();
  }
  context.Dispose();
  // The disposed context's global must be collectible within at most two GCs.
  for (gc_count = 1; gc_count < 10; gc_count++) {
    other_context->Enter();
    CompileRun(source_simple);
    other_context->Exit();
    v8::internal::Heap::CollectAllGarbage(false);
    if (GetGlobalObjectsCount() == 1) break;
  }
  CHECK_GE(2, gc_count);
  CHECK_EQ(1, GetGlobalObjectsCount());

  // Eval in a function creates reference from the compilation cache to the
  // global object.
  const char* source_eval = "function f(){eval('1')}; f()";
  context = Context::New();
  {
    v8::HandleScope scope;

    context->Enter();
    CompileRun(source_eval);
    context->Exit();
  }
  context.Dispose();
  for (gc_count = 1; gc_count < 10; gc_count++) {
    other_context->Enter();
    CompileRun(source_eval);
    other_context->Exit();
    v8::internal::Heap::CollectAllGarbage(false);
    if (GetGlobalObjectsCount() == 1) break;
  }
  CHECK_GE(2, gc_count);
  CHECK_EQ(1, GetGlobalObjectsCount());

  // Looking up the line number for an exception creates reference from the
  // compilation cache to the global object.
  const char* source_exception = "function f(){throw 1;} f()";
  context = Context::New();
  {
    v8::HandleScope scope;

    context->Enter();
    v8::TryCatch try_catch;
    CompileRun(source_exception);
    CHECK(try_catch.HasCaught());
    v8::Handle<v8::Message> message = try_catch.Message();
    CHECK(!message.IsEmpty());
    CHECK_EQ(1, message->GetLineNumber());
    context->Exit();
  }
  context.Dispose();
  for (gc_count = 1; gc_count < 10; gc_count++) {
    other_context->Enter();
    CompileRun(source_exception);
    other_context->Exit();
    v8::internal::Heap::CollectAllGarbage(false);
    if (GetGlobalObjectsCount() == 1) break;
  }
  CHECK_GE(2, gc_count);
  CHECK_EQ(1, GetGlobalObjectsCount());

  other_context.Dispose();
}
/*
 * Copyright (c) 2020, Andreas Kling <kling@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <LibWeb/DOM/Element.h>
#include <LibWeb/HTML/Parser/HTMLParser.h>
#include <LibWeb/HTML/Parser/StackOfOpenElements.h>

namespace Web::HTML {

// Default scope-boundary tags from the "has an element in scope" algorithm:
// https://html.spec.whatwg.org/multipage/parsing.html#has-an-element-in-scope
static Vector<FlyString> s_base_list { "applet", "caption", "html", "table", "td", "th", "marquee", "object", "template" };

StackOfOpenElements::~StackOfOpenElements() = default;

// Walks from the current node (end of the stack) upward: a local-name match
// succeeds, hitting any boundary tag in `list` fails.
bool StackOfOpenElements::has_in_scope_impl(const FlyString& tag_name, const Vector<FlyString>& list) const
{
    ssize_t index = static_cast<ssize_t>(m_elements.size());
    while (index > 0) {
        auto const& node = m_elements.at(--index);
        if (node.local_name() == tag_name)
            return true;
        if (list.contains_slow(node.local_name()))
            return false;
    }
    // The bottom of the stack is an html element, which is in every list.
    VERIFY_NOT_REACHED();
}

bool StackOfOpenElements::has_in_scope(const FlyString& tag_name) const
{
    return has_in_scope_impl(tag_name, s_base_list);
}

// Identity-based variant of the walk above: matches a specific element
// instead of a tag name.
bool StackOfOpenElements::has_in_scope_impl(const DOM::Element& target_node, const Vector<FlyString>& list) const
{
    ssize_t index = static_cast<ssize_t>(m_elements.size());
    while (index > 0) {
        auto const& node = m_elements.at(--index);
        if (&node == &target_node)
            return true;
        if (list.contains_slow(node.local_name()))
            return false;
    }
    VERIFY_NOT_REACHED();
}

bool StackOfOpenElements::has_in_scope(const DOM::Element& target_node) const
{
    return has_in_scope_impl(target_node, s_base_list);
}

// Button scope: the base boundary list extended with "button".
bool StackOfOpenElements::has_in_button_scope(const FlyString& tag_name) const
{
    auto scope_list = s_base_list;
    scope_list.append("button");
    return has_in_scope_impl(tag_name, scope_list);
}

// Table scope uses its own, smaller boundary list.
bool StackOfOpenElements::has_in_table_scope(const FlyString& tag_name) const
{
    return has_in_scope_impl(tag_name, { "html", "table", "template" });
}

// List-item scope: the base boundary list extended with "ol" and "ul".
bool StackOfOpenElements::has_in_list_item_scope(const FlyString& tag_name) const
{
    auto scope_list = s_base_list;
    scope_list.append("ol");
    scope_list.append("ul");
    return has_in_scope_impl(tag_name, scope_list);
}

// https://html.spec.whatwg.org/multipage/parsing.html#has-an-element-in-select-scope
// The stack of open elements is said to have a
particular element in select scope
// when it has that element in the specific scope consisting of all element types except the following:
// - optgroup in the HTML namespace
// - option in the HTML namespace
// NOTE: In this case it's "all element types _except_"
bool StackOfOpenElements::has_in_select_scope(const FlyString& tag_name) const
{
    // https://html.spec.whatwg.org/multipage/parsing.html#has-an-element-in-the-specific-scope
    for (ssize_t i = m_elements.size() - 1; i >= 0; --i) {
        // 1. Initialize node to be the current node (the bottommost node of the stack).
        auto& node = m_elements.at(i);
        // 2. If node is the target node, terminate in a match state.
        if (node.local_name() == tag_name)
            return true;
        // 3. Otherwise, if node is one of the element types in list, terminate in a failure state.
        // NOTE: Here "list" refers to all elements except option and optgroup
        if (node.local_name() != HTML::TagNames::option && node.local_name() != HTML::TagNames::optgroup)
            return false;
        // 4. Otherwise, set node to the previous entry in the stack of open elements and return to step 2.
    }
    // [4.] (This will never fail, since the loop will always terminate in the previous step if the top of the stack
    // — an html element — is reached.)
    VERIFY_NOT_REACHED();
}

// Returns true if `element` (compared by identity) is anywhere on the stack.
bool StackOfOpenElements::contains(const DOM::Element& element) const
{
    for (auto& element_on_stack : m_elements) {
        if (&element == &element_on_stack)
            return true;
    }
    return false;
}

// Returns true if any element on the stack has the given local name.
bool StackOfOpenElements::contains(const FlyString& tag_name) const
{
    for (auto& element_on_stack : m_elements) {
        if (element_on_stack.local_name() == tag_name)
            return true;
    }
    return false;
}

// Pops until an element with `tag_name` has itself been popped, i.e. the
// matching element is removed as well.
void StackOfOpenElements::pop_until_an_element_with_tag_name_has_been_popped(const FlyString& tag_name)
{
    while (m_elements.last().local_name() != tag_name)
        (void)pop();
    (void)pop();
}

// Finds the topmost "special" element that was opened after
// `formatting_element` (used by the adoption agency algorithm);
// nullptr when there is none.
DOM::Element* StackOfOpenElements::topmost_special_node_below(const DOM::Element& formatting_element)
{
    DOM::Element* found_element = nullptr;
    for (ssize_t i = m_elements.size() - 1; i >= 0; --i) {
        auto& element = m_elements[i];
        if (&element == &formatting_element)
            break;
        if (HTMLParser::is_special_tag(element.local_name(), element.namespace_()))
            found_element = &element;
    }
    return found_element;
}

// Returns the most recently opened element with `tag_name` together with its
// stack index, or { nullptr, -1 } when no such element is on the stack.
StackOfOpenElements::LastElementResult StackOfOpenElements::last_element_with_tag_name(const FlyString& tag_name)
{
    for (ssize_t i = m_elements.size() - 1; i >= 0; --i) {
        auto& element = m_elements[i];
        if (element.local_name() == tag_name)
            return { &element, i };
    }
    return { nullptr, -1 };
}

// Returns the stack entry directly above `target` (the element opened just
// before it), or nullptr when `target` is absent or has nothing above it.
DOM::Element* StackOfOpenElements::element_immediately_above(DOM::Element const& target)
{
    bool found_target = false;
    for (ssize_t i = m_elements.size() - 1; i >= 0; --i) {
        auto& element = m_elements[i];
        if (&element == &target) {
            found_target = true;
        } else if (found_target)
            return &element;
    }
    return nullptr;
}

// Removes `element` (compared by identity) from the stack, wherever it is.
void StackOfOpenElements::remove(const DOM::Element& element)
{
    m_elements.remove_first_matching([&element](DOM::Element const& other) { return &other == &element; });
}

// Replaces `to_remove` with `to_add` at the same stack position; no-op when
// `to_remove` is not on the stack.
void StackOfOpenElements::replace(const DOM::Element& to_remove, NonnullRefPtr<DOM::Element> to_add)
{
    for (size_t i = 0; i < m_elements.size(); i++) {
        if (&m_elements[i] == &to_remove) {
            m_elements.remove(i);
            m_elements.insert(i, move(to_add));
            break;
        }
    }
}

// Inserts `element_to_add` directly below `target` (at index target + 1);
// no-op when `target` is not on the stack.
void StackOfOpenElements::insert_immediately_below(NonnullRefPtr<DOM::Element> element_to_add, DOM::Element const& target)
{
    for (size_t i = 0; i < m_elements.size(); i++) {
        if (&m_elements[i] == &target) {
            m_elements.insert(i + 1, move(element_to_add));
            break;
        }
    }
}

}
//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

// <random>

// template<class IntType = int>
// class geometric_distribution

// geometric_distribution& operator=(const geometric_distribution&);

#include <random>
#include <cassert>

// Copy assignment must make the target compare equal to the source; two
// default-vs-parameterized distributions compare unequal beforehand.
void test1()
{
    typedef std::geometric_distribution<> Dist;
    Dist source(0.75);
    Dist target;
    assert(source != target);
    target = source;
    assert(source == target);
}

int main()
{
    test1();
}
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/web_applications/preinstalled_web_app_manager.h"

#include <algorithm>
#include <memory>
#include <set>
#include <vector>

#include "base/bind.h"
#include "base/containers/contains.h"
#include "base/feature_list.h"
#include "base/path_service.h"
#include "base/run_loop.h"
#include "base/test/bind.h"
#include "base/test/metrics/histogram_tester.h"
#include "base/test/scoped_command_line.h"
#include "base/test/scoped_feature_list.h"
#include "base/test/scoped_path_override.h"
#include "build/chromeos_buildflags.h"
#include "chrome/browser/extensions/extension_management_test_util.h"
#include "chrome/browser/supervised_user/supervised_user_constants.h"
#include "chrome/browser/web_applications/preinstalled_app_install_features.h"
#include "chrome/browser/web_applications/preinstalled_web_apps/preinstalled_web_apps.h"
#include "chrome/browser/web_applications/web_app_constants.h"
#include "chrome/browser/web_applications/web_app_provider.h"
#include "chrome/common/chrome_features.h"
#include "chrome/common/chrome_paths.h"
#include "chrome/test/base/testing_profile.h"
#include "components/account_id/account_id.h"
#include "components/sync_preferences/testing_pref_service_syncable.h"
#include "content/public/test/browser_task_environment.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "url/gurl.h"

#if defined(OS_CHROMEOS)
#include "chrome/browser/policy/profile_policy_connector.h"
#endif

#if BUILDFLAG(IS_CHROMEOS_ASH)
#include "ash/constants/ash_switches.h"
#include "chrome/browser/ash/login/users/fake_chrome_user_manager.h"
#include "components/user_manager/scoped_user_manager.h"
#endif

namespace web_app {
namespace {

// Subdirectory names under chrome/test/data/web_app_default_apps/ used by the
// tests below, and the install URLs expected for each user type.
constexpr char kUserTypesTestDir[] = "user_types";

#if defined(OS_CHROMEOS)
constexpr char kGoodJsonTestDir[] = "good_json";
constexpr char kAppAllUrl[] = "https://www.google.com/all";
constexpr char kAppGuestUrl[] = "https://www.google.com/guest";
constexpr char kAppManagedUrl[] = "https://www.google.com/managed";
constexpr char kAppUnmanagedUrl[] = "https://www.google.com/unmanaged";
#endif

#if BUILDFLAG(IS_CHROMEOS_ASH)
constexpr char kAppChildUrl[] = "https://www.google.com/child";
#endif

}  // namespace

// Fixture that loads preinstalled-web-app JSON configs from test data
// directories and exposes the parsed ExternalInstallOptions for inspection.
class PreinstalledWebAppManagerTest : public testing::Test {
 public:
  PreinstalledWebAppManagerTest() = default;
  PreinstalledWebAppManagerTest(const PreinstalledWebAppManagerTest&) = delete;
  PreinstalledWebAppManagerTest& operator=(
      const PreinstalledWebAppManagerTest&) = delete;
  ~PreinstalledWebAppManagerTest() override = default;

  // testing::Test:
  void SetUp() override {
    testing::Test::SetUp();
#if BUILDFLAG(IS_CHROMEOS_ASH)
    user_manager_enabler_ = std::make_unique<user_manager::ScopedUserManager>(
        std::make_unique<ash::FakeChromeUserManager>());
#endif
  }

  void TearDown() override {
#if BUILDFLAG(IS_CHROMEOS_ASH)
    user_manager_enabler_.reset();
#endif
    testing::Test::TearDown();
  }

 protected:
  // Synchronously loads the app configs from `test_dir`; when `profile` is
  // null a fresh logged-in ChromeOS profile is created for the duration.
  std::vector<ExternalInstallOptions> LoadApps(const char* test_dir,
                                               Profile* profile = nullptr) {
    std::unique_ptr<TestingProfile> testing_profile;
    if (!profile) {
#if defined(OS_CHROMEOS)
      testing_profile = CreateProfileAndLogin();
      profile = testing_profile.get();
#else
      NOTREACHED();
#endif
    }

    // Uses the chrome/test/data/web_app_default_apps/test_dir directory
    // that holds the *.json data files from which tests should parse as app
    // configs.
    base::FilePath config_dir;
    if (!base::PathService::Get(chrome::DIR_TEST_DATA, &config_dir)) {
      ADD_FAILURE()
          << "base::PathService::Get could not resolve chrome::DIR_TEST_DATA";
    }
    config_dir =
        config_dir.AppendASCII("web_app_default_apps").AppendASCII(test_dir);
    PreinstalledWebAppManager::SetConfigDirForTesting(&config_dir);

    auto preinstalled_web_app_manager =
        std::make_unique<PreinstalledWebAppManager>(profile);
    auto* provider = WebAppProvider::GetForTest(profile);
    DCHECK(provider);
    preinstalled_web_app_manager->SetSubsystems(
        &provider->registrar(), &provider->externally_managed_app_manager());

    // Block until the asynchronous load reports its install options.
    std::vector<ExternalInstallOptions> result;
    base::RunLoop run_loop;
    preinstalled_web_app_manager->LoadForTesting(base::BindLambdaForTesting(
        [&](std::vector<ExternalInstallOptions> install_options_list) {
          result = install_options_list;
          run_loop.Quit();
        }));
    run_loop.Run();

    PreinstalledWebAppManager::SetConfigDirForTesting(nullptr);
    return result;
  }

  // Helper that creates simple test profile.
  std::unique_ptr<TestingProfile> CreateProfile() {
    TestingProfile::Builder profile_builder;
    return profile_builder.Build();
  }

#if defined(OS_CHROMEOS)
  // Helper that creates simple test guest profile.
  std::unique_ptr<TestingProfile> CreateGuestProfile() {
    TestingProfile::Builder profile_builder;
    profile_builder.SetGuestSession();
    return profile_builder.Build();
  }

  // Helper that creates simple test profile and logs it into user manager.
  // This makes profile appears as a primary profile in ChromeOS.
  std::unique_ptr<TestingProfile> CreateProfileAndLogin() {
    std::unique_ptr<TestingProfile> profile = CreateProfile();
#if BUILDFLAG(IS_CHROMEOS_ASH)
    const AccountId account_id(AccountId::FromUserEmailGaiaId(
        profile->GetProfileUserName(), "1234567890"));
    user_manager()->AddUser(account_id);
    user_manager()->LoginUser(account_id);
#endif
    return profile;
  }

  // Helper that creates simple test guest profile and logs it into user
  // manager. This makes profile appears as a primary profile in ChromeOS.
  std::unique_ptr<TestingProfile> CreateGuestProfileAndLogin() {
    std::unique_ptr<TestingProfile> profile = CreateGuestProfile();
#if BUILDFLAG(IS_CHROMEOS_ASH)
    user_manager()->AddGuestUser();
    user_manager()->LoginUser(user_manager()->GetGuestAccountId());
#endif
    return profile;
  }

  // Loads the user_types configs for `profile` and asserts that exactly the
  // expected set of install URLs was produced.
  void VerifySetOfApps(Profile* profile, const std::set<GURL>& expectations) {
    const auto install_options_list = LoadApps(kUserTypesTestDir, profile);
    ASSERT_EQ(expectations.size(), install_options_list.size());
    for (const auto& install_options : install_options_list)
      ASSERT_EQ(1u, expectations.count(install_options.install_url));
  }
#endif  // defined(OS_CHROMEOS)

  // Asserts the enabled/disabled/config-error histograms were each recorded
  // exactly once with the given counts.
  void ExpectHistograms(int enabled, int disabled, int errors) {
    histograms_.ExpectUniqueSample(
        PreinstalledWebAppManager::kHistogramEnabledCount, enabled, 1);
    histograms_.ExpectUniqueSample(
        PreinstalledWebAppManager::kHistogramDisabledCount, disabled, 1);
    histograms_.ExpectUniqueSample(
        PreinstalledWebAppManager::kHistogramConfigErrorCount, errors, 1);
  }

  base::HistogramTester histograms_;
  // Lets tests inject in-memory app configs instead of on-disk JSON.
  ScopedTestingPreinstalledAppData preinstalled_web_app_override_;

 private:
#if BUILDFLAG(IS_CHROMEOS_ASH)
  ash::FakeChromeUserManager* user_manager() {
    return static_cast<ash::FakeChromeUserManager*>(
        user_manager::UserManager::Get());
  }

  // To support primary/non-primary users.
  std::unique_ptr<user_manager::ScopedUserManager> user_manager_enabler_;
#endif

  // To support context of browser threads.
  content::BrowserTaskEnvironment task_environment_;
};

// An app configured to uninstall-and-replace an extension must be dropped
// whenever extension policy would block installing that extension.
TEST_F(PreinstalledWebAppManagerTest, ReplacementExtensionBlockedByPolicy) {
  using PolicyUpdater = extensions::ExtensionManagementPrefUpdater<
      sync_preferences::TestingPrefServiceSyncable>;
  auto test_profile = std::make_unique<TestingProfile>();
  sync_preferences::TestingPrefServiceSyncable* prefs =
      test_profile->GetTestingPrefService();
  GURL install_url("https://test.app");
  constexpr char kExtensionId[] = "abcdefghijklmnopabcdefghijklmnop";

  ExternalInstallOptions options(install_url, DisplayMode::kBrowser,
                                 ExternalInstallSource::kExternalDefault);
  options.user_type_allowlist = {"unmanaged"};
  options.uninstall_and_replace = {kExtensionId};
  options.only_use_app_info_factory = true;
  options.app_info_factory = base::BindRepeating(
      []() { return std::make_unique<WebApplicationInfo>(); });
  preinstalled_web_app_override_.apps.push_back(std::move(options));

  auto expect_present = [&]() {
    std::vector<ExternalInstallOptions> options_list =
        LoadApps(/*test_dir=*/"", test_profile.get());
    ASSERT_EQ(options_list.size(), 1u);
    EXPECT_EQ(options_list[0].install_url, install_url);
  };
  auto expect_not_present = [&]() {
    std::vector<ExternalInstallOptions> options_list =
        LoadApps(/*test_dir=*/"", test_profile.get());
    ASSERT_EQ(options_list.size(), 0u);
  };

  expect_present();

  PolicyUpdater(prefs).SetBlocklistedByDefault(false);
  expect_present();

  PolicyUpdater(prefs).SetBlocklistedByDefault(true);
  expect_not_present();

  PolicyUpdater(prefs).SetIndividualExtensionInstallationAllowed(kExtensionId,
                                                                 true);
  expect_present();

  PolicyUpdater(prefs).SetBlocklistedByDefault(false);
  PolicyUpdater(prefs).SetIndividualExtensionInstallationAllowed(kExtensionId,
                                                                 false);
  expect_not_present();

  // Force installing the replaced extension also blocks the replacement.
  PolicyUpdater(prefs).SetIndividualExtensionAutoInstalled(
      kExtensionId, /*update_url=*/{}, /*forced=*/true);
  expect_present();
}

// Only Chrome OS parses config files.
#if defined(OS_CHROMEOS)
// Well-formed configs parse into the exact expected install options.
TEST_F(PreinstalledWebAppManagerTest, GoodJson) {
  const auto install_options_list = LoadApps(kGoodJsonTestDir);

  // The good_json directory contains two good JSON files:
  // chrome_platform_status.json and google_io_2016.json.
  // google_io_2016.json is missing a "create_shortcuts" field, so the default
  // value of false should be used.
  std::vector<ExternalInstallOptions> test_install_options_list;
  {
    ExternalInstallOptions install_options(
        GURL("https://www.chromestatus.com/features"), DisplayMode::kBrowser,
        ExternalInstallSource::kExternalDefault);
    install_options.user_type_allowlist = {"unmanaged"};
    install_options.add_to_applications_menu = true;
    install_options.add_to_search = true;
    install_options.add_to_management = true;
    install_options.add_to_desktop = true;
    install_options.add_to_quick_launch_bar = false;
    install_options.require_manifest = true;
    install_options.disable_if_touchscreen_with_stylus_not_supported = false;
    test_install_options_list.push_back(std::move(install_options));
  }
  {
    ExternalInstallOptions install_options(
        GURL("https://events.google.com/io2016/?utm_source=web_app_manifest"),
        DisplayMode::kStandalone, ExternalInstallSource::kExternalDefault);
    install_options.user_type_allowlist = {"unmanaged"};
    install_options.add_to_applications_menu = true;
    install_options.add_to_search = true;
    install_options.add_to_management = true;
    install_options.add_to_desktop = false;
    install_options.add_to_quick_launch_bar = false;
    install_options.require_manifest = true;
    install_options.disable_if_touchscreen_with_stylus_not_supported = false;
    install_options.uninstall_and_replace.push_back("migrationsourceappid");
    test_install_options_list.push_back(std::move(install_options));
  }

  EXPECT_EQ(test_install_options_list.size(), install_options_list.size());
  for (const auto& install_option : test_install_options_list) {
    EXPECT_TRUE(base::Contains(install_options_list, install_option));
  }
  ExpectHistograms(/*enabled=*/2, /*disabled=*/0, /*errors=*/0);
}

TEST_F(PreinstalledWebAppManagerTest, BadJson) {
  const auto app_infos = LoadApps("bad_json");
  // The bad_json directory contains one (malformed) JSON file.
  EXPECT_EQ(0u, app_infos.size());
  ExpectHistograms(/*enabled=*/0, /*disabled=*/0, /*errors=*/1);
}

TEST_F(PreinstalledWebAppManagerTest, TxtButNoJson) {
  const auto app_infos = LoadApps("txt_but_no_json");
  // The txt_but_no_json directory contains one file, and the contents of that
  // file is valid JSON, but that file's name does not end with ".json".
  EXPECT_EQ(0u, app_infos.size());
  ExpectHistograms(/*enabled=*/0, /*disabled=*/0, /*errors=*/0);
}

TEST_F(PreinstalledWebAppManagerTest, MixedJson) {
  const auto app_infos = LoadApps("mixed_json");
  // The mixed_json directory contains one empty JSON file, one malformed JSON
  // file and one good JSON file. ScanDirForExternalWebAppsForTesting should
  // still pick up that one good JSON file: polytimer.json.
  EXPECT_EQ(1u, app_infos.size());
  if (app_infos.size() == 1) {
    EXPECT_EQ(app_infos[0].install_url.spec(),
              std::string("https://polytimer.rocks/?homescreen=1"));
  }
  ExpectHistograms(/*enabled=*/1, /*disabled=*/0, /*errors=*/2);
}

TEST_F(PreinstalledWebAppManagerTest, MissingAppUrl) {
  const auto app_infos = LoadApps("missing_app_url");
  // The missing_app_url directory contains one JSON file which is correct
  // except for a missing "app_url" field.
  EXPECT_EQ(0u, app_infos.size());
  ExpectHistograms(/*enabled=*/0, /*disabled=*/0, /*errors=*/1);
}

TEST_F(PreinstalledWebAppManagerTest, EmptyAppUrl) {
  const auto app_infos = LoadApps("empty_app_url");
  // The empty_app_url directory contains one JSON file which is correct
  // except for an empty "app_url" field.
  EXPECT_EQ(0u, app_infos.size());
  ExpectHistograms(/*enabled=*/0, /*disabled=*/0, /*errors=*/1);
}

TEST_F(PreinstalledWebAppManagerTest, InvalidAppUrl) {
  const auto app_infos = LoadApps("invalid_app_url");
  // The invalid_app_url directory contains one JSON file which is correct
  // except for an invalid "app_url" field.
  EXPECT_EQ(0u, app_infos.size());
  ExpectHistograms(/*enabled=*/0, /*disabled=*/0, /*errors=*/1);
}

// "hide_from_user": true must clear all surfacing flags on the parsed app.
TEST_F(PreinstalledWebAppManagerTest, TrueHideFromUser) {
  const auto app_infos = LoadApps("true_hide_from_user");
  EXPECT_EQ(1u, app_infos.size());
  const auto& app = app_infos[0];
  EXPECT_FALSE(app.add_to_applications_menu);
  EXPECT_FALSE(app.add_to_search);
  EXPECT_FALSE(app.add_to_management);
  ExpectHistograms(/*enabled=*/1, /*disabled=*/0, /*errors=*/0);
}

TEST_F(PreinstalledWebAppManagerTest, InvalidHideFromUser) {
  const auto app_infos = LoadApps("invalid_hide_from_user");
  // The invalid_hide_from_user directory contains on JSON file which is correct
  // except for an invalid "hide_from_user" field.
  EXPECT_EQ(0u, app_infos.size());
  ExpectHistograms(/*enabled=*/0, /*disabled=*/0, /*errors=*/1);
}

TEST_F(PreinstalledWebAppManagerTest, InvalidCreateShortcuts) {
  const auto app_infos = LoadApps("invalid_create_shortcuts");
  // The invalid_create_shortcuts directory contains one JSON file which is
  // correct except for an invalid "create_shortcuts" field.
  EXPECT_EQ(0u, app_infos.size());
  ExpectHistograms(/*enabled=*/0, /*disabled=*/0, /*errors=*/1);
}

TEST_F(PreinstalledWebAppManagerTest, MissingLaunchContainer) {
  const auto app_infos = LoadApps("missing_launch_container");
  // The missing_launch_container directory contains one JSON file which is
  // correct except for a missing "launch_container" field.
  EXPECT_EQ(0u, app_infos.size());
  ExpectHistograms(/*enabled=*/0, /*disabled=*/0, /*errors=*/1);
}

TEST_F(PreinstalledWebAppManagerTest, InvalidLaunchContainer) {
  const auto app_infos = LoadApps("invalid_launch_container");
  // The invalid_launch_container directory contains one JSON file which is
  // correct except for an invalid "launch_container" field.
  EXPECT_EQ(0u, app_infos.size());
  ExpectHistograms(/*enabled=*/0, /*disabled=*/0, /*errors=*/1);
}

TEST_F(PreinstalledWebAppManagerTest, InvalidUninstallAndReplace) {
  const auto app_infos = LoadApps("invalid_uninstall_and_replace");
  // The invalid_uninstall_and_replace directory contains 2 JSON files which are
  // correct except for invalid "uninstall_and_replace" fields.
  EXPECT_EQ(0u, app_infos.size());
  ExpectHistograms(/*enabled=*/0, /*disabled=*/0, /*errors=*/2);
}

// With the feature disabled, nothing loads and no histograms are recorded.
TEST_F(PreinstalledWebAppManagerTest, PreinstalledWebAppInstallDisabled) {
  base::test::ScopedFeatureList scoped_feature_list;
  scoped_feature_list.InitAndDisableFeature(
      features::kPreinstalledWebAppInstallation);
  const auto app_infos = LoadApps(kGoodJsonTestDir);
  EXPECT_EQ(0u, app_infos.size());
  histograms_.ExpectTotalCount(
      PreinstalledWebAppManager::kHistogramConfigErrorCount, 0);
  histograms_.ExpectTotalCount(
      PreinstalledWebAppManager::kHistogramEnabledCount, 0);
  histograms_.ExpectTotalCount(
      PreinstalledWebAppManager::kHistogramDisabledCount, 0);
}

TEST_F(PreinstalledWebAppManagerTest, EnabledByFinch) {
  base::AutoReset<bool> testing_scope =
      SetPreinstalledAppInstallFeatureAlwaysEnabledForTesting();
  const auto app_infos = LoadApps("enabled_by_finch");
  // The enabled_by_finch directory contains two JSON file containing apps
  // that have field trials. As the matching feature is enabled, they should be
  // in our list of apps to install.
  EXPECT_EQ(2u, app_infos.size());
  ExpectHistograms(/*enabled=*/2, /*disabled=*/0, /*errors=*/0);
}

TEST_F(PreinstalledWebAppManagerTest, NotEnabledByFinch) {
  const auto app_infos = LoadApps("enabled_by_finch");
  // The enabled_by_finch directory contains two JSON file containing apps
  // that have field trials. As the matching feature isn't enabled, they should
  // not be in our list of apps to install.
  EXPECT_EQ(0u, app_infos.size());
  ExpectHistograms(/*enabled=*/0, /*disabled=*/2, /*errors=*/0);
}

// Guest sessions receive only the "all users" and guest-targeted apps.
TEST_F(PreinstalledWebAppManagerTest, GuestUser) {
  VerifySetOfApps(CreateGuestProfileAndLogin().get(),
                  {GURL(kAppAllUrl), GURL(kAppGuestUrl)});
}

// Regular unmanaged users receive the "all users" and unmanaged apps.
TEST_F(PreinstalledWebAppManagerTest, UnmanagedUser) {
  VerifySetOfApps(CreateProfileAndLogin().get(),
                  {GURL(kAppAllUrl), GURL(kAppUnmanagedUrl)});
}

// Enterprise-managed users receive the "all users" and managed apps.
TEST_F(PreinstalledWebAppManagerTest, ManagedUser) {
  const auto profile = CreateProfileAndLogin();
  profile->GetProfilePolicyConnector()->OverrideIsManagedForTesting(true);
  VerifySetOfApps(profile.get(), {GURL(kAppAllUrl), GURL(kAppManagedUrl)});
}
#else
// No app is expected for non-ChromeOS builds.
TEST_F(PreinstalledWebAppManagerTest, NoApp) {
  EXPECT_TRUE(LoadApps(kUserTypesTestDir, CreateProfile().get()).empty());
}
#endif  // defined(OS_CHROMEOS)

#if BUILDFLAG(IS_CHROMEOS_ASH)
// TODO(crbug.com/1252273): Enable test for Lacros.
TEST_F(PreinstalledWebAppManagerTest, ChildUser) {
  const auto profile = CreateProfileAndLogin();
  profile->SetSupervisedUserId(supervised_users::kChildAccountSUID);
  VerifySetOfApps(profile.get(), {GURL(kAppAllUrl), GURL(kAppChildUrl)});
}

// A profile that was never logged into the user manager still resolves to the
// unmanaged app set.
TEST_F(PreinstalledWebAppManagerTest, NonPrimaryProfile) {
  VerifySetOfApps(CreateProfile().get(),
                  {GURL(kAppAllUrl), GURL(kAppUnmanagedUrl)});
}

// TODO(crbug.com/1252272): Enable extra web apps tests for Lacros.
TEST_F(PreinstalledWebAppManagerTest, ExtraWebApps) {
  // The extra_web_apps directory contains two JSON files in different named
  // subdirectories. The --extra-web-apps-dir switch should control which
  // directory apps are loaded from.
  base::test::ScopedCommandLine command_line;
  command_line.GetProcessCommandLine()->AppendSwitchASCII(
      chromeos::switches::kExtraWebAppsDir, "model1");
  const auto app_infos = LoadApps("extra_web_apps");
  EXPECT_EQ(1u, app_infos.size());
  ExpectHistograms(/*enabled=*/1, /*disabled=*/0, /*errors=*/0);
}

// A switch value naming a nonexistent subdirectory loads nothing (no error).
TEST_F(PreinstalledWebAppManagerTest, ExtraWebAppsNoMatchingDirectory) {
  base::test::ScopedCommandLine command_line;
  command_line.GetProcessCommandLine()->AppendSwitchASCII(
      chromeos::switches::kExtraWebAppsDir, "model3");
  const auto app_infos = LoadApps("extra_web_apps");
  EXPECT_EQ(0u, app_infos.size());
  ExpectHistograms(/*enabled=*/0, /*disabled=*/0, /*errors=*/0);
}
#endif  // BUILDFLAG(IS_CHROMEOS_ASH)

}  // namespace web_app
// MIT Licensed (see LICENSE.md).
#include "Precompiled.hpp"

// When defined, shader permutations are collected and compiled on first use
// (see the #if blocks in GraphicsEngine::Update) instead of all up front.
#define PlasmaLazyShaderCompositing

namespace Plasma
{

namespace Events
{
// Sent when a component type's shader input properties may have changed
// (dispatched from GraphicsEngine::ProcessModifiedScripts).
DefineEvent(ShaderInputsModified);
}

LightningDefineType(ShaderInputsEvent, builder, type)
{
  type->AddAttribute(ObjectAttributes::cHidden);
}

LightningDefineType(GraphicsStatics, builder, type)
{
  LightningBindGetter(DriverSupport);
}

// Exposes the renderer's driver capability flags to script.
GraphicsDriverSupport* GraphicsStatics::GetDriverSupport()
{
  return &PL::gRenderer->mDriverSupport;
}

// Factory used by the engine to instantiate the graphics system.
System* CreateGraphicsSystem()
{
  return new GraphicsEngine();
}

// Pool for Shader allocations; created in GraphicsEngine::Initialize.
Memory::Pool* gShaderPool = nullptr;

LightningDefineType(GraphicsEngine, builder, type)
{
}

GraphicsEngine::GraphicsEngine() :
    mNewLibrariesCommitted(false),
    mRenderGroupCount(0),
    mUpdateRenderGroupCount(false)
{
  mEngineShutdown = false;
}

GraphicsEngine::~GraphicsEngine()
{
  ShaderSettingsLibrary::GetInstance().ClearLibrary();
  ShaderSettingsLibrary::Destroy();

  // Shut the renderer down and wait for its thread before deleting the
  // objects that thread uses.
  DestroyRenderer();
  mRendererThread.WaitForCompletion();

  delete mDoRenderTasksJob;
  delete mRendererJobQueue;
  delete mShaderGenerator;
  delete mReturnJobQueue;
  delete mShowProgressJob;

  // Call clear functions for anything that has to be manually destructed
  // Must be done after renderer is destroyed
  mRenderTasksBack->Clear();
  mRenderQueuesBack->Clear();
  mRenderTasksFront->Clear();
  mRenderQueuesFront->Clear();
}

cstr GraphicsEngine::GetName()
{
  return "Graphics";
}

// Sets up the shader generator, all event connections, memory pools, the
// double-buffered render task/queue pointers, and the renderer thread.
void GraphicsEngine::Initialize(SystemInitializer& initializer)
{
  // This needs to be initialized only once and multiple shader generators might
  // be created
  ShaderSettingsLibrary::InitializeInstance();

  // Need to get translator or mode from Renderer
  mShaderGenerator = CreateLightningShaderGenerator();

  ConnectThisTo(PL::gEngine, Events::EngineShutdown, OnEngineShutdown);

  // Loading-screen progress events.
  ConnectThisTo(PL::gEngine, Events::LoadingStart, StartProgress);
  ConnectThisTo(PL::gEngine, Events::LoadingProgress, UpdateProgress);
  ConnectThisTo(PL::gEngine, Events::LoadingFinish, EndProgress);

  ConnectThisTo(PL::gEngine, Events::ProjectLoaded, OnProjectLoaded);
  ConnectThisTo(PL::gEngine, Events::NoProjectLoaded, OnNoProjectLoaded);

  ConnectThisTo(PL::gResources, Events::ResourcesLoaded, OnResourcesAdded);
  ConnectThisTo(PL::gResources, Events::ResourcesUnloaded, OnResourcesRemoved);

  // Event connections for ResourceManagers
  ConnectThisTo(RenderGroupManager::GetInstance(), Events::ResourceAdded, OnRenderGroupAdded);
  ConnectThisTo(RenderGroupManager::GetInstance(), Events::ResourceModified, OnRenderGroupModified);
  ConnectThisTo(RenderGroupManager::GetInstance(), Events::ResourceRemoved, OnRenderGroupRemoved);

  ConnectThisTo(MaterialManager::GetInstance(), Events::ResourceAdded, OnMaterialAdded);
  ConnectThisTo(MaterialManager::GetInstance(), Events::ResourceModified, OnMaterialModified);
  ConnectThisTo(MaterialManager::GetInstance(), Events::ResourceRemoved, OnMaterialRemoved);

  ConnectThisTo(LightningFragmentManager::GetInstance(), Events::ResourceAdded, OnLightningFragmentAdded);
  ConnectThisTo(LightningFragmentManager::GetInstance(), Events::ResourceModified, OnLightningFragmentModified);
  ConnectThisTo(LightningFragmentManager::GetInstance(), Events::ResourceRemoved, OnLightningFragmentRemoved);

  ConnectThisTo(MeshManager::GetInstance(), Events::ResourceAdded, OnMeshAdded);
  ConnectThisTo(MeshManager::GetInstance(), Events::ResourceModified, OnMeshModified);
  ConnectThisTo(MeshManager::GetInstance(), Events::ResourceRemoved, OnMeshRemoved);

  ConnectThisTo(TextureManager::GetInstance(), Events::ResourceAdded, OnTextureAdded);
  ConnectThisTo(TextureManager::GetInstance(), Events::ResourceModified, OnTextureModified);
  ConnectThisTo(TextureManager::GetInstance(), Events::ResourceRemoved, OnTextureRemoved);

  // Script (fragment) compilation lifecycle events.
  ConnectThisTo(LightningManager::GetInstance(), Events::CompileLightningFragments, OnCompileLightningFragments);
  ConnectThisTo(LightningManager::GetInstance(), Events::ScriptsCompiledPrePatch, OnScriptsCompiledPrePatch);
  ConnectThisTo(LightningManager::GetInstance(), Events::ScriptsCompiledCommit, OnScriptsCompiledCommit);
  ConnectThisTo(LightningManager::GetInstance(), Events::ScriptsCompiledPostPatch, OnScriptsCompiledPostPatch);
  ConnectThisTo(LightningManager::GetInstance(), Events::ScriptCompilationFailed, OnScriptCompilationFailed);

  ParticleList::Memory = new Memory::Pool("Particles", Memory::GetRoot(), sizeof(Particle), 1024);
  gShaderPool = new Memory::Pool("Shaders", Memory::GetRoot(), sizeof(Shader), 1024);

  mFrameCounter = 0;

  // Set pointers for swapping render data
  mRenderQueuesBack = &mRenderQueues[0];
  mRenderTasksBack = &mRenderTasks[0];
  mRenderQueuesFront = &mRenderQueues[1];
  mRenderTasksFront = &mRenderTasks[1];

  // Renderer thread setup
  mRendererJobQueue = new RendererThreadJobQueue();
  mRendererJobQueue->mRendererThreadEvent.Initialize();
  mRendererJobQueue->mExitThread = false;

  mDoRenderTasksJob = new DoRenderTasksJob();
  // Pre-signaled so the first Update does not block waiting on a job that
  // never ran.
  mDoRenderTasksJob->mWaitEvent.Signal();

  mReturnJobQueue = new RendererJobQueue();

  mShowProgressJob = new ShowProgressJob(mRendererJobQueue);
  mShowProgressJob->mDelayTerminate = true;

  if (ThreadingEnabled)
  {
    mRendererThread.Initialize(RendererThreadMain, mRendererJobQueue, "RendererThread");
    ErrorIf(mRendererThread.IsValid() == false, "RendererThread failed to initialize.");
  }

  mVerticalSync = false;
}

#if defined(PlasmaLazyShaderCompositing)
// Collects the shader permutation names a RenderPass task will need so they
// can be compiled before the renderer consumes the task.
void CollectShadersRenderTaskRenderPass(RenderTaskRenderPass* task,
                                        ViewBlock* viewBlock,
                                        FrameBlock* frameBlock,
                                        Array<String>& shadersOut)
{
  // Create a map of RenderGroup id to task memory index for every sub group
  // entry.
  HashMap<int, size_t> taskIndexMap;
  while (taskIndexMap.Size() < task->mSubRenderGroupCount)
  {
    // Sub task entries are laid out contiguously after the base task entry.
    size_t index = taskIndexMap.Size() + 1;
    RenderTaskRenderPass* subTask = task + index;
    taskIndexMap.InsertOrError(subTask->mRenderGroupIndex, index);
  }

  // All ViewNodes under the base RenderGroup.
  IndexRange viewNodeRange = viewBlock->mRenderGroupRanges[task->mRenderGroupIndex];

  for (uint i = viewNodeRange.start; i < viewNodeRange.end; ++i)
  {
    ViewNode& viewNode = viewBlock->mViewNodes[i];
    FrameNode& frameNode = frameBlock->mFrameNodes[viewNode.mFrameNodeIndex];

    // Get the index for this object's RenderGroup settings. Always default to
    // the base task entry.
    size_t index = taskIndexMap.FindValue(viewNode.mRenderGroupId, 0);
    // Offsets to sub RenderGroup settings or just the base task.
    RenderTaskRenderPass* subTask = task + index;

    // Different RenderPass tasks are also made to denote RenderGroups to not
    // render. Don't change state or render the object.
    if (subTask->mRender == false)
      continue;

    MaterialRenderData* materialData = frameNode.mMaterialRenderData;
    if (materialData == nullptr)
      continue;

    // Shader permutation lookup for vertex type and render pass
    String name = BuildString(GetCoreVertexFragmentName(frameNode.mCoreVertexType), materialData->mCompositeName, subTask->mRenderPassName);
    shadersOut.PushBack(name);
  }
}

// Collects the shader name a PostProcess task will need: either the
// Material's composite or a named post process fragment.
void CollectShadersRenderTaskPostProcess(RenderTaskPostProcess* task, Array<String>& shadersOut)
{
  MaterialRenderData* materialData = task->mMaterialRenderData;
  if (materialData == nullptr && task->mPostProcessName.Empty() == true)
    return;

  String compositeName = materialData ? materialData->mCompositeName : task->mPostProcessName;
  String name = BuildString(cPostVertex, compositeName);
  shadersOut.PushBack(name);
}

// Walks every queued render task this frame and gathers the names of all
// shaders the renderer will need.
void CollectShaders(RenderTasks* renderTasks, RenderQueues* renderQueues, Array<String>& shaderNamesOut)
{
  forRange (RenderTaskRange& taskRange, renderTasks->mRenderTaskRanges.All())
  {
    FrameBlock* frameBlock = &renderQueues->mFrameBlocks[taskRange.mFrameBlockIndex];
    ViewBlock* viewBlock = &renderQueues->mViewBlocks[taskRange.mViewBlockIndex];

    uint taskIndex = taskRange.mTaskIndex;
    for (uint i = 0; i < taskRange.mTaskCount; ++i)
    {
      // Tasks are variable-size records in a flat byte buffer; advance the
      // byte index by the size of each concrete task type.
      RenderTask* task = (RenderTask*)&renderTasks->mRenderTaskBuffer.mRenderTaskData[taskIndex];
      switch (task->mId)
      {
      case RenderTaskType::ClearTarget:
        taskIndex += sizeof(RenderTaskClearTarget);
        break;
      case RenderTaskType::RenderPass:
      {
        RenderTaskRenderPass* renderPass = (RenderTaskRenderPass*)task;
        CollectShadersRenderTaskRenderPass(renderPass, viewBlock, frameBlock, shaderNamesOut);
        // RenderPass tasks can have multiple following task entries for sub
        // RenderGroup settings. Have to index past all sub tasks.
        taskIndex += sizeof(RenderTaskRenderPass) * (renderPass->mSubRenderGroupCount + 1);
        i += renderPass->mSubRenderGroupCount;
      }
      break;
      case RenderTaskType::PostProcess:
        CollectShadersRenderTaskPostProcess((RenderTaskPostProcess*)task, shaderNamesOut);
        taskIndex += sizeof(RenderTaskPostProcess);
        break;
      case RenderTaskType::BackBufferBlit:
        taskIndex += sizeof(RenderTaskBackBufferBlit);
        break;
      case RenderTaskType::TextureUpdate:
        taskIndex += sizeof(RenderTaskTextureUpdate);
        break;
      default:
        Error("Render task not implemented.");
        break;
      }
    }
  }
}
#endif

// Per-frame update: drains renderer return jobs, gathers render tasks/queues
// from all spaces into the back buffers, swaps buffers, and queues the render
// job for the renderer thread.
void GraphicsEngine::Update(bool debugger)
{
  ZoneScoped;
  // Do not try to run rendering while this job is going.
  if (ThreadingEnabled && mShowProgressJob->IsRunning())
    return;

  LightningManager::GetInstance()->mDebugger.DoNotAllowBreakReason =
      "Cannot currently break within the graphics engine because it must "
      "continue running in editor";

  ProfileScopeTree("GraphicsSystem", "Engine", Color::Blue);

  // Run all return jobs from renderer
  Array<RendererJob*> returnJobs;
  mReturnJobQueue->TakeAllJobs(returnJobs);
  forRange (RendererJob* job, returnJobs.All())
  {
    job->ReturnExecute();
  }

  ++mFrameCounter;

  // Reset the back buffers and stamp them with this frame's counter.
  mRenderTasksBack->Clear();
  mRenderTasksBack->mShaderInputsVersion = mFrameCounter;

  mRenderQueuesBack->Clear();
  mRenderQueuesBack->mSkinningBufferVersion = mFrameCounter;

  // UpdateRenderGroups can happen at the beginning of update if broadphase is
  // done within this update function
  UpdateRenderGroups();

  {
    ZoneScopedN("FrameUpdate");
    ProfileScopeTree("FrameUpdate", "GraphicsSystem", Color::SpringGreen);
    float frameDt = PL::gEngine->has(TimeSystem)->mEngineDt;
    forRange (GraphicsSpace& space, mSpaces.All())
      space.OnFrameUpdate(frameDt);
  }

  {
    ZoneScopedN("RenderTasksUpdate");
    ProfileScopeTree("RenderTasksUpdate", "GraphicsSystem", Color::LimeGreen);
    forRange (GraphicsSpace& space, mSpaces.All())
      space.RenderTasksUpdate(*mRenderTasksBack);
  }

  {
    ZoneScopedN("RenderQueuesUpdate");
    ProfileScopeTree("RenderQueuesUpdate", "GraphicsSystem", Color::LawnGreen);
    forRange (GraphicsSpace& space, mSpaces.All())
      space.RenderQueuesUpdate(*mRenderTasksBack, *mRenderQueuesBack);
    Sort(mRenderTasksBack->mRenderTaskRanges.All());
  }

  {
    ZoneScopedN("UiRenderUpdate")
    ProfileScopeTree("UiRenderUpdate", "GraphicsSystem", Color::DarkSeaGreen);
    // add ui render task range after sorting so that everything else renders
    // before it
    Event event;
    DispatchEvent("UiRenderUpdate", &event);
  }

  {
    ZoneScopedN("WaitOnRenderer")
    ProfileScopeTree("WaitOnRenderer", "GraphicsSystem", Color::Bisque);
    // cannot run another RenderTasks job unless the last one is done
    mDoRenderTasksJob->WaitOnThisJob();
  }

  // Swap the double buffers; the renderer consumes the front set while the
  // next frame fills the back set.
  Swap(mRenderTasksBack, mRenderTasksFront);
  Swap(mRenderQueuesBack, mRenderQueuesFront);

  // pass everything to the renderer, all rendering happens on this job
  mDoRenderTasksJob->mRenderTasks = mRenderTasksFront;
  mDoRenderTasksJob->mRenderQueues = mRenderQueuesFront;

#if defined(PlasmaLazyShaderCompositing)
  // Lazily build any shaders this frame references that have not yet been
  // sent to the renderer, and queue them ahead of the render job.
  Array<String> shaderNames;
  CollectShaders(mRenderTasksFront, mRenderQueuesFront, shaderNames);

  ShaderSet shadersToCompile;

  forRange (StringParam name, shaderNames)
  {
    Shader* shader = mCompositeShaders.FindValue(name, nullptr);
    if (shader == nullptr)
      shader = mPostProcessShaders.FindValue(name, nullptr);
    ContinueIf(shader == nullptr, "Expected shader");

    if (!shader->mSentToRenderer)
      shadersToCompile.Insert(shader);
  }

  if (!shadersToCompile.Empty())
  {
    AddShadersJob* addShadersJob = new AddShadersJob(mRendererJobQueue);
    bool compiled = mShaderGenerator->BuildShaders(shadersToCompile, mUniqueComposites, addShadersJob->mShaders);
    ErrorIf(!compiled, "Shaders did not compile after composition.");
    AddRendererJob(addShadersJob);
  }
#endif

  AddRendererJob(mDoRenderTasksJob);

  // Add job for texture data after render tasks job so that
  // textures being written to in render tasks will have the expected data
  forRange (TextureToFile& toFile, mDelayedTextureToFile.All())
  {
    SaveImageToFileJob* toFileJob = new SaveImageToFileJob();
    toFileJob->mRenderData = toFile.mTexture->mRenderData;
    toFileJob->mFilename = toFile.mFilename;
    AddRendererJob(toFileJob);
  }
  mDelayedTextureToFile.Clear();

  // Release textures that were not reused this frame
  mRenderTargetManager.ClearUnusedTextures();

  // when new RenderGroups are added or removed, it could happen after
  // broadphase used them to organize visible objects but before that data is
  // used to render, and the renderer needs RenderGroup values to match still
  // so, any management or changes to RenderGroup id's should happen here after
  // the whole frame and rendering are completed if done at end of update, add
  // and remove events need to defer their operations until
  // here
  // UpdateRenderGroups();

  if (gDebugDraw->MaxCountExceeded())
    DoNotifyWarning("Max debug object count exceeded.",
                    "To edit the max count, open the Select menu and choose "
                    "'Select Project'. "
                    "Expand the component 'DebugSettings' (or add it) and "
                    "modify 'MaxDebugObjects'.");
  gDebugDraw->ClearObjects();

  LightningManager::GetInstance()->mDebugger.DoNotAllowBreakReason.Clear();
}

// Flushes all cached shaders to the renderer for removal and waits for the
// renderer job queue to drain so shutdown can proceed.
void GraphicsEngine::OnEngineShutdown(Event* event)
{
  mRenderTargetManager.Shutdown();

  // Clear all shaders
  HashSet<Shader*> shadersToRemove;
  shadersToRemove.Append(mCompositeShaders.Values());
  shadersToRemove.Append(mPostProcessShaders.Values());

  if (shadersToRemove.Empty() == false)
  {
    RemoveShadersJob* removeShadersJob = new RemoveShadersJob();

    forRange (Shader* shader, shadersToRemove.All())
    {
      ShaderEntry entry(shader);
      removeShadersJob->mShaders.PushBack(entry);
      mCompositeShaders.Erase(shader->mName);
      gShaderPool->DeallocateType(shader);
    }

    AddRendererJob(removeShadersJob);

    mCompositeShaders.Clear();
    mPostProcessShaders.Clear();
    mShaderCoreVertexMap.Clear();
    mShaderCompositeMap.Clear();
    mShaderRenderPassMap.Clear();
  }

  mEngineShutdown = true;
  mShowProgressJob->ForceTerminate();
  // Spin until the renderer thread has consumed every outstanding job.
  while (mRendererJobQueue->HasJobs())
    ;
}

void GraphicsEngine::AddSpace(GraphicsSpace* space)
{
  mSpaces.PushBack(space);
}

void GraphicsEngine::RemoveSpace(GraphicsSpace* space)
{
  mSpaces.Erase(space);
}

// Starts the loading-screen job, but only if every texture it needs is
// available.
void GraphicsEngine::StartProgress(Event* event)
{
  if (mEngineShutdown)
    return;

  Texture* loadingTexture = TextureManager::FindOrNull("PlasmaLoading");
  Texture* logoTexture = TextureManager::FindOrNull("PlasmaLogoAnimated");
  Texture* whiteTexture = TextureManager::FindOrNull("White");
  Texture* splashTexture = TextureManager::FindOrNull("PlasmaSplash");
  if (loadingTexture == nullptr || logoTexture == nullptr || whiteTexture == nullptr || splashTexture == nullptr)
    return;

  // The job runs on the renderer thread; mutate its state under its lock.
  mShowProgressJob->Lock();
  mShowProgressJob->mLoadingTexture = loadingTexture->mRenderData;
  mShowProgressJob->mLogoTexture = logoTexture->mRenderData;
  mShowProgressJob->mWhiteTexture = whiteTexture->mRenderData;
  mShowProgressJob->mSplashTexture = splashTexture->mRenderData;
  mShowProgressJob->mLogoFrameSize = 128;
  mShowProgressJob->mCurrentPercent = 0.0f;
  mShowProgressJob->mTargetPercent = 0.0f;
  mShowProgressJob->mProgressWidth = loadingTexture->mWidth;
  mShowProgressJob->mProgressText.Clear();
  mShowProgressJob->mPerJobTimer.Reset();
  mShowProgressJob->Unlock();

  mShowProgressJob->Start();
  mRendererJobQueue->AddJob(mShowProgressJob);
}

// Pushes the latest progress percentage and text (pre-built font vertices) to
// the progress job.
void GraphicsEngine::UpdateProgress(ProgressEvent* event)
{
  if (mEngineShutdown)
    return;

  Font* font = FontManager::FindOrNull("NotoSans-Regular");
  if (font == nullptr)
    return;

  RenderFont* renderFont = font->GetRenderFont(16);
  String progressText = BuildString(event->Operation, " ", event->CurrentTask, " ", event->ProgressLine);

  FontProcessorVertexArray fontProcessor(Vec4(1.0f));
  AddTextRange(fontProcessor,
               renderFont,
               progressText,
               Vec2::cZero,
               TextAlign::Left,
               Vec2(1.0f),
               Vec2((float)mShowProgressJob->mProgressWidth, (float)renderFont->mFontHeight),
               true);

  mShowProgressJob->Lock();
  mShowProgressJob->mTargetPercent = event->Percentage;
  mShowProgressJob->mFontTexture = renderFont->mTexture->mRenderData;
  mShowProgressJob->mProgressText = fontProcessor.mVertices;
  mShowProgressJob->Unlock();

  // When not threaded, we want to update the progress, but we don't
  // want to do a full render for every single resource that is loaded.
  if (!ThreadingEnabled)
  {
    // Only do a synchronous render every Nth progress event.
    static const size_t cProgressUpdateInterval = 10;
    static size_t sProgressUpdateFrame = 0;
    if (sProgressUpdateFrame % cProgressUpdateInterval == 0)
    {
      RendererThreadMain(mRendererJobQueue);
      YieldToOs();
    }
    ++sProgressUpdateFrame;
  }
}

void GraphicsEngine::EndProgress(Event* event)
{
  if (mEngineShutdown)
    return;
  mShowProgressJob->Terminate();
}

// Tracks the active project cog, re-applies project graphics settings, and
// lets the startup progress job finish.
void GraphicsEngine::OnProjectLoaded(ObjectEvent* event)
{
  if (mProjectCog.IsNotNull())
    DisconnectAll(mProjectCog, this);

  mProjectCog = (Cog*)event->GetSource();
  ConnectThisTo(*mProjectCog, Events::ObjectModified, OnProjectCogModified);
  OnProjectCogModified(event);

  EndProgressDelayTerminate();
}

void GraphicsEngine::OnNoProjectLoaded(Event* event)
{
  EndProgressDelayTerminate();
}

void GraphicsEngine::EndProgressDelayTerminate()
{
  // Allows job to terminate after startup completes for the first time.
  mShowProgressJob->Lock();
  mShowProgressJob->mDelayTerminate = false;
  mShowProgressJob->Unlock();

  if (!ThreadingEnabled)
    return;

  // Block until job completes.
  // Important for exports to not run engine update until job fully exits.
  while (mShowProgressJob->IsRunning())
    Os::Sleep(mShowProgressJob->mExecuteDelay);
}

void GraphicsEngine::SetSplashscreenLoading()
{
  mShowProgressJob->mSplashMode = true;
}

// While minimized the back buffer may not be presentable; flag it unsafe
// under the renderer's thread lock.
void GraphicsEngine::OnOsWindowMinimized(Event* event)
{
  PL::gRenderer->mThreadLock.Lock();
  PL::gRenderer->mBackBufferSafe = false;
  PL::gRenderer->mThreadLock.Unlock();
}

void GraphicsEngine::OnOsWindowRestored(Event* event)
{
  PL::gRenderer->mThreadLock.Lock();
  PL::gRenderer->mBackBufferSafe = true;
  PL::gRenderer->mThreadLock.Unlock();
}

// Applies vsync and debug-draw limits from the project's settings components,
// falling back to defaults when the components are absent.
void GraphicsEngine::OnProjectCogModified(Event* event)
{
  if (FrameRateSettings* frameRate = mProjectCog.has(FrameRateSettings))
    SetVerticalSync(frameRate->mVerticalSync && !frameRate->mLimitFrameRate);
  else
    SetVerticalSync(false);

  if (DebugSettings* debugSettings = mProjectCog.has(DebugSettings))
    gDebugDraw->SetMaxDebugObjects(debugSettings->GetMaxDebugObjects());
  else
    gDebugDraw->SetMaxDebugObjects();
}

// Forwards a vsync change to the renderer; no-op when the state is unchanged.
void GraphicsEngine::SetVerticalSync(bool verticalSync)
{
  if (verticalSync == mVerticalSync)
    return;

  mVerticalSync = verticalSync;

  SetVSyncJob* setVSyncJob = new SetVSyncJob();
  setVSyncJob->mVSync = mVerticalSync;
  AddRendererJob(setVSyncJob);
}

uint GraphicsEngine::GetRenderGroupCount()
{
  return mRenderGroupCount;
}

// Re-enumerates all RenderGroups and assigns each a sort id, but only when
// the set was flagged as changed.
void GraphicsEngine::UpdateRenderGroups()
{
  if (mUpdateRenderGroupCount)
  {
    mRenderGroups.Clear();
    RenderGroupManager::GetInstance()->EnumerateResources(mRenderGroups);
    mRenderGroupCount = mRenderGroups.Size();

    for (uint i = 0; i < mRenderGroupCount; ++i)
    {
      RenderGroup* renderGroup = (RenderGroup*)mRenderGroups[i];
      renderGroup->mSortId = i;
    }

    mUpdateRenderGroupCount = false;
  }
}

// Y-inverts a texture's image data in place when the active renderer API
// expects the opposite row order from Plasma's top-left origin.
void GraphicsEngine::CheckTextureYInvert(Texture* texture)
{
  ZoneScoped;
  ProfileScopeFunctionArgs(texture->Name);
  // Check for Y-invert
  // Some Api's expect byte 0 to be the bottom left pixel, in Plasma byte 0 is the
  // top left Have to Y-invert because sampling from a rendered target must also
  // work correctly Uv coordinate correction from Plasma to Api is done by the
  // shader translation of texture samples
  if (!PL::gRenderer->YInvertImageData(texture->mType))
    return;

  // All incoming image data from Plasma should be a color format and/or block
  // compressed
  if (texture->mImageData && IsColorFormat(texture->mFormat))
  {
    if (texture->mCompression == TextureCompression::None)
    {
      for (uint i = 0; i < texture->mMipCount; ++i)
      {
        MipHeader* mipHeader = texture->mMipHeaders + i;
        byte* mipData = texture->mImageData + mipHeader->mDataOffset;
        YInvertNonCompressed(mipData, mipHeader->mWidth, mipHeader->mHeight, GetPixelSize(texture->mFormat));
      }
    }
    else
    {
      for (uint i = 0; i < texture->mMipCount; ++i)
      {
        MipHeader* mipHeader = texture->mMipHeaders + i;
        byte* mipData = texture->mImageData + mipHeader->mDataOffset;
        YInvertBlockCompressed(mipData, mipHeader->mWidth, mipHeader->mHeight, mipHeader->mDataSize, texture->mCompression);
      }
    }
  }
}

// Queues a job for the renderer thread; when threading is disabled the queue
// is drained synchronously on the calling thread instead.
void GraphicsEngine::AddRendererJob(RendererJob* rendererJob)
{
  mRendererJobQueue->AddJob(rendererJob);
  if (!ThreadingEnabled)
    RendererThreadMain(mRendererJobQueue);
}

// Creates the renderer via a job and blocks until it completes, treating any
// reported error as fatal.
void GraphicsEngine::CreateRenderer(OsWindow* mainWindow)
{
  OsHandle mainWindowHandle = mainWindow->GetWindowHandle();

  CreateRendererJob* rendererJob = new CreateRendererJob();
  rendererJob->mMainWindowHandle = mainWindowHandle;
  AddRendererJob(rendererJob);

  rendererJob->WaitOnThisJob();
  if (rendererJob->mError.Empty() == false)
    FatalEngineError(rendererJob->mError.c_str());
  delete rendererJob;

  gIntelGraphics = PL::gRenderer->mDriverSupport.mIntel;

  ConnectThisTo(mainWindow, Events::OsWindowMinimized, OnOsWindowMinimized);
  ConnectThisTo(mainWindow, Events::OsWindowRestored, OnOsWindowRestored);
}

// Destroys the renderer via a job and blocks until it completes.
void GraphicsEngine::DestroyRenderer()
{
  DestroyRendererJob* rendererJob = new DestroyRendererJob();
  rendererJob->mRendererJobQueue = mRendererJobQueue;
  AddRendererJob(rendererJob);
  rendererJob->WaitOnThisJob();
  delete rendererJob;
}

// Sends a material's composite data to the renderer, creating its render data
// handle on first use.
void GraphicsEngine::AddMaterial(Material* material)
{
  if (!material->mRenderData)
    material->mRenderData = PL::gRenderer->CreateMaterialRenderData();

  AddMaterialJob* rendererJob = new AddMaterialJob();
  rendererJob->mRenderData = material->mRenderData;
  rendererJob->mCompositeName = material->mCompositeName;
  rendererJob->mMaterialId = material->mResourceId.mValue;
  AddRendererJob(rendererJob);
}

// Copies a mesh's vertex/index data into an upload job and sends it to the
// renderer.
void GraphicsEngine::AddMesh(Mesh* mesh)
{
  if (!mesh->mRenderData)
    mesh->mRenderData = PL::gRenderer->CreateMeshRenderData();

  AddMeshJob* rendererJob = new AddMeshJob();
  rendererJob->mRenderData = mesh->mRenderData;

  VertexBuffer* vertices = &mesh->mVertices;
  IndexBuffer* indices = &mesh->mIndices;

  rendererJob->mPrimitiveType = mesh->mPrimitiveType;

  // Default to empty buffers; filled in below when data is present.
  rendererJob->mVertexSize = 0;
  rendererJob->mVertexCount = 0;
  rendererJob->mVertexData = nullptr;
  rendererJob->mIndexSize = indices->mIndexSize;
  rendererJob->mIndexCount = 0;
  rendererJob->mIndexData = nullptr;

  if (vertices->mFixedDesc.mVertexSize != 0)
  {
    rendererJob->mVertexSize = vertices->mFixedDesc.mVertexSize;
    rendererJob->mVertexCount = vertices->mDataSize / vertices->mFixedDesc.mVertexSize;

    // Do not try allocating without a full vertex worth of data.
    if (rendererJob->mVertexCount > 0)
    {
      uint vertexDataSize = rendererJob->mVertexSize * rendererJob->mVertexCount;
      rendererJob->mVertexData = new byte[vertexDataSize];
      memcpy(rendererJob->mVertexData, vertices->mData, vertexDataSize);
    }

    rendererJob->mVertexAttributes.Reserve(8);
    for (uint i = 0; i < FixedVertexDescription::sMaxElements; ++i)
    {
      // Attribute list is terminated by the first None semantic.
      if (vertices->mFixedDesc.mAttributes[i].mSemantic == VertexSemantic::None)
        break;
      rendererJob->mVertexAttributes.PushBack(vertices->mFixedDesc.mAttributes[i]);
    }
  }

  rendererJob->mIndexSize = indices->mIndexSize;
  rendererJob->mIndexCount = indices->mIndexCount;
  if (indices->mData.Empty() == false)
  {
    uint indexDataSize = rendererJob->mIndexSize * rendererJob->mIndexCount;
    rendererJob->mIndexData = new byte[indexDataSize];
    memcpy(rendererJob->mIndexData, &indices->mData[0], indexDataSize);
  }

  rendererJob->mBones.Assign(mesh->mBones.All());

  AddRendererJob(rendererJob);
}

// Transfers ownership of a texture's mip headers and image data into an
// upload job and sends it to the renderer.
void GraphicsEngine::AddTexture(Texture* texture, bool subImage, uint xOffset, uint yOffset)
{
  // Do y inverting on main thread (if needed)
  // otherwise the render thread takes too long to upload textures
  // and ram usage builds up too high during project loading.
  // NOTE: Gpu mip generation on block compressed textures
  // also takes a decent amount of time.
  CheckTextureYInvert(texture);

  if (!texture->mRenderData)
    texture->mRenderData = PL::gRenderer->CreateTextureRenderData();

  AddTextureJob* rendererJob = new AddTextureJob();
  rendererJob->mRenderData = texture->mRenderData;

  rendererJob->mWidth = texture->mWidth;
  rendererJob->mHeight = texture->mHeight;
  rendererJob->mMipCount = texture->mMipCount;
  rendererJob->mTotalDataSize = texture->mTotalDataSize;

  // The job takes ownership of the headers and pixel data; null the texture's
  // pointers so they are not double-freed.
  rendererJob->mMipHeaders = texture->mMipHeaders;
  rendererJob->mImageData = texture->mImageData;
  texture->mMipHeaders = nullptr;
  texture->mImageData = nullptr;

  rendererJob->mType = texture->mType;
  rendererJob->mFormat = texture->mFormat;
  rendererJob->mCompression = texture->mCompression;
  rendererJob->mAddressingX = texture->mAddressingX;
  rendererJob->mAddressingY = texture->mAddressingY;
  rendererJob->mFiltering = texture->mFiltering;
  rendererJob->mCompareMode = texture->mCompareMode;
  rendererJob->mCompareFunc = texture->mCompareFunc;
  rendererJob->mAnisotropy = texture->mAnisotropy;
  rendererJob->mMipMapping = texture->mMipMapping;
  rendererJob->mMaxMipOverride = texture->mMaxMipOverride;

  rendererJob->mSubImage = subImage;
  rendererJob->mXOffset = xOffset;
  rendererJob->mYOffset = yOffset;

  AddRendererJob(rendererJob);
}

void GraphicsEngine::RemoveMaterial(Material* material)
{
  // Handle double remove events
  if (material->mRenderData == nullptr)
    return;
  RemoveMaterialJob* rendererJob = new RemoveMaterialJob();
  rendererJob->mRenderData = material->mRenderData;
  material->mRenderData = nullptr;
  AddRendererJob(rendererJob);
}

void GraphicsEngine::RemoveMesh(Mesh* mesh)
{
  // Handle double remove events
  if (mesh->mRenderData == nullptr)
    return;
  RemoveMeshJob* rendererJob = new RemoveMeshJob();
  rendererJob->mRenderData = mesh->mRenderData;
  mesh->mRenderData = nullptr;
  AddRendererJob(rendererJob);
}

void GraphicsEngine::RemoveTexture(Texture* texture)
{
  // Handle double remove events
  if (texture->mRenderData == nullptr)
    return;
  RemoveTextureJob* rendererJob = new RemoveTextureJob();
  rendererJob->mRenderData = texture->mRenderData;
  texture->mRenderData = nullptr;
  AddRendererJob(rendererJob);
}

// Forwards the lazy-shader-compilation toggle to the renderer.
void GraphicsEngine::SetLazyShaderCompilation(bool lazyShaderCompilation)
{
  SetLazyShaderCompilationJob* rendererJob = new SetLazyShaderCompilationJob();
  rendererJob->mLazyShaderCompilation = lazyShaderCompilation;
  AddRendererJob(rendererJob);
}

void GraphicsEngine::OnRenderGroupAdded(ResourceEvent* event)
{
  mAddedRenderGroups.PushBack((RenderGroup*)event->EventResource);
  mUpdateRenderGroupCount = true;
}

// On a RenderGroup rename, fix up Materials' serialized references and
// rebuild the RenderGroup hierarchies.
void GraphicsEngine::OnRenderGroupModified(ResourceEvent* event)
{
  if (!event->LastIdName.Empty())
  {
    Array<Resource*> materials;
    MaterialManager::GetInstance()->EnumerateResources(materials);
    ResourceListResetIdNames<Material, RenderGroupManager>(materials);

    ResolveRenderGroupHierarchies();
  }
}

void GraphicsEngine::OnRenderGroupRemoved(ResourceEvent* event)
{
  RenderGroup* renderGroup = (RenderGroup*)event->EventResource;

  ResourceListRemove(renderGroup);

  // Remove hierarchy connections.
  renderGroup->SetParentInternal(nullptr);
  // Iterate over a copy — SetParentInternal presumably mutates the child
  // list (TODO confirm against RenderGroup implementation).
  Array<RenderGroup*> children = renderGroup->mChildrenInternal;
  forRange (RenderGroup* child, children.All())
    child->SetParentInternal(nullptr);

  mUpdateRenderGroupCount = true;
}

void GraphicsEngine::OnMaterialAdded(ResourceEvent* event)
{
  mAddedMaterials.PushBack((Material*)event->EventResource);
}

// Recomposites and recompiles shaders when a material's fragment composition
// changed; on rename, fixes up RenderGroups' serialized references.
void GraphicsEngine::OnMaterialModified(ResourceEvent* event)
{
  Material* material = (Material*)event->EventResource;

  if (material->mCompositionChanged)
  {
    UpdateUniqueComposites(material, UniqueCompositeOp::Modify);
    material->mCompositionChanged = false;
    AddMaterial(material);
    CompileShaders();
  }

  if (!event->LastIdName.Empty())
  {
    Array<Resource*> renderGroups;
    RenderGroupManager::GetInstance()->EnumerateResources(renderGroups);
    ResourceListResetIdNames<RenderGroup, MaterialManager>(renderGroups);
  }
}

// Releases a removed material's composite reference and its renderer data,
// recompiling if a composite became unused.
void GraphicsEngine::OnMaterialRemoved(ResourceEvent* event)
{
  Material* material = (Material*)event->EventResource;

  ResourceListRemove(material);

  UpdateUniqueComposites(material, UniqueCompositeOp::Remove);
  if (mRemovedComposites.Empty() == false)
    CompileShaders();

  RemoveMaterial(material);
}

void GraphicsEngine::OnLightningFragmentAdded(ResourceEvent* event)
{
  // OnResourcesAdded will invoke a compile after this
  mModifiedFragmentFiles.PushBack(event->EventResource->Name);
}

void GraphicsEngine::OnLightningFragmentModified(ResourceEvent* event)
{
  // Happens on save, wait for successful compilation to process
  mModifiedFragmentFiles.PushBack(event->EventResource->Name);
}

void GraphicsEngine::OnLightningFragmentRemoved(ResourceEvent* event)
{
  // Only need removed fragments if going to send compile in removed resources
  mRemovedFragmentFiles.PushBack(event->EventResource->Name);

  // Cannot process modified files that were removed
  // Added/modified methods do not erase from removed list because removed has
  // to operate on the previous fragments library
  mModifiedFragmentFiles.EraseValue(event->EventResource->Name);
}

void
GraphicsEngine::OnMeshAdded(ResourceEvent* event)
{
  AddMesh((Mesh*)event->EventResource);
}

void GraphicsEngine::OnMeshModified(ResourceEvent* event)
{
  AddMesh((Mesh*)event->EventResource);
}

void GraphicsEngine::OnMeshRemoved(ResourceEvent* event)
{
  RemoveMesh((Mesh*)event->EventResource);
}

void GraphicsEngine::OnTextureAdded(ResourceEvent* event)
{
  AddTexture((Texture*)event->EventResource);
}

void GraphicsEngine::OnTextureModified(ResourceEvent* event)
{
  AddTexture((Texture*)event->EventResource);
}

void GraphicsEngine::OnTextureRemoved(ResourceEvent* event)
{
  RemoveTexture((Texture*)event->EventResource);
}

// Batch-processes the Materials/RenderGroups queued since the last resource
// load: resolves cross references and hierarchies, then either defers to a
// fragment compile or updates composites and compiles shaders directly.
void GraphicsEngine::OnResourcesAdded(ResourceEvent* event)
{
  forRange (Material* material, mAddedMaterials.All())
    ResourceListAdd(material);
  forRange (RenderGroup* renderGroup, mAddedRenderGroups.All())
    ResourceListAdd(renderGroup);

  if (mAddedMaterials.Empty() == false)
  {
    forRange (Resource* resource, RenderGroupManager::GetInstance()->AllResources())
      ResourceListResolveReferences((RenderGroup*)resource);
  }

  if (mAddedRenderGroups.Empty() == false)
  {
    forRange (Resource* resource, MaterialManager::GetInstance()->AllResources())
      ResourceListResolveReferences((Material*)resource);

    ResolveRenderGroupHierarchies();
  }

  // Materials could be added in a non compiling state
  // so copy to a separate list for handling shader composites
  mAddedMaterialsForComposites.Append(mAddedMaterials.All());

  mAddedMaterials.Clear();
  mAddedRenderGroups.Clear();

  // If added LightningFragments
  if (mModifiedFragmentFiles.Empty() == false)
  {
    LightningManager::GetInstance()->TriggerCompileExternally();
  }
  else
  {
    forRange (Material* material, mAddedMaterialsForComposites.All())
    {
      UpdateUniqueComposites(material, UniqueCompositeOp::Add);
      AddMaterial(material);
    }
    mAddedMaterialsForComposites.Clear();

    CompileShaders();
  }
}

// should this invoke a compile if there are removed fragment files?
void GraphicsEngine::OnResourcesRemoved(ResourceEvent* event)
{
  // Can't rebuild meta on shutdown because content system is destroyed by this
  // point
  // if (mEngineShutdown || mRemovedFragmentFiles.Empty())
  //   return;
  // BuildFragmentsLibrary();
}

// Reference-counts the unique fragment composition a material uses; a new
// composition is flagged as modified for the next shader compile.
void GraphicsEngine::AddComposite(Material* material)
{
  String compositeName = material->mCompositeName;

  if (mUniqueComposites.ContainsKey(compositeName))
  {
    mUniqueComposites[compositeName].mReferences += 1;
  }
  else
  {
    UniqueComposite newComposite;
    newComposite.mName = compositeName;
    newComposite.mFragmentNames = material->mFragmentNames;
    newComposite.mFragmentNameMap.Append(newComposite.mFragmentNames.All());
    newComposite.mReferences = 1;
    mUniqueComposites.Insert(compositeName, newComposite);
    mModifiedComposites.Insert(compositeName);
  }
}

// Decrements a composite's reference count, removing and flagging it for
// shader removal when the count reaches zero.
void GraphicsEngine::RemoveComposite(StringParam compositeName)
{
  ErrorIf(mUniqueComposites.ContainsKey(compositeName) == false, "Reference count error.");

  mUniqueComposites[compositeName].mReferences -= 1;
  if (mUniqueComposites[compositeName].mReferences == 0)
  {
    mRemovedComposites.Insert(compositeName);
    mModifiedComposites.Erase(compositeName);
    mUniqueComposites.Erase(compositeName);
  }
}

// Returns the cached Shader for a coreVertex/composite/renderPass
// permutation, allocating an uncompiled entry from the pool on first request.
Shader* GraphicsEngine::GetOrCreateShader(StringParam coreVertex, StringParam composite, StringParam renderPass, ShaderMap& shaderMap)
{
  String name = BuildString(coreVertex, composite, renderPass);

  Shader*& shader = shaderMap[name];
  if (shader)
    return shader;

  shader = gShaderPool->AllocateType<Shader>();
  shader->mCoreVertex = coreVertex;
  shader->mComposite = composite;
  shader->mRenderPass = renderPass;
  shader->mName = name;
  return shader;
}

// Gathers all shader permutations affected by the element lists. 'index'
// selects which list (0=coreVertex, 1=composite, 2=renderPass) drives the
// outer iteration; 'testMap' supplies already-known shader sets per element.
void GraphicsEngine::FindShadersToCompile(Array<String>& coreVertexRange,
                                          Array<String>& compositeRange,
                                          Array<String>& renderPassRange,
                                          ShaderSetMap& testMap,
                                          uint index,
                                          ShaderSet& shaders)
{
  Array<String>* ranges[] = {&coreVertexRange, &compositeRange, &renderPassRange};

  // Index order for iteration
  uint i0 = index;
  uint i1 = (i0 + 1) % 3;
  uint i2 = (i0 + 2) % 3;
// Fragment order from iteration order for indexing fragment names, // {CoreVertex, Composite, RenderPass} uint f0 = (index + index) % 3; uint f1 = (f0 + 1) % 3; uint f2 = (f0 + 2) % 3; forRange (String frag0, ranges[i0]->All()) { if (testMap.ContainsKey(frag0)) { ShaderSet* shaderSet = testMap.FindPointer(frag0); forRange (Shader* shader, shaderSet->All()) shaders.Insert(shader); } else { forRange (String frag1, ranges[i1]->All()) { forRange (String frag2, ranges[i2]->All()) { String fragmentNames[] = {frag0, frag1, frag2}; Shader* shader = GetOrCreateShader(fragmentNames[f0], fragmentNames[f1], fragmentNames[f2], mCompositeShaders); shaders.Insert(shader); } } // Special case for composites as a post process if (index == 1) { Shader* shader = GetOrCreateShader(cPostVertex, frag0, String(), mCompositeShaders); shaders.Insert(shader); } } } } void GraphicsEngine::FindShadersToRemove(Array<String>& elementRange, ShaderSetMap& testMap, ShaderSet& shaders) { forRange (String name, elementRange.All()) { // Composites can not be in the map if the composite exists because one of // its fragments didn't compile if (testMap.ContainsKey(name) == false) continue; ShaderSet* shaderSet = testMap.FindPointer(name); forRange (Shader* shader, shaderSet->All()) { shaders.Insert(shader); mCompositeShaders.Erase(shader->mName); } } } void GraphicsEngine::AddToShaderMaps(ShaderSet& shaders) { forRange (Shader* shader, shaders.All()) { mShaderCoreVertexMap[shader->mCoreVertex].Insert(shader); mShaderCompositeMap[shader->mComposite].Insert(shader); mShaderRenderPassMap[shader->mRenderPass].Insert(shader); } } void GraphicsEngine::RemoveFromShaderMaps(ShaderSet& shaders) { forRange (Shader* shader, shaders.All()) { RemoveFromShaderMap(mShaderCoreVertexMap, shader->mCoreVertex, shader); RemoveFromShaderMap(mShaderCompositeMap, shader->mComposite, shader); RemoveFromShaderMap(mShaderRenderPassMap, shader->mRenderPass, shader); } } void GraphicsEngine::RemoveFromShaderMap(ShaderSetMap& 
shaderMap, StringParam elementName, Shader* shader) { if (shaderMap.ContainsKey(elementName)) { shaderMap[elementName].Erase(shader); if (shaderMap[elementName].Empty()) shaderMap.Erase(elementName); } } void GraphicsEngine::ProcessModifiedScripts(LibraryRef library) { forRange (BoundType* type, library->BoundTypes.Values()) { String typeName = type->Name; // If already an entry for this type then inputs might have changed bool oldEntries = mComponentShaderProperties.ContainsKey(typeName); mComponentShaderProperties.Erase(typeName); // Only look for inputs on component types if (type->IsA(LightningTypeId(Component))) { forRange (Property* metaProperty, type->GetProperties()) { forRange (MetaShaderInput* shaderInput, metaProperty->HasAll<MetaShaderInput>()) { ShaderMetaProperty shaderProperty; shaderProperty.mMetaPropertyName = metaProperty->Name; shaderProperty.mFragmentName = shaderInput->mFragmentName; shaderProperty.mInputName = shaderInput->mInputName; mComponentShaderProperties[typeName].PushBack(shaderProperty); } } } bool newEntries = mComponentShaderProperties.ContainsKey(typeName); // Forward to Graphicals if any relevant changes if (oldEntries || newEntries) { ShaderInputsEvent event; event.mType = type; DispatchEvent(Events::ShaderInputsModified, &event); } } } LightningFragmentType::Enum GraphicsEngine::GetFragmentType(MaterialBlock* materialBlock) { return mShaderGenerator->mFragmentTypes.FindValue(LightningVirtualTypeId(materialBlock)->Name, LightningFragmentType::Fragment); } HandleOf<RenderTarget> GraphicsEngine::GetRenderTarget(uint width, uint height, TextureFormat::Enum format, SamplerSettings samplerSettings) { return mRenderTargetManager.GetRenderTarget(width, height, format, samplerSettings); } HandleOf<RenderTarget> GraphicsEngine::GetRenderTarget(HandleOf<Texture> texture) { return mRenderTargetManager.GetRenderTarget(texture); } void GraphicsEngine::ClearRenderTargets() { mRenderTargetManager.ClearRenderTargets(); } void 
GraphicsEngine::ForceCompileAllShaders() { BlockingTaskEvent event("Compiling"); PL::gEngine->DispatchEvent(Events::BlockingTaskStart, &event); ShaderSet allShaders; allShaders.Append(mCompositeShaders.Values()); allShaders.Append(mPostProcessShaders.Values()); if (allShaders.Empty()) return; AddShadersJob* addShadersJob = new AddShadersJob(mRendererJobQueue); bool compiled = mShaderGenerator->BuildShaders(allShaders, mUniqueComposites, addShadersJob->mShaders); ErrorIf(!compiled, "Shaders did not compile after composition."); // Blocking task is ended in the return exectute of the job, // mForceCompileBatchCount cannot be 0 here. addShadersJob->mForceCompileBatchCount = 10; AddRendererJob(addShadersJob); } void GraphicsEngine::ModifiedFragment(LightningFragmentType::Enum type, StringParam name) { switch (type) { case Plasma::LightningFragmentType::CoreVertex: mModifiedCoreVertex.PushBack(name); break; case Plasma::LightningFragmentType::RenderPass: mModifiedRenderPass.PushBack(name); break; case Plasma::LightningFragmentType::PostProcess: mModifiedPostProcess.PushBack(name); break; default: break; } } void GraphicsEngine::RemovedFragment(LightningFragmentType::Enum type, StringParam name) { switch (type) { case Plasma::LightningFragmentType::CoreVertex: mRemovedCoreVertex.PushBack(name); break; case Plasma::LightningFragmentType::RenderPass: mRemovedRenderPass.PushBack(name); break; case Plasma::LightningFragmentType::PostProcess: mRemovedPostProcess.PushBack(name); break; default: break; } } void GraphicsEngine::OnCompileLightningFragments(LightningCompileFragmentEvent* event) { String libraryName = BuildString(event->mOwningLibrary->Name, "Fragments"); event->mReturnedLibrary = mShaderGenerator->BuildFragmentsLibrary(event->mDependencies, event->mFragments, libraryName); } void GraphicsEngine::OnScriptsCompiledPrePatch(LightningCompileEvent* event) { forRange (ResourceLibrary* modifiedLibrary, event->mModifiedLibraries.All()) { if 
(!modifiedLibrary->mSwapFragment.HasPendingLibrary()) continue; LightningShaderIRLibraryRef currentLibrary = mShaderGenerator->GetCurrentInternalLibrary(modifiedLibrary->mSwapFragment.mCurrentLibrary); LightningFragmentTypeMap& currentFragmentTypes = mShaderGenerator->mFragmentTypes; LightningShaderIRLibraryRef pendingLibrary = mShaderGenerator->GetPendingInternalLibrary(modifiedLibrary->mSwapFragment.mPendingLibrary); LightningFragmentTypeMap& pendingFragmentTypes = mShaderGenerator->mPendingFragmentTypes[modifiedLibrary->mSwapFragment.mPendingLibrary]; // Find removed types if (currentLibrary != nullptr) { forRange (LightningShaderIRType* shaderType, currentLibrary->mTypes.Values()) { ShaderIRTypeMeta* shaderTypeMeta = shaderType->mMeta; if (shaderTypeMeta == nullptr) continue; // @Nate: This flag doesn't exist anymore. // if (shaderType->mFlags.IsSet(ShaderTypeFlags::Native)) // continue; // Skip if type still exists if (pendingLibrary->mTypes.ContainsKey(shaderTypeMeta->mLightningName)) continue; LightningFragmentType::Enum fragmentType = currentFragmentTypes.FindValue(shaderTypeMeta->mLightningName, LightningFragmentType::Fragment); RemovedFragment(fragmentType, shaderTypeMeta->mLightningName); } } // Find added/modified types if (mModifiedFragmentFiles.Empty() == false) { forRange (LightningShaderIRType* shaderType, pendingLibrary->mTypes.Values()) { ShaderIRTypeMeta* shaderTypeMeta = shaderType->mMeta; if (shaderTypeMeta == nullptr) continue; // @Nate: This flag doesn't exist anymore. // if (shaderType->mFlags.IsSet(ShaderTypeFlags::Native)) // continue; if (shaderTypeMeta->mComplexUserData.GetSize() == 0) continue; // Identify new/modified types. // We currently only have one class written to the complex user data // so we can hard-code passing 0 in (for the index). 
FragmentUserData& fragmentUserData = shaderTypeMeta->mComplexUserData.ReadObject<FragmentUserData>(0); String resourceName = fragmentUserData.mResourceName; if (mModifiedFragmentFiles.Contains(resourceName)) { // Check for fragments that used to have a special attribute and add // them to the appropriate removed list LightningFragmentType::Enum currentFragmentType = currentFragmentTypes.FindValue(shaderTypeMeta->mLightningName, LightningFragmentType::Fragment); LightningFragmentType::Enum pendingFragmentType = pendingFragmentTypes.FindValue(shaderTypeMeta->mLightningName, LightningFragmentType::Fragment); if (pendingFragmentType != currentFragmentType) RemovedFragment(currentFragmentType, shaderTypeMeta->mLightningName); ModifiedFragment(pendingFragmentType, shaderTypeMeta->mLightningName); // If current type is fragment and pending type isn't then any // affected composites are just going to get removed and don't need to // be checked if (pendingFragmentType == LightningFragmentType::Fragment) { forRange (UniqueComposite& composite, mUniqueComposites.Values()) { if (composite.mFragmentNameMap.Contains(shaderTypeMeta->mLightningName)) mModifiedComposites.Insert(composite.mName); } } // Find all types dependent on this one and also list them as modified HashSet<LightningShaderIRType*> dependents; pendingLibrary->GetAllDependents(shaderType, dependents); forRange (LightningShaderIRType* dependent, dependents.All()) { ShaderIRTypeMeta* dependentTypeMeta = dependent->mMeta; if (dependentTypeMeta == nullptr) continue; LightningFragmentType::Enum dependentType = pendingFragmentTypes.FindValue(dependentTypeMeta->mLightningName, LightningFragmentType::Fragment); // Do not need to check composites unless it's a regular fragment // type Composites will otherwise be handled by the other fragment // types being modified if (dependentType == LightningFragmentType::Fragment) { // Check all composites // Post patch still needs to run composite update on materials, // but if a 
composite results in being removed it will correctly // be removed from this list forRange (UniqueComposite& composite, mUniqueComposites.Values()) { if (composite.mFragmentNameMap.Contains(dependentTypeMeta->mLightningName)) mModifiedComposites.Insert(composite.mName); } } else { ModifiedFragment(dependentType, dependentTypeMeta->mLightningName); } } } } } } mRemovedFragmentFiles.Clear(); mModifiedFragmentFiles.Clear(); MaterialManager::GetInstance()->ReInitializeRemoveComponents(); } void GraphicsEngine::OnScriptsCompiledCommit(LightningCompileEvent* event) { // Update the old libraries with the new ones mNewLibrariesCommitted = mShaderGenerator->Commit(event); // After fragment libraries are committed component shader inputs can be // processed forRange (ResourceLibrary* modifiedLibrary, event->mModifiedLibraries.All()) { if (modifiedLibrary->mSwapScript.HasPendingLibrary()) ProcessModifiedScripts(modifiedLibrary->mSwapScript.mPendingLibrary); } } void GraphicsEngine::OnScriptsCompiledPostPatch(LightningCompileEvent* event) { MaterialManager::GetInstance()->ReInitializeAddComponents(); // Don't do anything if no new fragment libraries were made if (mNewLibrariesCommitted == false) return; mNewLibrariesCommitted = false; MaterialFactory::GetInstance()->UpdateRestrictedComponents(mShaderGenerator->mCurrentToInternal, mShaderGenerator->mFragmentTypes); // Re-Initialize composites after new types have been committed forRange (Resource* resource, MaterialManager::GetInstance()->AllResources()) { Material* material = (Material*)resource; if (mAddedMaterialsForComposites.Contains(material)) UpdateUniqueComposites(material, UniqueCompositeOp::Add); else UpdateUniqueComposites(material, UniqueCompositeOp::Modify); AddMaterial(material); material->SendModified(); } mAddedMaterialsForComposites.Clear(); CompileShaders(); } void GraphicsEngine::OnScriptCompilationFailed(Event* event) { forRange (Material* material, mAddedMaterialsForComposites.All()) { 
UpdateUniqueComposites(material, UniqueCompositeOp::Add); AddMaterial(material); } mAddedMaterialsForComposites.Clear(); // If scripts failed, we want to update any material modifications to use the // old fragment library if (!mModifiedComposites.Empty() || !mRemovedComposites.Empty()) CompileShaders(); } void GraphicsEngine::UpdateUniqueComposites(Material* material, UniqueCompositeOp::Enum uniqueCompositeOp) { if (uniqueCompositeOp == UniqueCompositeOp::Add) { ErrorIf(material->mRenderData != nullptr, "Material has already been added."); material->UpdateCompositeName(); AddComposite(material); } else if (uniqueCompositeOp == UniqueCompositeOp::Remove) { ErrorIf(material->mRenderData == nullptr, "Material has already been removed."); RemoveComposite(material->mCompositeName); } else if (uniqueCompositeOp == UniqueCompositeOp::Modify) { String oldCompositeName = material->mCompositeName; material->UpdateCompositeName(); String compositeName = material->mCompositeName; if (compositeName == oldCompositeName) return; RemoveComposite(oldCompositeName); AddComposite(material); } } void GraphicsEngine::CompileShaders() { if (mShaderGenerator->mCurrentToInternal.Empty()) return; // Find shaders to remove ShaderSet shadersToRemove; Array<String> removedComposites; removedComposites.Append(mRemovedComposites.All()); FindShadersToRemove(mRemovedCoreVertex, mShaderCoreVertexMap, shadersToRemove); FindShadersToRemove(removedComposites, mShaderCompositeMap, shadersToRemove); FindShadersToRemove(mRemovedRenderPass, mShaderRenderPassMap, shadersToRemove); RemoveFromShaderMaps(shadersToRemove); forRange (String fragment, mRemovedPostProcess.All()) { String shaderName = BuildString(cPostVertex, fragment); shadersToRemove.Insert(mPostProcessShaders[shaderName]); mPostProcessShaders.Erase(shaderName); } if (shadersToRemove.Empty() == false) { RemoveShadersJob* removeShadersJob = new RemoveShadersJob(); forRange (Shader* shader, shadersToRemove.All()) { ShaderEntry entry(shader); 
removeShadersJob->mShaders.PushBack(entry); gShaderPool->DeallocateType(shader); } AddRendererJob(removeShadersJob); } mRemovedCoreVertex.Clear(); mRemovedComposites.Clear(); mRemovedRenderPass.Clear(); mRemovedPostProcess.Clear(); // Find shaders to compile ShaderSet shadersToCompile; Array<String> compositeNames; compositeNames.Append(mUniqueComposites.Keys()); Array<String> modifiedComposites; modifiedComposites.Append(mModifiedComposites.All()); Array<String>& coreVertexFragments = mShaderGenerator->mCoreVertexFragments; Array<String>& renderPassFragments = mShaderGenerator->mRenderPassFragments; // Process based on modified lists FindShadersToCompile( mModifiedCoreVertex, compositeNames, renderPassFragments, mShaderCoreVertexMap, 0, shadersToCompile); FindShadersToCompile( coreVertexFragments, modifiedComposites, renderPassFragments, mShaderCompositeMap, 1, shadersToCompile); FindShadersToCompile( coreVertexFragments, compositeNames, mModifiedRenderPass, mShaderRenderPassMap, 2, shadersToCompile); AddToShaderMaps(shadersToCompile); forRange (String fragment, mModifiedPostProcess.All()) { Shader* shader = GetOrCreateShader(cPostVertex, fragment, String(), mPostProcessShaders); shadersToCompile.Insert(shader); } mModifiedCoreVertex.Clear(); mModifiedComposites.Clear(); mModifiedRenderPass.Clear(); mModifiedPostProcess.Clear(); forRange (Shader* shader, shadersToCompile) { shader->mSentToRenderer = false; } #if !defined(PlasmaLazyShaderCompositing) if (shadersToCompile.Empty() == false) { AddShadersJob* addShadersJob = new AddShadersJob(mRendererJobQueue); bool compiled = mShaderGenerator->BuildShaders(shadersToCompile, mUniqueComposites, addShadersJob->mShaders); ErrorIf(!compiled, "Shaders did not compile after composition."); AddRendererJob(addShadersJob); } #endif } void GraphicsEngine::WriteTextureToFile(HandleOf<Texture> texture, StringParam filename) { mDelayedTextureToFile.PushBack(TextureToFile(texture, filename)); } void SaveToImageJob::Execute() { 
Status status; SaveImage(status, mFilename, mImage, mWidth, mHeight, mFormat, mImageType); delete[] mImage; } } // namespace Plasma
//=============================================================================== // Copyright (c) 2007-2016 Advanced Micro Devices, Inc. All rights reserved. // Copyright (c) 2004-2006 ATI Technologies Inc. //=============================================================================== // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files(the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and / or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions : // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. 
// #include <assert.h> #include <math.h> #include <float.h> #include <assert.h> #include "Common.h" #include "3dquant_constants.h" #include "3dquant_vpc.h" #include "BC7_Definitions.h" #include "debug.h" #include <mutex> #ifdef BC7_DEBUG_TO_RESULTS_TXT FILE *fp; #endif #define EPSILON 0.000001 #define MAX_TRY 20 #define DBL_MAX_EXP 1024 #undef TRACE #define MAX_TRACE 250000 struct TRACE { int k; double d; }; static int trcnts[MAX_CLUSTERS][MAX_ENTRIES_QUANT_TRACE]; #define USE_TRACE_WITH_DYNAMIC_MEM #ifdef USE_TRACE_WITH_DYNAMIC_MEM int* amd_codes[MAX_CLUSTERS][MAX_ENTRIES_QUANT_TRACE] = {}; TRACE* amd_trs[MAX_CLUSTERS][MAX_ENTRIES_QUANT_TRACE] = {}; #else int amd_codes[MAX_CLUSTERS][MAX_ENTRIES_QUANT_TRACE][MAX_TRACE]; TRACE amd_trs[MAX_CLUSTERS][MAX_ENTRIES_QUANT_TRACE][MAX_TRACE]; #endif static int g_Quant_init = 0; void traceBuilder (int numEntries, int numClusters,struct TRACE tr [], int code[], int *trcnt ); std::mutex mtx; void Quant_Init(void) { if (g_Quant_init > 0) { g_Quant_init++; return; } if (amd_codes[0][0]) return; mtx.lock(); for ( int numClusters = 0; numClusters < MAX_CLUSTERS; numClusters++ ) { for ( int numEntries = 0; numEntries < MAX_ENTRIES_QUANT_TRACE; numEntries++ ) { #ifdef USE_TRACE_WITH_DYNAMIC_MEM amd_codes[ numClusters][ numEntries ] = new int[ MAX_TRACE ]; amd_trs[ numClusters ][ numEntries ] = new TRACE[ MAX_TRACE ]; assert(amd_codes[ numClusters][ numEntries ]); assert(amd_trs[ numClusters ][ numEntries ]); #endif traceBuilder ( numEntries+1, numClusters+1, amd_trs[numClusters][numEntries], amd_codes[numClusters][numEntries], trcnts[numClusters]+(numEntries)); } } g_Quant_init++; mtx.unlock(); } void Quant_DeInit(void) { g_Quant_init--; if (g_Quant_init > 1) { return; } else { g_Quant_init = 0; // Reset in case user called Quant_DeInit too many times without matching Quant_Init if (amd_codes[0][0] == nullptr) return; #ifdef USE_TRACE_WITH_DYNAMIC_MEM for (int i = 0; i < MAX_CLUSTERS; i++) { for (int j = 0; j < 
MAX_ENTRIES_QUANT_TRACE; j++) { if (amd_codes[i][j]) { delete[] amd_codes[i][j]; amd_codes[i][j] = nullptr; } if (amd_trs[i][j]) { delete[] amd_trs[i][j]; amd_trs[i][j] = nullptr; } } } #endif } } //========================================================================================= void sugar(void){ #ifdef USE_DBGTRACE DbgTrace(("sugar!")) #endif }; inline int a_compare( const void *arg1, const void *arg2 ) { #ifdef USE_DBGTRACE DbgTrace(()); #endif if (((a* )arg1)->d-((a* )arg2)->d > 0 ) return 1; if (((a* )arg1)->d-((a* )arg2)->d < 0 ) return -1; return 0; }; // // We ignore the issue of ordering equal elements here, though it can affect results abit // void sortProjection(double projection[MAX_ENTRIES], int order[MAX_ENTRIES], int numEntries) { #ifdef USE_DBGTRACE DbgTrace(()); #endif int i; a what[MAX_ENTRIES+MAX_PARTITIONS_TABLE]; for (i=0; i < numEntries;i++) what[what[i].i=i].d = projection[i]; qsort((void*)&what, numEntries, sizeof(a),a_compare); for (i=0; i < numEntries;i++) order[i]=what[i].i; }; void covariance(double data[][DIMENSION], int numEntries, double cov[DIMENSION][DIMENSION]) { #ifdef USE_DBGTRACE DbgTrace(()); #endif int i,j,k; for(i=0;i<DIMENSION;i++) for(j=0;j<=i;j++) { cov[i][j]=0; for(k=0;k<numEntries;k++) cov[i][j]+=data[k][i]*data[k][j]; } for(i=0;i<DIMENSION;i++) for(j=i+1;j<DIMENSION;j++) cov[i][j] = cov[j][i]; } void covariance_d(double data[][MAX_DIMENSION_BIG], int numEntries, double cov[MAX_DIMENSION_BIG][MAX_DIMENSION_BIG], int dimension) { #ifdef USE_DBGTRACE DbgTrace(()); #endif int i,j,k; for(i=0;i<dimension;i++) for(j=0;j<=i;j++) { cov[i][j]=0; for(k=0;k<numEntries;k++) cov[i][j]+=data[k][i]*data[k][j]; } for(i=0;i<dimension;i++) for(j=i+1;j<dimension;j++) cov[i][j] = cov[j][i]; } void centerInPlace(double data[][DIMENSION], int numEntries, double mean[DIMENSION]) { #ifdef USE_DBGTRACE DbgTrace(()); #endif int i,k; for(i=0;i<DIMENSION;i++) { mean[i]=0; for(k=0;k<numEntries;k++) mean[i]+=data[k][i]; } if (!numEntries) 
return; for(i=0;i<DIMENSION;i++) { mean[i]/=(double) numEntries; for(k=0;k<numEntries;k++) data[k][i]-=mean[i]; } } void centerInPlace_d(double data[][MAX_DIMENSION_BIG], int numEntries, double mean[MAX_DIMENSION_BIG], int dimension) { #ifdef USE_DBGTRACE DbgTrace(()); #endif int i,k; for(i=0;i<dimension;i++) { mean[i]=0; for(k=0;k<numEntries;k++) mean[i]+=data[k][i]; } if (!numEntries) return; for(i=0;i<dimension;i++) { mean[i]/=(double) numEntries; for(k=0;k<numEntries;k++) data[k][i]-=mean[i]; } } void project(double data[][DIMENSION], int numEntries, double vector[DIMENSION], double projection[MAX_ENTRIES]) { #ifdef USE_DBGTRACE DbgTrace(()); #endif // assume that vector is normalized already int i,k; for(k=0;k<numEntries;k++) { projection[k]=0; for(i=0;i<DIMENSION;i++) { projection[k]+=data[k][i]*vector[i]; } } } void project_d(double data[][MAX_DIMENSION_BIG], int numEntries, double vector[MAX_DIMENSION_BIG], double projection[MAX_ENTRIES], int dimension) { #ifdef USE_DBGTRACE DbgTrace(()); #endif // assume that vector is normalized already int i,k; for(k=0;k<numEntries;k++) { projection[k]=0; for(i=0;i<dimension;i++) { projection[k]+=data[k][i]*vector[i]; } } } void eigenVector(double cov[DIMENSION][DIMENSION], double vector[DIMENSION]) { #ifdef USE_DBGTRACE DbgTrace(()); #endif // calculate an eigenvecto corresponding to a biggest eigenvalue // will work for non-zero non-negative matricies only #define EV_ITERATION_NUMBER 20 #define EV_SLACK 2 /* additive for exp base 2)*/ int i,j,k,l, m, n,p,q; double c[2][DIMENSION][DIMENSION]; double maxDiag; for(i=0;i<DIMENSION;i++) for(j=0;j<DIMENSION;j++) c[0][i][j] =cov[i][j]; p = (int) floor(log( (DBL_MAX_EXP - EV_SLACK) / ceil (log((double)DIMENSION)/log(2.)) )/log(2.)); assert(p>0); p = p >0 ? p : 1; q = (EV_ITERATION_NUMBER+p-1) / p; l=0; for(n=0;n<q; n++) { maxDiag = 0; for(i=0;i<DIMENSION;i++) maxDiag = c[l][i][i] > maxDiag ? 
c[l][i][i] : maxDiag; if (maxDiag<=0) { sugar(); return; } assert(maxDiag > 0); for(i=0;i<DIMENSION;i++) for(j=0;j<DIMENSION;j++) c[l][i][j] /=maxDiag; for(m=0;m<p;m++) { for(i=0;i<DIMENSION;i++) for(j=0;j<DIMENSION;j++) { c[1-l][i][j]=0; for(k=0;k<DIMENSION;k++) c[1-l][i][j]+=c[l][i][k]*c[l][k][j]; } l=1-l; } } maxDiag = 0; k =0; for(i=0;i<DIMENSION;i++) { k = c[l][i][i] > maxDiag ? i : k; maxDiag = c[l][i][i] > maxDiag ? c[l][i][i] : maxDiag; } double t; t=0; for(i=0;i<DIMENSION;i++) { t+=c[l][k][i]*c[l][k][i]; vector[i]=c[l][k][i]; } // normalization is really optional t= sqrt(t); assert(t>0); if (t<=0) { sugar(); return; } for(i=0;i<DIMENSION;i++) vector[i]/=t; } void eigenVector_d(double cov[MAX_DIMENSION_BIG][MAX_DIMENSION_BIG], double vector[MAX_DIMENSION_BIG], int dimension) { #ifdef USE_DBGTRACE DbgTrace(()); #endif // calculate an eigenvecto corresponding to a biggest eigenvalue // will work for non-zero non-negative matricies only #define EV_ITERATION_NUMBER 20 #define EV_SLACK 2 /* additive for exp base 2)*/ int i,j,k,l, m, n,p,q; double c[2][MAX_DIMENSION_BIG][MAX_DIMENSION_BIG]; double maxDiag; for(i=0;i<dimension;i++) for(j=0;j<dimension;j++) c[0][i][j] =cov[i][j]; p = (int) floor(log( (DBL_MAX_EXP - EV_SLACK) / ceil (log((double)dimension)/log(2.)) )/log(2.)); assert(p>0); p = p >0 ? p : 1; q = (EV_ITERATION_NUMBER+p-1) / p; l=0; for(n=0;n<q; n++) { maxDiag = 0; for(i=0;i<dimension;i++) maxDiag = c[l][i][i] > maxDiag ? c[l][i][i] : maxDiag; if (maxDiag<=0) { sugar(); return; } assert(maxDiag >0); for(i=0;i<dimension;i++) for(j=0;j<dimension;j++) c[l][i][j] /=maxDiag; for(m=0;m<p;m++) { for(i=0;i<dimension;i++) for(j=0;j<dimension;j++) { double temp=0; for(k=0;k<dimension;k++) { // Notes: // This is the most consuming portion of the code and needs optimizing for perfromance temp += c[l][i][k]*c[l][k][j]; } c[1-l][i][j]=temp; } l=1-l; } } maxDiag = 0; k =0; for(i=0;i<dimension;i++) { k = c[l][i][i] > maxDiag ? i : k; maxDiag = c[l][i][i] > maxDiag ? 
c[l][i][i] : maxDiag; } double t; t=0; for(i=0;i<dimension;i++) { t+=c[l][k][i]*c[l][k][i]; vector[i]=c[l][k][i]; } // normalization is really optional t= sqrt(t); assert(t>0); if (t<=0) { sugar(); return; } for(i=0;i<dimension;i++) vector[i]/=t; } double partition2(double data[][DIMENSION], int numEntries,int index[]) { #ifdef USE_DBGTRACE DbgTrace(()); #endif int i,j,k; double cov[2][DIMENSION][DIMENSION]; double center[2][DIMENSION]; double cnt[2] ={0,0}; double vector[2][DIMENSION]; double acc=0; for(k=0;k<numEntries;k++) cnt[index[k]]++; for(i=0;i<DIMENSION;i++) { center[0][i]=center[1][i]=0; for(k=0;k<numEntries;k++) center[index[k]][i]+=data[k][i]; } for(i=0;i<DIMENSION;i++) for(j=0;j<=i;j++) { cov[0][i][j]=cov[1][i][j]=0; for(k=0;k<numEntries;k++) cov[index[k]][i][j]+=data[k][i]*data[k][j]; } for(i=0;i<DIMENSION;i++) for(j=0;j<=i;j++) for (k=0;k<2;k++) if (cnt[k]!=0) cov[k][i][j] -=center[k][i]*center[k][j]/(double)cnt[k]; for(i=0;i<DIMENSION;i++) for(j=i+1;j<DIMENSION;j++) for(k=0;k<2;k++) cov[k][i][j] = cov[k][j][i]; for(k=0;k<2;k++) eigenVector(cov[k], vector[k]); // assume the returned vector is nomalized for(i=0;i<DIMENSION;i++) for(k=0;k<2;k++) acc+=cov[k][i][i]; for(i=0;i<DIMENSION;i++) for(j=0;j<DIMENSION;j++) for(k=0;k<2;k++) acc-=cov[k][i][j]*vector[k][i]*vector[k][j]; return(acc); } void quantEven(double data[MAX_ENTRIES][DIMENSION],int numEntries, int numClusters, int index[MAX_ENTRIES]) { #ifdef USE_DBGTRACE DbgTrace(()); #endif // Data should be centered, otherwise will not work // The running time (number of iteration of the external loop) is // binomial(numEntries+numClusters-2, numClusters-1) // First cluster is always used, (without loss of generality) // Ramp should be shifted such, that the first element ramp[0] is 0 int i,k; int level; double t,s; int c =1; int cluster[MAX_CLUSTERS]; int bestCluster[MAX_CLUSTERS]; // stores the las index for the cluster double dpAcc [MAX_CLUSTERS][DIMENSION]; double index2Acc [MAX_CLUSTERS]; // for 
backtraking double indexAcc [MAX_CLUSTERS]; double dRamp2[MAX_CLUSTERS]; // first differenses of the (shifted) ramp squared double S; double nErrorNum=0; // not the actual error, but some (decreasing) linear functional of it represented // as numerator and denominator double nErrorDen=1; level=1; bestCluster[0]=cluster[0]=cluster[1]=numEntries; indexAcc[0]=index2Acc[0]=indexAcc[1]=index2Acc[1]=0; for(i=0;i<DIMENSION;i++) dpAcc[0][i]=dpAcc[1][i]=0; S = 1/sqrt((double) numEntries); for(i=1;i<MAX_CLUSTERS;i++) { dRamp2[i] = 2*i-1; } level=1; do { k = --cluster[level-1]; indexAcc [level] += S; index2Acc [level] += dRamp2 [level]; t=0; for(i=0;i<DIMENSION;i++) { // using scaled ramp instead of non-scaled here effectively scales the data, so // the resulting quantisation will be the same, but the error metric value will be different dpAcc [level][i] += data[k][i]; t += dpAcc[level][i] * dpAcc[level][i]; } if ((cluster[level]!= numEntries || cluster[level-1]!=0) && nErrorNum * (s=index2Acc[level]-indexAcc[level] * indexAcc[level]) < nErrorDen * t) { nErrorNum=t; nErrorDen=s; for(i=0;i<=level;i++) bestCluster[i]=cluster[i]; } c++; if (level < numClusters - 1 ) { // go up level++; indexAcc [level]=indexAcc [level-1]; index2Acc[level]=index2Acc[level-1]; for(i=0;i<DIMENSION;i++) dpAcc [level][i]=dpAcc [level-1][i]; } else while ((level-1) && cluster[level-1]==cluster[level-2]) level--; cluster[level]=numEntries; } while (level != 1 || cluster[level-1] != 0); for (level=i=0;i< numEntries;i++) { while (i==bestCluster[level]) level++; index[i]=level; } } void quantLineConstr(double data[][DIMENSION], int order[MAX_ENTRIES],int numEntries, int numClusters, int index[MAX_ENTRIES]) { #ifdef USE_DBGTRACE DbgTrace(()); #endif // Data should be centered, otherwise will not work // The running time (number of iteration of the external loop) is // binomial(numEntries+numClusters-2, numClusters-1) // Index just defines which points should be combined in a cluster int i,j,k; int level; 
    // ---- tail of the contiguous-partition cluster search (function head precedes this chunk) ----
    // Enumerates partitions of the ordered points into contiguous clusters and keeps
    // the partition maximizing the quadratic form dir' * cov * dir over the scaled centers.
    double t,s; // We need padding of 0 on -1 index
    int cluster_[MAX_CLUSTERS+1]={0};
    int *cluster = cluster_+1;
    int bestCluster[MAX_CLUSTERS]; // stores the last index for the cluster
    double cov[DIMENSION][DIMENSION];
    double dir[DIMENSION];
    double gcAcc[MAX_CLUSTERS][DIMENSION];  // clusters' gravity centers
    double gcSAcc[MAX_CLUSTERS][DIMENSION]; // clusters' scaled gravity centers
    double nError=0; // not the actual error, but some (decreasing) linear functional of it represented
                     // as numerator and denominator

    level=1;
    bestCluster[0]=cluster[0]=cluster[1]=numEntries;
    for(i=0;i<DIMENSION;i++)
        gcAcc[0][i]=gcAcc[1][i]=0;
    level=1;
    do
    {
        assert(level >0);
        // move one ordered point (k) across the current delimiter
        k = order[--cluster[level-1]];
        s=(cluster[level-1]-cluster[level-2]) == 0 ? 0: 1/sqrt( (double) (cluster[level-1]-cluster[level-2])); // see cluster_ decl for
                                                                                                              // cluster[-1] value
        t=1/sqrt((double) (numEntries-cluster[level-1]));
        for(i=0;i<DIMENSION;i++)
        {
            gcAcc[level ][i] += data[k][i];
            gcAcc[level-1][i] -= data[k][i];
            gcSAcc[level-1][i] = gcAcc[level-1][i] * s;
            gcSAcc[level ][i] = gcAcc[level ][i] * t;
        }
        covariance(gcSAcc, level+1, cov);
        eigenVector(cov, dir);
        // assume the vector is normalized here
        t=0;
        for(i=0;i<DIMENSION;i++)
            for(j=0;j<DIMENSION;j++)
                t+= cov[i][j]*dir[i]*dir[j];
        if (t>nError)
        {
            nError=t;
            for(i=0;i<=level;i++)
                bestCluster[i]=cluster[i];
        }
        if (level < numClusters - 1 )
        { // go up
            level++;
            for(i=0;i<DIMENSION;i++)
                gcAcc [level][i]=0;
        }
        else
            while ((level-1) && cluster[level-1]==cluster[level-2])
            {
                level--;
                for(i=0;i<DIMENSION;i++)
                    gcAcc [level][i]+=gcAcc [level+1][i];
            }
        cluster[level]=numEntries;
    } while (level != 1 || cluster[level-1] != 1);

    // translate the winning delimiter positions into per-point cluster indices
    for (level=i=0;i< numEntries;i++)
    {
        while (i==bestCluster[level])
            level++;
        index[order[i]]=level;
    }
}

// Sum of squared componentwise differences between two point sets (fixed DIMENSION).
double totalError(double data[MAX_ENTRIES][DIMENSION],double data2[MAX_ENTRIES][DIMENSION],int numEntries)
{
#ifdef USE_DBGTRACE
    DbgTrace(());
#endif
    int i,j;
    double t=0;
    for (i=0;i<numEntries;i++)
        for (j=0;j<DIMENSION;j++)
            t+= (data[i][j]-data2[i][j])*(data[i][j]-data2[i][j]);
    return t;
};

// Sum of squared componentwise differences between two point sets (runtime dimension).
double totalError_d(double data[MAX_ENTRIES][MAX_DIMENSION_BIG],double data2[MAX_ENTRIES][MAX_DIMENSION_BIG],int numEntries, int dimension)
{
#ifdef USE_DBGTRACE
    DbgTrace(());
#endif
    int i,j;
    double t=0;
    for (i=0;i<numEntries;i++)
        for (j=0;j<dimension;j++)
            t+= (data[i][j]-data2[i][j])*(data[i][j]-data2[i][j]);
    return t;
};

// Optimal evenly-spaced scalar quantization along the principal direction:
// centers the data, projects onto the dominant eigenvector, quantizes with
// quantEven, then alternates direction / index refinement until the ordering
// of projections is stable (or MAX_TRY is reached).
// Outputs: per-point indices, reconstructed points (out), normalized
// direction and the quantization step; returns the total squared error.
double optQuantEven(
    double data[MAX_ENTRIES][DIMENSION],
    int numEntries, int numClusters, int index[MAX_ENTRIES],
    double out[MAX_ENTRIES][DIMENSION],
    double direction [DIMENSION],double *step
)
{
#ifdef USE_DBGTRACE
    DbgTrace(());
#endif
    int maxTry=MAX_TRY;
    int i,j,k;
    double t,s;
    double centered[MAX_ENTRIES][DIMENSION];
    double ordered[MAX_ENTRIES][DIMENSION];
    double mean[DIMENSION];
    double cov[DIMENSION][DIMENSION];
    double projected[MAX_ENTRIES];

    int order[MAX_ENTRIES];

    for (i=0;i<numEntries;i++)
        for (j=0;j<DIMENSION;j++)
            centered[i][j]=data[i][j];

    centerInPlace(centered, numEntries, mean);
    covariance(centered, numEntries, cov);

    // check if they all are the same
    t=0;
    for (j=0;j<DIMENSION;j++)
        t+= cov[j][j];

    if (t==0 || numEntries==0)
    {
        // degenerate input: every output point is the mean, all indices 0
        for (i=0;i<numEntries;i++)
        {
            index[i]=0;
            for (j=0;j<DIMENSION;j++)
                out[i][j]=mean[j];
        }
        return 0.;
    }

    eigenVector(cov, direction);
    project(centered, numEntries, direction, projected);

    for (i=0;i<maxTry;i++)
    {
        if (i)
        {
            // re-derive the direction from the current quantization
            t=0;
            for (j=0;j<DIMENSION;j++)
            {
                direction[j]=0;
                for (k=0;k<numEntries;k++)
                    direction[j]+=ordered[k][j]*index[k];
                t+=direction[j]*direction[j];
            }
            // Actually we don't need to normalize direction here, as the
            // optimal quantization (index) is invariant of the scale.
            // Hence we don't care about possible degeneration of the <direction> either,
            // though normally it should not happen.
            // However, the EPSILON should be scaled, otherwise it does not make sense.
            t = sqrt(t)*EPSILON;
            project(centered, numEntries, direction, projected);
            // converged when the previous ordering is still (almost) monotone
            for (j=1; j < numEntries;j++)
                if (projected[order[j]] < projected[order[j-1]]-t /*EPSILON*/)
                    break;
            if (j >= numEntries)
                break;
        }
        sortProjection(projected, order, numEntries);
        for (k=0;k<numEntries;k++)
            for (j=0;j<DIMENSION;j++)
                ordered[k][j]=centered[order[k]][j];
        quantEven(ordered, numEntries, numClusters, index);
    }

    // least-squares fit of the step given the final indices
    s=t=0;
    double q=0;
    for (k=0;k<numEntries;k++)
    {
        s+= index[k];
        t+= index[k]*index[k];
    }
    for (j=0;j<DIMENSION;j++)
    {
        direction[j]=0;
        for (k=0;k<numEntries;k++)
            direction[j]+=ordered[k][j]*index[k];
        q+= direction[j]* direction[j];
    }
    s /= (double) numEntries;
    t = t - s * s * (double) numEntries;
#ifdef USE_DBGTRACE
    if (t==0)
        DbgTrace(("l;lkjk"));
#endif
    assert(t !=0);
    t = (t == 0 ? 0. : 1/t);

    for (i=0;i<numEntries;i++)
        for (j=0;j<DIMENSION;j++)
            out[order[i]][j]=mean[j]+direction[j]*t*(index[i]-s);

    // normalize direction for output
    q=sqrt(q);
    *step=t*q;
    for (j=0;j<DIMENSION;j++)
        direction[j]/=q;

    return totalError(data,out,numEntries);
};

// Reassign every point to its nearest center, then recompute each center as
// the mean of its members; returns nonzero iff at least one assignment changed.
int requantize(double data[MAX_ENTRIES][DIMENSION], double centers[MAX_CLUSTERS][DIMENSION], int numEntries, int numClusters,int index[MAX_ENTRIES] )
{
#ifdef USE_DBGTRACE
    DbgTrace(());
#endif
    int i,j,k;
    double p,q;
    int cnt[MAX_CLUSTERS];
    int change =0;
    for (i=0;i<numEntries;i++)
    {
        p=0;
        index[i]=0;
        for(k=0;k<DIMENSION;k++)
            p+=(data[i][k]-centers[index[i]][k])*(data[i][k]-centers[index[i]][k]);
        for(j=0;j<numClusters;j++)
        {
            q=0;
            for(k=0;k<DIMENSION;k++)
                q+=(data[i][k]-centers[j][k])*(data[i][k]-centers[j][k]);
            // branch-free nearest-center update
            change |= q < p ? (j!= index[i]) : 0;
            index[i]= q < p ? j : index[i];
            p = q < p ?
q : p; } } for(j=0;j<numClusters;j++) cnt[j]=0; for(j=0;j<numClusters;j++) for(k=0;k<DIMENSION;k++) centers[j][k]=0; for (i=0;i<numEntries;i++) { cnt[index[i]]++; for(k=0;k<DIMENSION;k++) centers[index[i]][k]+=data[i][k]; } for(j=0;j<numClusters;j++) for(k=0;k<DIMENSION;k++) centers[j][k]/=(double) cnt[j]; return(change); } double optQuantLineConstr( double data[MAX_ENTRIES][DIMENSION], int numEntries, int numClusters, int index[MAX_ENTRIES], double out[MAX_ENTRIES][DIMENSION] ) { #ifdef USE_DBGTRACE DbgTrace(()); #endif int maxTry=MAX_TRY; int i,j,k; double t; double centered[MAX_ENTRIES][DIMENSION]; double mean[DIMENSION]; double cov[DIMENSION][DIMENSION]; double projected[MAX_ENTRIES]; double direction [DIMENSION]; int order[MAX_ENTRIES]; for (i=0;i<numEntries;i++) for (j=0;j<DIMENSION;j++) centered[i][j]=data[i][j]; centerInPlace(centered, numEntries, mean); covariance(centered, numEntries, cov); // check if they all are the same t=0; for (j=0;j<DIMENSION;j++) t+= cov[j][j]; if (t==0 || numEntries==0) { for (i=0;i<numEntries;i++) { index[i]=0; for (j=0;j<DIMENSION;j++) out[i][j]=mean[j]; } return 0.; } eigenVector(cov, direction); project(centered, numEntries, direction, projected); for (i=0;i<maxTry;i++) { if (i) { t=0; for (j=0;j<DIMENSION;j++) { direction[j]=0; for (k=0;k<numEntries;k++) direction[j]+=centered[k][j]*index[k]; t=direction[j]*direction[j]; } // Actually we don't need to normailize direction here, as the // optimal quntization (index) is invariant of the scale. 
// Hence we don't care about possible degenration of the <direction> either // though normally it should not happen // However, the EPSILON should be scaled, otherwise is does not make sense t = sqrt(t)*EPSILON; project(centered, numEntries, direction, projected); for (j=1; j < numEntries;j++) if (projected[order[j]] < projected[order[j-1]]-t /*EPSILON*/) break; if (j >= numEntries) break; } sortProjection(projected, order, numEntries); quantLineConstr(centered, order, numEntries, numClusters, index); } double gcAcc[MAX_CLUSTERS][DIMENSION]; double gcSAcc[MAX_CLUSTERS][DIMENSION]; double gcS[MAX_CLUSTERS]; for(i=0;i<MAX_CLUSTERS;i++) { gcS[i]=0; for(j=0;j<DIMENSION;j++) gcAcc[i][j]=0; } for (k=0;k<numEntries;k++) { gcS[index[k]]+=1; for (j=0;j<DIMENSION;j++) gcAcc[index[k]][j]+=centered[k][j]; } for(i=0;i<numClusters;i++) for (j=0;j<DIMENSION;j++) if (gcS[i]!=0) { gcSAcc[i][j] = gcAcc[i][j]/sqrt((double)gcS[i]); gcAcc[i][j] /= ((double)gcS[i]); } else gcSAcc[i][j] = 0; covariance(gcSAcc, numClusters, cov); eigenVector(cov, direction); // assume the vector is normalized here for(i=0;i<numClusters;i++) { gcS[i]=0; for (j=0;j<DIMENSION;j++) gcS[i]+=direction[j]*gcAcc[i][j]; } for (i=0;i<numEntries;i++) for (j=0;j<DIMENSION;j++) out[i][j]=mean[j]+direction[j]*gcS[index[i]]; return totalError(data,out,numEntries); }; void quantTrace(double data[MAX_ENTRIES_QUANT_TRACE][DIMENSION],int numEntries, int numClusters, int index[MAX_ENTRIES_QUANT_TRACE]) { // Data should be centered, otherwise will not work int i,j,k; double sdata[2*MAX_ENTRIES][DIMENSION]; double dpAcc [DIMENSION]; double M =0; struct TRACE *tr ; tr=amd_trs[numClusters-1][numEntries-1]; int trcnt =trcnts[numClusters-1][numEntries-1]; int *code; code=amd_codes[numClusters-1][numEntries-1]; for (i=0;i<numEntries;i++) for (j=0;j<DIMENSION;j++) { sdata[2*i][j]= data[i][j]; sdata[2*i+1][j]=-data[i][j]; } for (j=0;j<DIMENSION;j++) dpAcc[j]=0; k=-1; #define UROLL_STEP(i) \ dpAcc[0]+=sdata[tr[i].k][0];\ 
dpAcc[1]+=sdata[tr[i].k][1];\ dpAcc[2]+=sdata[tr[i].k][2];\ { double c; \ c = (dpAcc[0]*dpAcc[0]+dpAcc[1]*dpAcc[1]+dpAcc[2]*dpAcc[2])*tr[i].d;\ if (c > M) {k=i;M=c;};}; for (i=0;i+15<trcnt;i+=16) { UROLL_STEP(i) UROLL_STEP(i+1) UROLL_STEP(i+2) UROLL_STEP(i+3) UROLL_STEP(i+4) UROLL_STEP(i+5) UROLL_STEP(i+6) UROLL_STEP(i+7) UROLL_STEP(i+8) UROLL_STEP(i+9) UROLL_STEP(i+10) UROLL_STEP(i+11) UROLL_STEP(i+12) UROLL_STEP(i+13) UROLL_STEP(i+14) UROLL_STEP(i+15) } for (;i<trcnt;i++) { UROLL_STEP(i) } if ((k<0)||(k >=MAX_TRACE)) { // NP return; } k = code[k]; i=0; for (j=0;j<numEntries;j++) { while ((k & 1) ==0) { i++; k>>=1; } index[j]=i; k>>=1; } } void quantTrace_d(double data[MAX_ENTRIES_QUANT_TRACE][MAX_DIMENSION_BIG],int numEntries, int numClusters, int index[MAX_ENTRIES_QUANT_TRACE],int dimension) { #ifdef USE_DBGTRACE DbgTrace(()); #endif // Data should be centered, otherwise will not work int i,j,k; double sdata[2*MAX_ENTRIES][MAX_DIMENSION_BIG]; double dpAcc [MAX_DIMENSION_BIG]; double M =0; struct TRACE *tr ; tr=amd_trs[numClusters-1][numEntries-1]; int trcnt =trcnts[numClusters-1][numEntries-1]; int *code; code=amd_codes[numClusters-1][numEntries-1]; for (i=0;i<numEntries;i++) for (j=0;j<dimension;j++) { sdata[2*i][j]= data[i][j]; sdata[2*i+1][j]=-data[i][j]; } for (j=0;j<dimension;j++) dpAcc[j]=0; k=-1; #define UROLL_STEP_1(i) \ dpAcc[0]+=sdata[tr[i].k][0];\ {\ double c; \ c = (dpAcc[0]*dpAcc[0])*tr[i].d;\ if (c > M) {k=i;M=c;};\ }; #define UROLL_STEP_2(i) \ dpAcc[0]+=sdata[tr[i].k][0];\ dpAcc[1]+=sdata[tr[i].k][1];\ { double c; \ c = (dpAcc[0]*dpAcc[0]+dpAcc[1]*dpAcc[1])*tr[i].d;\ if (c > M) {k=i;M=c;};}; #define UROLL_STEP_3(i) \ dpAcc[0]+=sdata[tr[i].k][0];\ dpAcc[1]+=sdata[tr[i].k][1];\ dpAcc[2]+=sdata[tr[i].k][2];\ { double c; \ c = (dpAcc[0]*dpAcc[0]+dpAcc[1]*dpAcc[1]+dpAcc[2]*dpAcc[2])*tr[i].d;\ if (c > M) {k=i;M=c;};}; #define UROLL_STEP_4(i) \ dpAcc[0]+=sdata[tr[i].k][0];\ dpAcc[1]+=sdata[tr[i].k][1];\ dpAcc[2]+=sdata[tr[i].k][2];\ 
    dpAcc[3]+=sdata[tr[i].k][3];\
    { double c; \
    c = (dpAcc[0]*dpAcc[0]+dpAcc[1]*dpAcc[1]+dpAcc[2]*dpAcc[2]+dpAcc[3]*dpAcc[3])*tr[i].d;\
    if (c > M) {k=i;M=c;};};

#undef UROLL_STEP

#define UROLL_MACRO(UROLL_STEP){\
\
\
    for (i=0;i+15<trcnt;i+=16)\
    {\
        UROLL_STEP(i)\
        UROLL_STEP(i+1)\
        UROLL_STEP(i+2)\
        UROLL_STEP(i+3)\
        UROLL_STEP(i+4)\
        UROLL_STEP(i+5)\
        UROLL_STEP(i+6)\
        UROLL_STEP(i+7)\
        UROLL_STEP(i+8)\
        UROLL_STEP(i+9)\
        UROLL_STEP(i+10)\
        UROLL_STEP(i+11)\
        UROLL_STEP(i+12)\
        UROLL_STEP(i+13)\
        UROLL_STEP(i+14)\
        UROLL_STEP(i+15)\
    }\
\
    for (;i<trcnt;i++) {\
        UROLL_STEP(i)\
    }};

    // dispatch the unrolled trace scan on the runtime dimension (1..4)
    switch(dimension)
    {
        case 1: UROLL_MACRO(UROLL_STEP_1); break;
        case 2: UROLL_MACRO(UROLL_STEP_2); break;
        case 3: UROLL_MACRO(UROLL_STEP_3); break;
        case 4: UROLL_MACRO(UROLL_STEP_4); break;
        default: return; break;
    }

    if (k<0)
    {
#ifdef USE_DBGTRACE
        DbgTrace(("ERROR: quatnTrace\n"));
#endif
        return;
    }

    // decode the winning cluster code into per-point indices
    k = code[k];
    i=0;
    for (j=0;j<numEntries;j++)
    {
        while ((k & 1) ==0)
        {
            i++;
            k>>=1;
        }
        index[j]=i;
        k>>=1;
    }
}

// 1-D quantization onto a k-point ramp using a simplex rounding correction
// (Conway & Sloane style): round each scaled value, then fix up the rounding
// so the index offsets lie in the fundamental simplex.
void quant_AnD_Shell(double* v_, int k, int n, int *idx)
{
#ifdef USE_DBGTRACE
    DbgTrace(());
#endif
    // input:
    //
    // v_ points, might be uncentered
    // k - number of points in the ramp
    // n - number of points in v_
    //
    // output:
    //
    // index, uncentered, in the range 0..k-1
    //
#define MAX_BLOCK MAX_ENTRIES
    int i,j;
    double v[MAX_BLOCK];
    double z[MAX_BLOCK];
    a d[MAX_BLOCK];  // per-point rounding residual plus original position
    double l;
    double mm;
    double r=0;
    int mi;

    assert((v_ != NULL) && (n>1) && (k>1));

    double m, M, s, dm=0.;
    m=M=v_[0];

    for (i=1; i < n;i++)
    {
        m = m < v_[i] ? m : v_[i];
        M = M > v_[i] ? M : v_[i];
    }
    if (M==m)
    {
        // all values equal: single ramp point
        for (i=0; i < n;i++)
            idx[i]=0;
        return;
    }

    assert(M-m >0);
    s = (k-1)/(M-m);  // scale so the value range maps onto 0..k-1

    for (i=0; i < n;i++)
    {
        v[i] = v_[i]*s;
        idx[i]=(int)(z[i] = floor(v[i] +0.5 /* stabilizer*/ - m *s));
        d[i].d = v[i]-z[i]- m *s;
        d[i].i = i;
        dm+= d[i].d;
        r += d[i].d*d[i].d;
    }

    if (n*r- dm*dm >= (double)(n-1)/4 /*slack*/ /2)
    {
        dm /= (double)n;
        for (i=0; i < n;i++)
            d[i].d -= dm;

        qsort((void*)&d, n, sizeof(a),a_compare);

        // got into fundamental simplex
        // move coordinate system origin to its center
        for (i=0; i < n;i++)
            d[i].d -= (2.*(double)i+1-(double)n)/2./(double)n;

        mm=l=0.;
        j=-1;
        for (i=0; i < n;i++)
        {
            l+=d[i].d;
            if (l < mm)
            {
                mm =l;
                j=i;
            }
        }
        // position which should be in 0
        // NOTE(review): "j = ++j % n" modifies j twice in one expression --
        // well-defined only since C++17; confirm the build's language level.
        j = ++j % n;

        for (i=j; i < n;i++)
            idx[d[i].i]++;
    }

    // get rid of an offset in idx
    mi=idx[0];
    for (i=1; i < n;i++)
        mi = mi < idx[i]? mi :idx[i];
    for (i=0; i < n;i++)
        idx[i]-=mi;
}

// Like optQuantEven but the inner quantizer is the exhaustive trace search
// (quantTrace); outputs indices in original point order via index_.
double optQuantTrace(
    double data[MAX_ENTRIES][DIMENSION],
    int numEntries, int numClusters, int index_[MAX_ENTRIES],
    double out[MAX_ENTRIES][DIMENSION],
    double direction [DIMENSION],double *step
)
{
#ifdef USE_DBGTRACE
    DbgTrace(());
#endif
    int index[MAX_ENTRIES];
    int maxTry=MAX_TRY;
    int i,j,k;
    double t,s;
    double centered[MAX_ENTRIES][DIMENSION];
    double ordered[MAX_ENTRIES][DIMENSION];
    double mean[DIMENSION];
    double cov[DIMENSION][DIMENSION];
    double projected[MAX_ENTRIES];

    int order[MAX_ENTRIES];

    for (i=0;i<numEntries;i++)
        for (j=0;j<DIMENSION;j++)
            centered[i][j]=data[i][j];

    centerInPlace(centered, numEntries, mean);
    covariance(centered, numEntries, cov);

    // check if they all are the same
    t=0;
    for (j=0;j<DIMENSION;j++)
        t+= cov[j][j];

    if (t==0 || numEntries==0)
    {
        // degenerate input: every output point is the mean, all indices 0
        for (i=0;i<numEntries;i++)
        {
            index_[i]=0;
            for (j=0;j<DIMENSION;j++)
                out[i][j]=mean[j];
        }
        return 0.;
    }

    eigenVector(cov, direction);
    project(centered, numEntries, direction, projected);

    for (i=0;i<maxTry;i++)
    {
        if (i)
        {
            // re-derive the direction from the current quantization
            t=0;
            for (j=0;j<DIMENSION;j++)
            {
                direction[j]=0;
                for (k=0;k<numEntries;k++)
                    direction[j]+=ordered[k][j]*index[k];
                t+=direction[j]*direction[j];
            }
            // We don't need to normalize direction here, as the optimal
            // quantization (index) is invariant of the scale; only EPSILON
            // needs scaling.
            t = sqrt(t)*EPSILON;
            project(centered, numEntries, direction, projected);
            for (j=1; j < numEntries;j++)
                if (projected[order[j]] < projected[order[j-1]]-t /*EPSILON*/)
                    break;
            if (j >= numEntries)
                break;
        }
        sortProjection(projected, order, numEntries);
        for (k=0;k<numEntries;k++)
            for (j=0;j<DIMENSION;j++)
                ordered[k][j]=centered[order[k]][j];
        quantTrace(ordered, numEntries, numClusters, index);
    }

    // least-squares fit of the step given the final indices
    s=t=0;
    double q=0;
    for (k=0;k<numEntries;k++)
    {
        s+= index[k];
        t+= index[k]*index[k];
    }
    for (j=0;j<DIMENSION;j++)
    {
        direction[j]=0;
        for (k=0;k<numEntries;k++)
            direction[j]+=ordered[k][j]*index[k];
        q+= direction[j]* direction[j];
    }
    s /= (double) numEntries;
    t = t - s * s * (double) numEntries;
#ifdef USE_DBGTRACE
    if (t==0)
        DbgTrace(("l;lkjk"));
#endif
    assert(t !=0);
    t = (t == 0 ? 0.
        : 1/t);

    for (i=0;i<numEntries;i++)
    {
        for (j=0;j<DIMENSION;j++)
            out[order[i]][j]=mean[j]+direction[j]*t*(index[i]-s);
        index_[order[i]]=index[i]; // indices back in original point order
    }

    // normalize direction for output
    q=sqrt(q);
    *step=t*q;
    for (j=0;j<DIMENSION;j++)
        direction[j]/=q;

    return totalError(data,out,numEntries);
}

// Runtime-dimension variant of optQuantTrace (uses the *_d helpers).
double optQuantTrace_d(
    double data[MAX_ENTRIES][MAX_DIMENSION_BIG],
    int numEntries, int numClusters, int index_[MAX_ENTRIES],
    double out[MAX_ENTRIES][MAX_DIMENSION_BIG],
    double direction [MAX_DIMENSION_BIG],double *step,
    int dimension
)
{
#ifdef USE_DBGTRACE
    DbgTrace(());
#endif
    int index[MAX_ENTRIES];
    int maxTry=MAX_TRY;
    int i,j,k;
    double t,s;
    double centered[MAX_ENTRIES][MAX_DIMENSION_BIG];
    double ordered[MAX_ENTRIES][MAX_DIMENSION_BIG];
    double mean[MAX_DIMENSION_BIG];
    // NOTE(review): first extent is DIMENSION while the column extent is
    // MAX_DIMENSION_BIG -- looks inconsistent; confirm dimension never
    // exceeds DIMENSION here.
    double cov[DIMENSION][MAX_DIMENSION_BIG];
    double projected[MAX_ENTRIES];

    int order[MAX_ENTRIES];

    for (i=0;i<numEntries;i++)
        for (j=0;j<dimension;j++)
            centered[i][j]=data[i][j];

    centerInPlace_d(centered, numEntries, mean, dimension);
    covariance_d(centered, numEntries, cov, dimension);

    // check if they all are the same
    t=0;
    for (j=0;j<dimension;j++)
        t+= cov[j][j];

    if (t<EPSILON || numEntries==0)
    {
        // degenerate input: every output point is the mean, all indices 0
        for (i=0;i<numEntries;i++)
        {
            index_[i]=0;
            for (j=0;j<dimension;j++)
                out[i][j]=mean[j];
        }
        return 0.;
    }

    eigenVector_d(cov, direction, dimension);
    project_d(centered, numEntries, direction, projected, dimension);

    for (i=0;i<maxTry;i++)
    {
        if (i)
        {
            // re-derive the direction from the current quantization
            t=0;
            for (j=0;j<dimension;j++)
            {
                direction[j]=0;
                for (k=0;k<numEntries;k++)
                    direction[j]+=ordered[k][j]*index[k];
                t+=direction[j]*direction[j];
            }
            // We don't need to normalize direction here, as the optimal
            // quantization (index) is invariant of the scale; only EPSILON
            // needs scaling.
            t = sqrt(t)*EPSILON;
            project_d(centered, numEntries, direction, projected, dimension);
            for (j=1; j < numEntries;j++)
                if (projected[order[j]] < projected[order[j-1]]-t /*EPSILON*/)
                    break;
            if (j >= numEntries)
                break;
        }
        sortProjection(projected, order, numEntries);
        for (k=0;k<numEntries;k++)
            for (j=0;j<dimension;j++)
                ordered[k][j]=centered[order[k]][j];
        quantTrace_d(ordered, numEntries, numClusters, index, dimension);
    }

    // least-squares fit of the step given the final indices
    s=t=0;
    double q=0;
    for (k=0;k<numEntries;k++)
    {
        s+= index[k];
        t+= index[k]*index[k];
    }
    for (j=0;j<dimension;j++)
    {
        direction[j]=0;
        for (k=0;k<numEntries;k++)
            direction[j]+=ordered[k][j]*index[k];
        q+= direction[j]* direction[j];
    }
    s /= (double) numEntries;
    t = t - s * s * (double) numEntries;
    assert(t !=0);
    t = (t == 0 ? 0. : 1/t);

    for (i=0;i<numEntries;i++)
    {
        for (j=0;j<dimension;j++)
            out[order[i]][j]=mean[j]+direction[j]*t*(index[i]-s);
        index_[order[i]]=index[i]; // indices back in original point order
    }

    // normalize direction for output
    q=sqrt(q);
    *step=t*q;
    for (j=0;j<dimension;j++)
        direction[j]/=q;

    return totalError_d(data,out,numEntries, dimension);
}

// Build the trace (sequence of single-point cluster moves) and the matching
// cluster codes used by quantTrace/quantTrace_d for the given
// numEntries/numClusters pair; *trcnt receives the number of steps.
void traceBuilder (int numEntries, int numClusters,struct TRACE tr [], int code[], int *trcnt )
{
//=================
// DIG drives one delimiter back and forth (direction alternates per level)
#define DIG(J_IN,I,N,J_OUT,DIR,NC,NCC) \
    for (I=J_IN;I<N || NC < NCC ;I++) { \
    J_OUT = ((((J_IN) & 0x1)==DIR) ? I : N-1-(I-(J_IN))); \
//=================
    int i[7];
    int j[7];
    int k[7]={0,1,2,3,4,5,6};
    int n;
    int c =0;
    int c0 =0;
    int p;
    int h[8]={0,0,0,0, 0,0,0,0}; // cluster occupancy histogram

    if (numClusters == 1)
    {
        // trivial case: one cluster, empty trace
        tr[c].k=0;
        tr[c].d=0;
        code[c]=0;
        *trcnt=0;
        return;
    }

    h[numClusters-1]=numEntries;

    int q = numEntries*(numClusters-1);
    int q2 = numEntries*(numClusters-1)*(numClusters-1);

    n = numEntries + numClusters -2; // highest delimiter position; all points start in highest cluster

    int cd = -(1<< (numClusters-1));

    DIG( 0,i[0],n,j[0],0,numClusters,2)
    DIG(j[0]+1,i[1],n,j[1],1,numClusters,3)
    DIG(j[1]+1,i[2],n,j[2],0,numClusters,4)
    DIG(j[2]+1,i[3],n,j[3],1,numClusters,5)
    DIG(j[3]+1,i[4],n,j[4],0,numClusters,6)
    DIG(j[4]+1,i[5],n,j[5],1,numClusters,7)
    DIG(j[5]+1,i[6],n,j[6],0,numClusters,8)

    int rescan;
    do
    {
        rescan=0;
        for (p=0;p<numClusters-1;p++)
        {
            if (abs(j[p]-k[p]) >1 )
            {
                // delimiters are expected to move by at most one position
#ifdef USE_DBGTRACE
                DbgTrace(("Driving trace generation error\n"));
                for (p=0;p<numClusters-1;p++)
                    DbgTrace(("%d %d %d\n",k[p],j[p],n));
#endif
                return;
            }
            else if (j[p]-k[p]== 1 )
            {
                int ci= k[p]-p;
                // move it one cluster down "-"
                int cn=p+1;
                h[cn]--;
                h[cn-1]++;
                if (h[cn] < 0 || h[cn-1]>= numEntries)
                {
                    // illegal move; undo and rescan
                    rescan =1;
                    h[cn]++;
                    h[cn-1]--;
                }
                else
                {
                    q2+= -2*cn+1;
                    q--;
                    {
                        int i1,cc=0;
                        for(i1=0;i1<numClusters;i1++)
                            cc += i1*i1*h[i1];
#ifdef USE_DBGTRACE
                        if (cc !=q2)
                            DbgTrace(("1 - q2 %d %d\n", cc,q2));
#endif
                    };
#ifdef USE_DBGTRACE
                    if (ci <0 || ci>=numEntries || cn <1 || cn >= numClusters || h[cn] < 0 || h[cn-1]>= numEntries)
                        DbgTrace(("tre1 %d %d %d %d %d %d \n",ci,cn,numEntries,numClusters,h[cn],h[cn-1]));
#endif
                    cd |= (1<<k[p]);
                    cd &= ~(1<<j[p]);
                    if (c < MAX_TRACE) // NP
                    {
                        tr[c].k=2*ci+1;
                        tr[c].d=1./((double) q2 - (double) q*(double) q /(double) (numEntries));
                        code[c]=cd;
                        c++;
                    }
                    else
                    { // What to do here?
tr[c].k=0; tr[c].d=0; code[c]=0; *trcnt=0; return; } k[p]=j[p]; } } else if (j[p]-k[p]==-1 ) { int ci=j[p]-p; // move it up int cn =p; h[cn]--; h[cn+1]++; if (h[cn] < 0 || h[cn+1]>= numEntries) { rescan =1; h[cn]++; h[cn+1]--; } else { q2+= 2*cn+1; q++; { int i1,cc=0; for(i1=0;i1<numClusters;i1++) cc += i1*i1*h[i1]; #ifdef USE_DBGTRACE if (cc !=q2) DbgTrace(("2- q2 %d %d\n", cc,q2)); #endif }; #ifdef USE_DBGTRACE if (ci <0 || ci>=numEntries || cn >= numClusters-1 || h[cn] < 0 || h[cn+1]>= numEntries) DbgTrace(("tre2 %d %d %d %d %d %d \n",ci,cn,numEntries,numClusters,h[cn],h[cn+1])); #endif cd |= (1<<k[p]); cd &= ~(1<<j[p]); if (c < MAX_TRACE) // NP { tr[c].k=2*ci; tr[c].d=1./((double) q2 - (double) q*(double) q /(double) (numEntries)); code[c]=cd; c++; } else { // What to do here? tr[c].k=0; tr[c].d=0; code[c]=0; *trcnt=0; return; } k[p]=j[p]; } } } } while (rescan); c0++; if (numClusters < 8) break; } if (numClusters < 7) break; } if (numClusters < 6) break; } if (numClusters < 5) break; } if (numClusters < 4) break; } if (numClusters < 3) break; } if (numClusters < 2) break; } *trcnt=c; } double optQuantAnD( double data[MAX_ENTRIES][DIMENSION], int numEntries, int numClusters, int index[MAX_ENTRIES], double out[MAX_ENTRIES][DIMENSION], double direction [DIMENSION],double *step ) { #ifdef USE_DBGTRACE DbgTrace(()); #endif int index_[MAX_ENTRIES]; int maxTry=MAX_TRY*10; int try_two=50; int i,j,k; double t,s; double centered[MAX_ENTRIES][DIMENSION]; double mean[DIMENSION]; double cov[DIMENSION][DIMENSION]; double projected[MAX_ENTRIES]; int order_[MAX_ENTRIES]; for (i=0;i<numEntries;i++) for (j=0;j<DIMENSION;j++) centered[i][j]=data[i][j]; centerInPlace(centered, numEntries, mean); covariance(centered, numEntries, cov); // check if they all are the same t=0; for (j=0;j<DIMENSION;j++) t+= cov[j][j]; if (t==0 || numEntries==0) { for (i=0;i<numEntries;i++) { index[i]=0; for (j=0;j<DIMENSION;j++) out[i][j]=mean[j]; } return 0.; } eigenVector(cov, direction); 
project(centered, numEntries, direction, projected); for (i=0;i<maxTry;i++) { int done =0; if (i) { do { double q; q=s=t=0; for (k=0;k<numEntries;k++) { s+= index[k]; t+= index[k]*index[k]; } for (j=0;j<DIMENSION;j++) { direction[j]=0; for (k=0;k<numEntries;k++) direction[j]+=centered[k][j]*index[k]; q+= direction[j]* direction[j]; } s /= (double) numEntries; t = t - s * s * (double) numEntries; assert(t !=0); t = (t == 0 ? 0. : 1/t); // We need to requantize q = sqrt(q); t *=q; if (q !=0) for (j=0;j<DIMENSION;j++) direction[j]/=q; // direction normalized project(centered, numEntries, direction, projected); sortProjection(projected, order_, numEntries); int index__[MAX_ENTRIES]; // it's projected and centered; cluster centers are (index[i]-s)*t (*dir) k=0; for (j=0; j < numEntries;j++) { while (projected[order_[j]] > (k+0.5 -s)*t && k < numClusters-1) k++; index__[order_[j]]=k; } done =1; for (j=0; j < numEntries;j++) { done = (done && (index__[j]==index[j])); index[j]=index__[j]; } } while (! done && try_two--); if (i==1) for (j=0; j < numEntries;j++) index_[j]=index[j]; else { done =1; for (j=0; j < numEntries;j++) { done = (done && (index_[j]==index[j])); index_[j]=index_[j]; } if (done) break; } } quant_AnD_Shell(projected, numClusters,numEntries, index); } s=t=0; double q=0; for (k=0;k<numEntries;k++) { s+= index[k]; t+= index[k]*index[k]; } for (j=0;j<DIMENSION;j++) { direction[j]=0; for (k=0;k<numEntries;k++) direction[j]+=centered[k][j]*index[k]; q+= direction[j]* direction[j]; } s /= (double) numEntries; t = t - s * s * (double) numEntries; #ifdef USE_DBGTRACE if (t==0) DbgTrace(("l;lkjk")); #endif assert(t !=0); t = (t == 0 ? 0. 
: 1/t); for (i=0;i<numEntries;i++) for (j=0;j<DIMENSION;j++) out[i][j]=mean[j]+direction[j]*t*(index[i]-s); // normalize direction for output q=sqrt(q); *step=t*q; for (j=0;j<DIMENSION;j++) direction[j]/=q; return totalError(data,out,numEntries); } double optQuantAnD_d( double data[MAX_ENTRIES][MAX_DIMENSION_BIG], int numEntries, int numClusters, int index[MAX_ENTRIES], double out[MAX_ENTRIES][MAX_DIMENSION_BIG], double direction [MAX_DIMENSION_BIG],double *step, int dimension ) { int index_[MAX_ENTRIES]; int maxTry=MAX_TRY*10; int try_two=50; int i,j,k; double t,s; double centered[MAX_ENTRIES][MAX_DIMENSION_BIG]; double mean[MAX_DIMENSION_BIG]; double cov[MAX_DIMENSION_BIG][MAX_DIMENSION_BIG]; double projected[MAX_ENTRIES]; int order_[MAX_ENTRIES]; for (i=0;i<numEntries;i++) for (j=0;j<dimension;j++) centered[i][j]=data[i][j]; centerInPlace_d(centered, numEntries, mean, dimension); covariance_d(centered, numEntries, cov, dimension); // check if they all are the same t=0; for (j=0;j<dimension;j++) t+= cov[j][j]; if (t<(1./256.) || numEntries==0) { for (i=0;i<numEntries;i++) { index[i]=0; for (j=0;j<dimension;j++) out[i][j]=mean[j]; } return 0.; } eigenVector_d(cov, direction, dimension); project_d(centered, numEntries, direction, projected, dimension); for (i=0;i<maxTry;i++) { int done =0; if (i) { do { double q; q=s=t=0; for (k=0;k<numEntries;k++) { s+= index[k]; t+= index[k]*index[k]; } for (j=0;j<dimension;j++) { direction[j]=0; for (k=0;k<numEntries;k++) direction[j]+=centered[k][j]*index[k]; q+= direction[j]* direction[j]; } s /= (double) numEntries; t = t - s * s * (double) numEntries; assert(t !=0); t = (t == 0 ? 0. 
: 1/t); // We need to requantize q = sqrt(q); t *=q; if (q !=0) for (j=0;j<dimension;j++) direction[j]/=q; // direction normalized project_d(centered, numEntries, direction, projected, dimension); sortProjection(projected, order_, numEntries); int index__[MAX_ENTRIES]; // it's projected and centered; cluster centers are (index[i]-s)*t (*dir) k=0; for (j=0; j < numEntries;j++) { while (projected[order_[j]] > (k+0.5 -s)*t && k < numClusters-1) k++; index__[order_[j]]=k; } done =1; for (j=0; j < numEntries;j++) { done = (done && (index__[j]==index[j])); index[j]=index__[j]; } } while (! done && try_two--); if (i==1) for (j=0; j < numEntries;j++) index_[j]=index[j]; else { done =1; for (j=0; j < numEntries;j++) { done = (done && (index_[j]==index[j])); index_[j]=index_[j]; } if (done) break; } } quant_AnD_Shell(projected, numClusters,numEntries, index); } s=t=0; double q=0; for (k=0;k<numEntries;k++) { s+= index[k]; t+= index[k]*index[k]; } for (j=0;j<dimension;j++) { direction[j]=0; for (k=0;k<numEntries;k++) direction[j]+=centered[k][j]*index[k]; q+= direction[j]* direction[j]; } s /= (double) numEntries; t = t - s * s * (double) numEntries; assert(t !=0); t = (t == 0 ? 0. : 1/t); for (i=0;i<numEntries;i++) for (j=0;j<dimension;j++) out[i][j]=mean[j]+direction[j]*t*(index[i]-s); // normalize direction for output q=sqrt(q); *step=t*q; for (j=0;j<dimension;j++) direction[j]/=q; return totalError_d(data,out,numEntries, dimension); }
/* * Copyright (C) 2019 TU Dresden * All rights reserved. * * Authors: * Christian Menard */ #ifndef REACTOR_CPP_ACTION_IMPL_HH #define REACTOR_CPP_ACTION_IMPL_HH #include "../assert.hh" #include "../environment.hh" namespace reactor { template <class T> template <class Dur> void Action<T>::schedule(const ImmutableValuePtr<T>& value_ptr, Dur delay) { Duration time_delay = std::chrono::duration_cast<Duration>(delay); // NOLINT reactor::validate(time_delay >= Duration::zero(), "Schedule cannot be called with a negative delay!"); reactor::validate(value_ptr != nullptr, "Actions may not be scheduled with a nullptr value!"); auto* scheduler = environment()->scheduler(); // NOLINT auto setup = [value_ptr, this]() { this->value_ptr_ = std::move(value_ptr); }; // NOLINT if (is_logical()) { time_delay += this->min_delay(); auto tag = Tag::from_logical_time(scheduler->logical_time()).delay(time_delay); // NOLINT scheduler->schedule_sync(tag, this, setup); } else { auto tag = Tag::from_physical_time(get_physical_time() + time_delay); // NOLINT scheduler->schedule_async(tag, this, setup); } } template <class Dur> void Action<void>::schedule(Dur delay) { auto time_delay = std::chrono::duration_cast<Duration>(delay); // NOLINT reactor::validate(time_delay >= Duration::zero(), "Schedule cannot be called with a negative delay!"); auto* scheduler = environment()->scheduler(); // NOLINT auto setup = [this]() { this->present_ = true; }; // NOLINT if (is_logical()) { time_delay += this->min_delay(); auto tag = Tag::from_logical_time(scheduler->logical_time()).delay(time_delay); // NOLINT scheduler->schedule_sync(tag, this, setup); } else { // physical action auto tag = Tag::from_physical_time(get_physical_time() + time_delay); // NOLINT scheduler->schedule_async(tag, this, setup); } } } // namespace reactor #endif
// Copyright (c) 2014-2017, The Monero Project // // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are // permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other // materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be // used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL // THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF // THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// // Parts of this file are originally copyright (c) 2012-2013 The Cryptonote developers #include "command_line.h" #include <boost/algorithm/string/compare.hpp> #include <boost/algorithm/string/predicate.hpp> #include <unordered_set> #include "blockchain_db/db_types.h" #include "common/i18n.h" #include "cryptonote_config.h" #include "string_tools.h" namespace command_line { namespace { const char* tr(const char* str) { return i18n_translate(str, "command_line"); } } std::string input_line(const std::string& prompt) { std::cout << prompt; std::string buf; std::getline(std::cin, buf); return epee::string_tools::trim(buf); } bool is_yes(const std::string& str) { if (str == "y" || str == "Y") return true; boost::algorithm::is_iequal ignore_case{}; if (boost::algorithm::equals("yes", str, ignore_case)) return true; if (boost::algorithm::equals(command_line::tr("yes"), str, ignore_case)) return true; return false; } const arg_descriptor<bool> arg_help = {"help", "Produce help message"}; const arg_descriptor<bool> arg_version = {"version", "Output version information"}; const arg_descriptor<std::string> arg_data_dir = {"data-dir", "Specify data directory"}; const arg_descriptor<std::string> arg_testnet_data_dir = {"testnet-data-dir", "Specify testnet data directory"}; const arg_descriptor<bool> arg_test_drop_download = {"test-drop-download", "For net tests: in download, discard ALL blocks instead checking/saving them (very fast)"}; const arg_descriptor<uint64_t> arg_test_drop_download_height = {"test-drop-download-height", "Like test-drop-download but disards only after around certain height", 0}; const arg_descriptor<int> arg_test_dbg_lock_sleep = {"test-dbg-lock-sleep", "Sleep time in ms, defaults to 0 (off), used to debug before/after locking mutex. Values 100 to 1000 are good for tests."}; const arg_descriptor<bool, false> arg_testnet_on = { "testnet" , "Run on testnet. The wallet must be launched with --testnet flag." 
, false }; const arg_descriptor<bool> arg_dns_checkpoints = { "enforce-dns-checkpointing" , "checkpoints from DNS server will be enforced" , false }; std::string arg_db_type_description = "Specify database type, available: " + boost::algorithm::join(cryptonote::blockchain_db_types, ", "); const command_line::arg_descriptor<std::string> arg_db_type = { "db-type" , arg_db_type_description.c_str() , DEFAULT_DB_TYPE }; const command_line::arg_descriptor<std::string> arg_db_sync_mode = { "db-sync-mode" , "Specify sync option, using format [safe|fast|fastest]:[sync|async]:[nblocks_per_sync]." , "fast:async:1000" }; const command_line::arg_descriptor<uint64_t> arg_fast_block_sync = { "fast-block-sync" , "Sync up most of the way by using embedded, known block hashes." , 1 }; const command_line::arg_descriptor<uint64_t> arg_prep_blocks_threads = { "prep-blocks-threads" , "Max number of threads to use when preparing block hashes in groups." , 4 }; const command_line::arg_descriptor<uint64_t> arg_show_time_stats = { "show-time-stats" , "Show time-stats when processing blocks/txs and disk synchronization." , 0 }; const command_line::arg_descriptor<size_t> arg_block_sync_size = { "block-sync-size" , "How many blocks to sync at once during chain synchronization." , BLOCKS_SYNCHRONIZING_DEFAULT_COUNT }; const command_line::arg_descriptor<std::string> arg_check_updates = { "check-updates" , "Check for new versions of monero: [disabled|notify|download|update]" , "notify" }; }
// Copyright (C) 2017 - 2018 by Pedro Mendes, Virginia Tech Intellectual // Properties, Inc., University of Heidelberg, and University of // of Connecticut School of Medicine. // All rights reserved. // Copyright (C) 2010 - 2016 by Pedro Mendes, Virginia Tech Intellectual // Properties, Inc., University of Heidelberg, and The University // of Manchester. // All rights reserved. #include "CLRenderResolver.h" #include <set> #include <assert.h> #include "copasi/layout/CLRenderInformationBase.h" #include "copasi/layout/CLStyle.h" #include "copasi/layout/CLColorDefinition.h" #include "copasi/layout/CLLineEnding.h" #include "copasi/layout/CLGradientBase.h" #include "copasi/layout/CLGraphicalObject.h" #include "copasi/layout/CLGlyphs.h" #include "copasi/layout/CLReactionGlyph.h" #include "CLRenderFlattener.h" /** * Constructor that takes a local render information object as the argument * as well as a list of local and a list of global render information objects * that are needed to resolve external references. 
*/
// Flattens the local render information against the referenced local and
// global lists, indexes the result, and resolves the background color.
// The resolver OWNS the flattened object (deleted in the destructor).
CLRenderResolver::CLRenderResolver(const CLLocalRenderInformation& renderInformation,
                                   const CDataVector<CLLocalRenderInformation>& localList,
                                   const CDataVector<CLGlobalRenderInformation>& globalList):
  mpRenderInformation(CLRenderFlattener::flatten_render_information(renderInformation, localList, globalList)),
  mLocal(true),
  mpBackgroundColor(NULL)
{
  // fill the maps
  this->fill_base_maps(this->mpRenderInformation);
  // the flattened object originated from a local render information, so the
  // dynamic_cast is expected to succeed here
  this->fill_local_maps(dynamic_cast<const CLLocalRenderInformation*>(mpRenderInformation));
  this->setBackgroundColor();
}

// Resolves the render information's background color into an owned
// CLColorDefinition (mpBackgroundColor). Accepts either a literal color value
// ("#rrggbb" / "#rrggbbaa") or the id of a color definition; an empty value
// defaults to white.
void CLRenderResolver::setBackgroundColor()
{
  if (this->mpRenderInformation)
    {
      std::string id = this->mpRenderInformation->getBackgroundColor();

      if (id.empty())
        {
          // default to opaque white and write it back to the model
          id = "#ffffff";
          mpRenderInformation->setBackgroundColor(id);
        }

      // must be a color value
      if (id[0] == '#')
        {
          // NOTE(review): debug-only validation — a malformed literal (length
          // other than 7 or 9) passes through silently in release builds
          assert(id.length() == 7 || id.length() == 9);
          this->mpBackgroundColor = new CLColorDefinition();
          this->mpBackgroundColor->setColorValue(id);
        }
      else
        // must be an id
        {
          const CLColorDefinition* pColor = this->getColorDefinition(id);

          if (pColor)
            {
              // make a copy; the map entries are not owned by the resolver
              this->mpBackgroundColor = new CLColorDefinition(*pColor);
            }
          // if the id is unknown, mpBackgroundColor stays NULL
        }
    }
}

/**
 * Constructor that takes a global render information object as the argument
 * and a list of additional global render information objects that might be
 * needed to resolve external references.
 * The render information is flattened first, which means the result contains
 * all information from referenced render information objects.
*/
// Flattens the global render information against the referenced global list,
// indexes the result, and resolves the background color. The resolver OWNS
// the flattened object (deleted in the destructor).
CLRenderResolver::CLRenderResolver(const CLGlobalRenderInformation& renderInformation,
                                   const CDataVector<CLGlobalRenderInformation>& globalList):
  mpRenderInformation(CLRenderFlattener::flatten_render_information(renderInformation, globalList)),
  mLocal(false),
  mpBackgroundColor(NULL)
{
  // fill the maps
  this->fill_base_maps(this->mpRenderInformation);
  this->fill_global_maps(dynamic_cast<const CLGlobalRenderInformation*>(this->mpRenderInformation));
  this->setBackgroundColor();
}

CLRenderResolver::~CLRenderResolver()
{
  // we need to delete the render information object that has been created by
  // the flattener
  if (this->mpRenderInformation != NULL) delete this->mpRenderInformation;

  // delete the background color
  if (this->mpBackgroundColor != NULL) delete this->mpBackgroundColor;
}

/**
 * This method fills the color, gradient and line ending maps for a render
 * information object.
 * The map entries are non-owning pointers into pRenderInformation, which the
 * resolver keeps alive for its own lifetime.
 */
void CLRenderResolver::fill_base_maps(const CLRenderInformationBase* pRenderInformation)
{
  // fill maps for colors, gradients and line endings
  size_t i, iMax = pRenderInformation->getNumColorDefinitions();
  const CLColorDefinition* pColor = NULL;

  for (i = 0; i < iMax; ++i)
    {
      // index color definitions by id
      pColor = pRenderInformation->getColorDefinition(i);
      this->mColorMap[pColor->getId()] = pColor;
    }

  iMax = pRenderInformation->getNumGradientDefinitions();
  const CLGradientBase* pGradient = NULL;

  for (i = 0; i < iMax; ++i)
    {
      // index gradient definitions by id
      pGradient = pRenderInformation->getGradientDefinition(i);
      this->mGradientMap[pGradient->getId()] = pGradient;
    }

  iMax = pRenderInformation->getNumLineEndings();
  const CLLineEnding* pLineEnding = NULL;

  for (i = 0; i < iMax; ++i)
    {
      // index line endings by id
      pLineEnding = pRenderInformation->getLineEnding(i);
      this->mLineEndingMap[pLineEnding->getId()] = pLineEnding;
    }
}

/**
 * This method fills the type and role maps for a global render information
 * object.
*/ void CLRenderResolver::fill_global_maps(const CLGlobalRenderInformation* pRenderInformation) { size_t i, iMax = pRenderInformation->getNumStyles(); const CLStyle* pStyle = NULL; for (i = 0 ; i < iMax ; ++i) { pStyle = pRenderInformation->getStyle(i); const std::set<std::string>& roleList = pStyle->getRoleList(); std::set<std::string>::const_iterator it = roleList.begin(), endit = roleList.end(); while (it != endit) { this->mRoleMap[*it] = pStyle; ++it; } const std::set<std::string>& typeList = pStyle->getTypeList(); it = typeList.begin(); endit = typeList.end(); while (it != endit) { this->mTypeMap[*it] = pStyle; ++it; } } } /** * This method fills the type, role and id maps for a local render information * object. */ void CLRenderResolver::fill_local_maps(const CLLocalRenderInformation* pRenderInformation) { size_t i, iMax = pRenderInformation->getNumStyles(); const CLStyle* pStyle = NULL; for (i = 0; i < iMax; ++i) { pStyle = pRenderInformation->getStyle(i); const std::set<std::string>& roleList = pStyle->getRoleList(); std::set<std::string>::const_iterator it = roleList.begin(), endit = roleList.end(); while (it != endit) { this->mRoleMap[*it] = pStyle; ++it; } const std::set<std::string>& typeList = pStyle->getTypeList(); it = typeList.begin(); endit = typeList.end(); while (it != endit) { this->mTypeMap[*it] = pStyle; ++it; } const CLLocalStyle* pLocalStyle = dynamic_cast<const CLLocalStyle*>(pStyle); const std::set<std::string>& keyList = pLocalStyle->getKeyList(); it = keyList.begin(); endit = keyList.end(); while (it != endit) { this->mKeyMap[*it] = pStyle; ++it; } } } /** * Method that tries to find the style for the given graphical * object that fits best. * If no style is found NULL is returned. 
*/
// Resolution order: (1) the object's key (local styles only), (2) an explicit
// or deduced role, (3) the glyph's type with "ANY" as the final fallback.
const CLStyle* CLRenderResolver::resolveStyle(const CLGraphicalObject* pObject) const
{
  const CLStyle* pResult = NULL;
  // try to resolve the id first
  pResult = this->resolveStyleForKey(pObject->getKey());

  // next try the role
  if (pResult == NULL)
    {
      std::string role = pObject->getObjectRole();

      // reference glyphs carry their own role string
      const CLReferenceGlyph* pRG = dynamic_cast<const CLReferenceGlyph*>(pObject);

      if (pRG != NULL && role.empty()) role = pRG->getRole();

      // metabolite reference glyphs encode the role as an enum; map it to the
      // role string used in render information
      const CLMetabReferenceGlyph* pSRG = dynamic_cast<const CLMetabReferenceGlyph*>(pObject);

      if (pSRG != NULL && role.empty())
        {
          CLMetabReferenceGlyph::Role role_t = pSRG->getRole();

          switch (role_t)
            {
              case CLMetabReferenceGlyph::SUBSTRATE:
                role = "substrate";
                break;

              case CLMetabReferenceGlyph::PRODUCT:
                role = "product";
                break;

              case CLMetabReferenceGlyph::SIDESUBSTRATE:
                role = "sidesubstrate";
                break;

              case CLMetabReferenceGlyph::SIDEPRODUCT:
                role = "sideproduct";
                break;

              case CLMetabReferenceGlyph::MODIFIER:
                role = "modifier";
                break;

              case CLMetabReferenceGlyph::ACTIVATOR:
                role = "activator";
                break;

              case CLMetabReferenceGlyph::INHIBITOR:
                role = "inhibitor";
                break;

              default:
                role = "";
            }

          // try if we can find a role in the deduced role map
          if (role.empty())
            {
              std::map<const CLMetabReferenceGlyph*, std::string>::const_iterator pos = this->mDeducedObjectRoles.find(pSRG);

              if (pos != this->mDeducedObjectRoles.end())
                {
                  role = pos->second;
                }
            }
        }

      if (!role.empty())
        {
          pResult = this->resolveStyleForRole(role);
        }

      // last try the type
      if (pResult == NULL)
        {
          // map the concrete glyph class to the type keyword used by styles
          std::string type = "GRAPHICALOBJECT";

          if (dynamic_cast<const CLCompartmentGlyph*>(pObject))
            {
              type = "COMPARTMENTGLYPH";
            }
          else if (dynamic_cast<const CLMetabGlyph*>(pObject))
            {
              type = "SPECIESGLYPH";
            }
          else if (dynamic_cast<const CLReactionGlyph*>(pObject))
            {
              type = "REACTIONGLYPH";
            }
          else if (dynamic_cast<const CLMetabReferenceGlyph*>(pObject))
            {
              type = "SPECIESREFERENCEGLYPH";
            }
          else if (dynamic_cast<const CLTextGlyph*>(pObject))
            {
              type = "TEXTGLYPH";
            }

          pResult = this->resolveStyleForType(type);
        }
    }

  return pResult;
}

/**
 * Method that tries to find the style for the given role.
 * If no style is found NULL is returned.
 */
const CLStyle* CLRenderResolver::resolveStyleForRole(const std::string& role) const
{
  const CLStyle* pResult = NULL;
  std::map<std::string, const CLStyle*>::const_iterator pos = this->mRoleMap.find(role);

  if (pos != this->mRoleMap.end())
    {
      pResult = pos->second;
    }

  return pResult;
}

/**
 * Method that tries to find the style for the given type.
 * If no style is found NULL is returned.
 */
const CLStyle* CLRenderResolver::resolveStyleForType(const std::string& type) const
{
  const CLStyle* pResult = NULL;
  std::map<std::string, const CLStyle*>::const_iterator pos = this->mTypeMap.find(type);

  if (pos != this->mTypeMap.end())
    {
      pResult = pos->second;
    }
  // look for a style that is valid for ANY type
  else
    {
      pos = this->mTypeMap.find("ANY");

      if (pos != this->mTypeMap.end())
        {
          pResult = pos->second;
        }
    }

  return pResult;
}

/**
 * Method that tries to find the style for the given key.
 * If no style is found NULL is returned.
 */
const CLStyle* CLRenderResolver::resolveStyleForKey(const std::string& key) const
{
  const CLStyle* pResult = NULL;
  std::map<std::string, const CLStyle*>::const_iterator pos = this->mKeyMap.find(key);

  if (pos != this->mKeyMap.end())
    {
      pResult = pos->second;
    }

  return pResult;
}

/**
 * Returns the gradient definition for a given id.
 * The returned pointer is owned by the flattened render information; NULL if
 * the id is unknown.
 */
const CLGradientBase* CLRenderResolver::getGradientBase(const std::string& id) const
{
  const CLGradientBase* pResult = NULL;
  std::map<std::string, const CLGradientBase*>::const_iterator pos = this->mGradientMap.find(id);

  if (pos != this->mGradientMap.end())
    {
      pResult = pos->second;
    }

  return pResult;
}

/**
 * Returns the color definition for a given id.
 * The returned pointer is owned by the flattened render information; NULL if
 * the id is unknown.
 */
const CLColorDefinition* CLRenderResolver::getColorDefinition(const std::string& id) const
{
  const CLColorDefinition* pResult = NULL;
  std::map<std::string, const CLColorDefinition*>::const_iterator pos = this->mColorMap.find(id);

  if (pos != this->mColorMap.end())
    {
      pResult = pos->second;
    }

  return pResult;
}

/**
 * Returns the line ending for a given id.
 * The returned pointer is owned by the flattened render information; NULL if
 * the id is unknown.
 */
const CLLineEnding* CLRenderResolver::getLineEnding(const std::string& id) const
{
  const CLLineEnding* pResult = NULL;
  std::map<std::string, const CLLineEnding*>::const_iterator pos = this->mLineEndingMap.find(id);

  if (pos != this->mLineEndingMap.end())
    {
      pResult = pos->second;
    }

  return pResult;
}

/**
 * Returns the background color.
 * May be NULL if the background color id could not be resolved.
 */
const CLColorDefinition* CLRenderResolver::getBackgroundColor() const
{
  return this->mpBackgroundColor;
}

/**
 * Sets the deduced object roles.
 * These are consulted by resolveStyle() when a metab reference glyph carries
 * no explicit role.
 */
void CLRenderResolver::setDeducedObjectRoles(const std::map<const CLMetabReferenceGlyph*, std::string>& deducedObjectRoles)
{
  this->mDeducedObjectRoles = deducedObjectRoles;
}

// Returns the data model of the flattened render information.
// NOTE(review): asserts (debug-only) that the model is non-NULL; release
// builds return whatever the render information holds.
CDataModel * CLRenderResolver::getObjectDataModel() const
{
  CDataModel * pDataModel = mpRenderInformation->getObjectDataModel();
  assert(pDataModel != NULL);
  return pDataModel;
}
#include <iostream> #include <iomanip> #include "color/color.hpp" int main( int argc, char *argv[] ) { ::color::cmy< float > c0; //!< Instead of float you may put std::uint8_t,std::uint16_t, std::uint32_t, std::uint64_t, double, long double ::color::lms< std::uint8_t > c1; //!< Instead of std::uint8_t you may put std::uint16_t, std::uint32_t, std::uint64_t, float, double, long double c0 = ::color::constant::lavender_type{}; c1 = ::color::constant::orange_type{}; // Assign c0 = c1; std::cout << c0[0] << ", " << c0[1] << ", " << c0[2] << std::endl; // .. and vice versa c1 = c0; std::cout << c1[0] << ", " << c1[1] << ", " << c1[2] << std::endl; return EXIT_SUCCESS; }
// Copyright 2020 The Chromium OS Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "cryptohome/disk_cleanup_routines.h" #include <memory> #include <set> #include <utility> #include <vector> #include <base/files/file_path.h> #include <base/logging.h> #include "cryptohome/homedirs.h" #include "cryptohome/mount_constants.h" #include "cryptohome/platform.h" using base::FilePath; namespace cryptohome { DiskCleanupRoutines::DiskCleanupRoutines(HomeDirs* homedirs, Platform* platform) : homedirs_(homedirs), platform_(platform) {} DiskCleanupRoutines::~DiskCleanupRoutines() = default; bool DiskCleanupRoutines::DeleteUserCache(const std::string& obfuscated) { FilePath user_dir = GetShadowDir(obfuscated); FilePath cache; if (!GetTrackedDirectory( user_dir, FilePath(kUserHomeSuffix).Append(kCacheDir), &cache)) { LOG(ERROR) << "Failed to locate the cache directory."; return false; } VLOG(1) << "Deleting Cache " << cache.value(); if (!DeleteDirectoryContents(cache)) { LOG(ERROR) << "Failed to remove the Cache directory"; return false; } return true; } bool DiskCleanupRoutines::DeleteUserGCache(const std::string& obfuscated) { FilePath user_dir = GetShadowDir(obfuscated); bool ret = true; // GCache dirs that can be completely removed on low space. const FilePath kRemovableGCacheDirs[] = { FilePath(kUserHomeSuffix) .Append(kGCacheDir) .Append(kGCacheVersion1Dir) .Append(kGCacheTmpDir), }; for (const auto& dir : kRemovableGCacheDirs) { FilePath gcachetmp; if (!GetTrackedDirectory(user_dir, dir, &gcachetmp)) { LOG(ERROR) << "Failed to locate GCache temp directory " << dir.value(); ret = false; continue; } VLOG(1) << "Deleting GCache " << gcachetmp.value(); if (!DeleteDirectoryContents(gcachetmp)) { LOG(ERROR) << "Failed to remove the GCache directory"; ret = false; } } // GCache dirs that contain files marked as removable. 
const FilePath kCleanableGCacheDirs[] = { FilePath(kUserHomeSuffix).Append(kGCacheDir).Append(kGCacheVersion1Dir), FilePath(kUserHomeSuffix).Append(kGCacheDir).Append(kGCacheVersion2Dir), }; for (const auto& dir : kCleanableGCacheDirs) { FilePath gcache_dir; if (!GetTrackedDirectory(user_dir, dir, &gcache_dir)) { LOG(ERROR) << "Failed to locate GCache directory " << dir.value(); ret = false; continue; } VLOG(1) << "Cleaning removable files in " << gcache_dir.value(); if (!RemoveAllRemovableFiles(gcache_dir)) { ret = false; } } return ret; } bool DiskCleanupRoutines::DeleteUserAndroidCache( const std::string& obfuscated) { FilePath user_dir = GetShadowDir(obfuscated); bool ret = true; FilePath root; if (!GetTrackedDirectory(user_dir, FilePath(kRootHomeSuffix), &root)) { LOG(ERROR) << "Failed to locate the root directory."; return false; } // The package directory stores the inodes of the cache directory and code // cache directory in the kAndroidCacheInodeAttribute xattr and // kAndroidCodeCacheInodeAttribute xattr. Data is stored under // root/android-data/data/data/<package name>/[code_]cache. It is not // desirable to make all package name directories unencrypted, they // are not marked as tracked directory. // TODO(crbug/625872): Mark root/android/data/data/ as pass through. // A set of parent directory/inode combinations. We need the parent directory // as the inodes may have been re-used elsewhere if the cache directory was // deleted. 
std::set<std::pair<const FilePath, ino_t>> cache_inodes; std::unique_ptr<cryptohome::FileEnumerator> file_enumerator( platform_->GetFileEnumerator(root, true, base::FileEnumerator::DIRECTORIES)); FilePath next_path; while (!(next_path = file_enumerator->Next()).empty()) { ino_t inode = file_enumerator->GetInfo().stat().st_ino; std::pair<const FilePath, ino_t> parent_inode_pair = std::make_pair(next_path.DirName(), inode); if (cache_inodes.find(parent_inode_pair) != cache_inodes.end()) { VLOG(1) << "Deleting Android Cache " << next_path.value(); if (!DeleteDirectoryContents(next_path)) { LOG(ERROR) << "Failed to remove android cache " << next_path.value(); ret = false; } cache_inodes.erase(parent_inode_pair); } for (const char* attribute : {kAndroidCacheInodeAttribute, kAndroidCodeCacheInodeAttribute}) { if (platform_->HasExtendedFileAttribute(next_path, attribute)) { uint64_t inode; if (platform_->GetExtendedFileAttribute(next_path, attribute, reinterpret_cast<char*>(&inode), sizeof(inode))) { // Because FileEnumerator processes all entries in a directory before // continuing to sub-directories we can assume that the inode is added // here before the directory that has the inode is processed. 
cache_inodes.insert(std::make_pair(next_path, inode)); } } } } return ret; } bool DiskCleanupRoutines::DeleteUserProfile(const std::string& obfuscated) { FilePath shadow_dir = GetShadowDir(obfuscated); homedirs_->RemoveLECredentials(obfuscated); if (!platform_->DeleteFile(shadow_dir, true)) { PLOG(WARNING) << "Failed to remove " << shadow_dir.value(); return false; } return true; } base::FilePath DiskCleanupRoutines::GetShadowDir( const std::string& obfuscated) const { return homedirs_->shadow_root().Append(obfuscated); } bool DiskCleanupRoutines::GetTrackedDirectory(const FilePath& user_dir, const FilePath& tracked_dir_name, FilePath* out) { FilePath vault_path = user_dir.Append(kEcryptfsVaultDir); if (platform_->DirectoryExists(vault_path)) { // On Ecryptfs, tracked directories' names are not encrypted. *out = user_dir.Append(kEcryptfsVaultDir).Append(tracked_dir_name); return true; } // This is dircrypto. Use the xattr to locate the directory. return GetTrackedDirectoryForDirCrypto(user_dir.Append(kMountDir), tracked_dir_name, out); } bool DiskCleanupRoutines::GetTrackedDirectoryForDirCrypto( const FilePath& mount_dir, const FilePath& tracked_dir_name, FilePath* out) { FilePath current_name; FilePath current_path = mount_dir; // Iterate over name components. This way, we don't have to inspect every // directory under |mount_dir|. 
std::vector<std::string> name_components; tracked_dir_name.GetComponents(&name_components); for (const auto& name_component : name_components) { FilePath next_path; std::unique_ptr<FileEnumerator> enumerator( platform_->GetFileEnumerator(current_path, false /* recursive */, base::FileEnumerator::DIRECTORIES)); for (FilePath dir = enumerator->Next(); !dir.empty(); dir = enumerator->Next()) { if (platform_->HasExtendedFileAttribute(dir, kTrackedDirectoryNameAttribute)) { std::string name; if (!platform_->GetExtendedFileAttributeAsString( dir, kTrackedDirectoryNameAttribute, &name)) return false; if (name == name_component) { // This is the directory we're looking for. next_path = dir; break; } } } if (next_path.empty()) { LOG(ERROR) << "Tracked dir not found " << tracked_dir_name.value(); return false; } current_path = next_path; } *out = current_path; return true; } bool DiskCleanupRoutines::DeleteDirectoryContents(const FilePath& dir) { bool ret = true; std::unique_ptr<FileEnumerator> subdir_enumerator( platform_->GetFileEnumerator(dir, false, base::FileEnumerator::FILES | base::FileEnumerator::DIRECTORIES | base::FileEnumerator::SHOW_SYM_LINKS)); for (FilePath subdir_path = subdir_enumerator->Next(); !subdir_path.empty(); subdir_path = subdir_enumerator->Next()) { if (!platform_->DeleteFile(subdir_path, true)) { PLOG(WARNING) << "Failed to remove " << subdir_path.value(); ret = false; } } return ret; } bool DiskCleanupRoutines::RemoveAllRemovableFiles(const FilePath& dir) { bool ret = true; std::unique_ptr<FileEnumerator> file_enumerator( platform_->GetFileEnumerator(dir, true, base::FileEnumerator::FILES)); for (FilePath file = file_enumerator->Next(); !file.empty(); file = file_enumerator->Next()) { if (platform_->HasNoDumpFileAttribute(file) || platform_->HasExtendedFileAttribute(file, kRemovableFileAttribute)) { if (!platform_->DeleteFile(file, false)) { PLOG(WARNING) << "Failed to remove: " << file.value(); ret = false; } } } return ret; } } // namespace 
cryptohome
// Editor category entries for ZEI interior compositions.
// NOTE(review): CfgFactionClasses appears to be used here for editor/Zeus
// grouping — confirm against the mod's config documentation.
class CfgFactionClasses
{
    // External base class declared elsewhere; inherited by the entries below.
    class NO_CATEGORY;

    // Category for finished ZEI interior compositions.
    class zei_interiors: NO_CATEGORY
    {
        displayName = "Interiors (ZEI)";
    };

    // Category for in-development ZEI interior compositions.
    class zei_interiors_dev: NO_CATEGORY
    {
        displayName = "Interiors Development (ZEI)";
    };
};
/* * Copyright (c) 2017 - 2018, Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "runtime/helpers/surface_formats.h" #include "runtime/helpers/aligned_memory.h" #include "runtime/gmm_helper/gmm.h" #include "runtime/mem_obj/image.h" #include "unit_tests/fixtures/device_fixture.h" #include "unit_tests/helpers/debug_manager_state_restore.h" #include "unit_tests/helpers/kernel_binary_helper.h" #include "unit_tests/mocks/mock_buffer.h" #include "unit_tests/mocks/mock_command_queue.h" #include "unit_tests/mocks/mock_context.h" #include "unit_tests/mocks/mock_gmm_resource_info.h" #include "gtest/gtest.h" #include "test.h" using namespace OCLRT; class Nv12ImageTest : public testing::Test { public: void computeExpectedOffsets(Image *image) { SurfaceOffsets expectedSurfaceOffsets = {0}; GMM_REQ_OFFSET_INFO reqOffsetInfo = {}; SurfaceOffsets requestedOffsets = {0}; auto mockResInfo = reinterpret_cast<::testing::NiceMock<MockGmmResourceInfo> *>(image->getGraphicsAllocation()->gmm->gmmResourceInfo.get()); mockResInfo->getOffset(reqOffsetInfo); if (image->getImageDesc().mem_object) { expectedSurfaceOffsets.offset = reqOffsetInfo.Render.Offset; expectedSurfaceOffsets.xOffset = reqOffsetInfo.Render.XOffset / (mockResInfo->getBitsPerPixel() / 8); expectedSurfaceOffsets.yOffset = reqOffsetInfo.Render.YOffset; } expectedSurfaceOffsets.yOffsetForUVplane = reqOffsetInfo.Lock.Offset / reqOffsetInfo.Lock.Pitch; image->getSurfaceOffsets(requestedOffsets); EXPECT_EQ(expectedSurfaceOffsets.offset, requestedOffsets.offset); EXPECT_EQ(expectedSurfaceOffsets.xOffset, requestedOffsets.xOffset); EXPECT_EQ(expectedSurfaceOffsets.yOffset, requestedOffsets.yOffset); EXPECT_EQ(expectedSurfaceOffsets.yOffsetForUVplane, requestedOffsets.yOffsetForUVplane); } protected: void SetUp() override { imageFormat.image_channel_data_type = CL_UNORM_INT8; imageFormat.image_channel_order = CL_NV12_INTEL; imageDesc.mem_object = NULL; imageDesc.image_array_size = 0; imageDesc.image_depth = 1; imageDesc.image_height = 4 * 4; // Valid values multiple of 4 imageDesc.image_width = 4 * 
4; // Valid values multiple of 4 imageDesc.image_row_pitch = 0; imageDesc.image_slice_pitch = 0; imageDesc.image_type = CL_MEM_OBJECT_IMAGE2D; imageDesc.num_mip_levels = 0; imageDesc.num_samples = 0; flags = CL_MEM_HOST_NO_ACCESS; } void validateImageWithFlags(cl_mem_flags flags) { auto surfaceFormat = Image::getSurfaceFormatFromTable(flags, &imageFormat); retVal = Image::validate(&context, flags, surfaceFormat, &imageDesc, nullptr); } Image *createImageWithFlags(cl_mem_flags flags) { auto surfaceFormat = Image::getSurfaceFormatFromTable(flags, &imageFormat); return Image::create(&context, flags, surfaceFormat, &imageDesc, nullptr, retVal); } cl_int retVal = CL_SUCCESS; MockContext context; cl_image_format imageFormat; cl_image_desc imageDesc; cl_mem_flags flags; }; TEST_F(Nv12ImageTest, isNV12ImageReturnsTrue) { auto image = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL); ASSERT_NE(nullptr, image); EXPECT_TRUE(IsNV12Image(&image->getImageFormat())); delete image; } TEST_F(Nv12ImageTest, validNV12ImageFormatAndDescriptor) { validateImageWithFlags(flags); EXPECT_EQ(CL_SUCCESS, retVal); } TEST_F(Nv12ImageTest, invalidNV12ImageFormat) { imageFormat.image_channel_data_type = CL_SNORM_INT16; validateImageWithFlags(flags); EXPECT_EQ(CL_IMAGE_FORMAT_NOT_SUPPORTED, retVal); } TEST_F(Nv12ImageTest, invalidNV12ImageType) { imageDesc.image_type = CL_MEM_OBJECT_IMAGE1D; validateImageWithFlags(flags); EXPECT_EQ(CL_INVALID_IMAGE_DESCRIPTOR, retVal); } TEST_F(Nv12ImageTest, DISABLED_invalidNV12ImageDepth) { imageDesc.image_depth = 2; validateImageWithFlags(flags); EXPECT_EQ(CL_INVALID_IMAGE_DESCRIPTOR, retVal); } TEST_F(Nv12ImageTest, invalidNV12ImageHeigth) { imageDesc.image_height = 17; validateImageWithFlags(flags); EXPECT_EQ(CL_INVALID_IMAGE_DESCRIPTOR, retVal); } TEST_F(Nv12ImageTest, invalidNV12ImageWidth) { imageDesc.image_width = 17; validateImageWithFlags(flags); EXPECT_EQ(CL_INVALID_IMAGE_DESCRIPTOR, retVal); } TEST_F(Nv12ImageTest, 
invalidNV12ImageFlag) { flags &= ~(CL_MEM_HOST_NO_ACCESS); validateImageWithFlags(flags); EXPECT_EQ(CL_INVALID_VALUE, retVal); } TEST_F(Nv12ImageTest, validateNV12YPlane) { auto image = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL); ASSERT_NE(nullptr, image); imageDesc.mem_object = image; imageDesc.image_depth = 0; // Plane Y of NV12 image validateImageWithFlags(CL_MEM_READ_WRITE); EXPECT_EQ(CL_SUCCESS, retVal); delete image; } TEST_F(Nv12ImageTest, validateNV12YUVPlane) { auto image = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL); ASSERT_NE(nullptr, image); imageDesc.mem_object = image; imageDesc.image_depth = 1; // Plane UV of NV12 image validateImageWithFlags(CL_MEM_READ_WRITE); EXPECT_EQ(CL_SUCCESS, retVal); delete image; } TEST_F(Nv12ImageTest, givenNV12ImageWhenInvalidDepthIsPassedThenValidateFails) { auto image = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL); ASSERT_NE(nullptr, image); imageDesc.mem_object = image; imageDesc.image_depth = 3; // Invalid Plane of NV12 image validateImageWithFlags(CL_MEM_READ_WRITE); EXPECT_EQ(CL_INVALID_IMAGE_DESCRIPTOR, retVal); delete image; } TEST_F(Nv12ImageTest, given2DImageWhenPassedToValidateImageTraitsThenValidateReturnsSuccess) { auto image = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL); ASSERT_NE(nullptr, image); imageDesc.mem_object = image; imageDesc.image_depth = 0; retVal = Image::validateImageTraits(&context, CL_MEM_READ_WRITE, &imageFormat, &imageDesc, nullptr); EXPECT_EQ(CL_SUCCESS, retVal); delete image; } TEST_F(Nv12ImageTest, given1DImageWhenPassedAsParentImageThenValidateImageTraitsReturnsSuccess) { imageDesc.image_type = CL_MEM_OBJECT_IMAGE1D; auto image = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL); ASSERT_NE(nullptr, image); imageDesc.mem_object = image; imageDesc.image_depth = 0; retVal = Image::validateImageTraits(&context, 
CL_MEM_READ_WRITE, &imageFormat, &imageDesc, nullptr); EXPECT_EQ(CL_SUCCESS, retVal); delete image; } TEST_F(Nv12ImageTest, givenBufferWhenPassedAsNV12ParentImageThenValidateImageTraitsReturnsInvalidDesriptor) { MockBuffer Buffer; imageDesc.mem_object = &Buffer; imageDesc.image_depth = 0; // Plane of NV12 image retVal = Image::validateImageTraits(&context, CL_MEM_READ_WRITE, &imageFormat, &imageDesc, nullptr); EXPECT_EQ(CL_INVALID_IMAGE_DESCRIPTOR, retVal); } TEST_F(Nv12ImageTest, createNV12Image) { auto image = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL); ASSERT_NE(nullptr, image); auto rowPitch = image->getHostPtrRowPitch(); EXPECT_NE(0u, rowPitch); SurfaceOffsets surfaceOffsets; image->getSurfaceOffsets(surfaceOffsets); EXPECT_EQ(0u, surfaceOffsets.offset); EXPECT_EQ(0u, surfaceOffsets.xOffset); EXPECT_EQ(0u, surfaceOffsets.yOffset); EXPECT_NE(0u, surfaceOffsets.yOffsetForUVplane); delete image; } TEST_F(Nv12ImageTest, createNV12YPlaneImage) { // Create Parent NV12 image auto imageNV12 = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL); ASSERT_NE(nullptr, imageNV12); imageDesc.mem_object = imageNV12; imageFormat.image_channel_data_type = CL_UNORM_INT8; imageFormat.image_channel_order = CL_R; imageDesc.image_width = 0; imageDesc.image_height = 0; imageDesc.image_depth = 0; // Create NV12 Y Plane image auto imageYPlane = createImageWithFlags(CL_MEM_READ_WRITE); ASSERT_NE(nullptr, imageYPlane); EXPECT_EQ(true, imageYPlane->isImageFromImage()); EXPECT_EQ(imageNV12->getGraphicsAllocation(), imageYPlane->getGraphicsAllocation()); cl_image_desc parentDimensions, planeDimensions; parentDimensions = imageNV12->getImageDesc(); planeDimensions = imageYPlane->getImageDesc(); EXPECT_EQ(parentDimensions.image_height, planeDimensions.image_height); EXPECT_EQ(parentDimensions.image_width, planeDimensions.image_width); EXPECT_EQ(0u, planeDimensions.image_depth); EXPECT_NE(0u, planeDimensions.image_row_pitch); 
// --- Tail of a preceding test (its opening lines are above this chunk). Judging by the
// `imageYPlane` identifier it verifies the Y-plane child image of a parent NV12 image:
// remaining pitch/type/array-size assertions, offset computation, and cleanup. ---
EXPECT_EQ(parentDimensions.image_slice_pitch, planeDimensions.image_slice_pitch);
EXPECT_EQ(parentDimensions.image_type, planeDimensions.image_type);
EXPECT_EQ(parentDimensions.image_array_size, planeDimensions.image_array_size);
computeExpectedOffsets(imageYPlane);
computeExpectedOffsets(imageNV12);
delete imageYPlane;
delete imageNV12;
}

// Creating a UV-plane child image from a parent NV12 image must share the parent's
// graphics allocation and report half the parent's width and height (NV12 subsamples
// chroma 2x2), while inheriting row pitch, slice pitch, type, and array size.
TEST_F(Nv12ImageTest, createNV12UVPlaneImage) {
    // Create Parent NV12 image
    auto imageNV12 = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL);
    ASSERT_NE(nullptr, imageNV12);

    // Re-target the fixture's descriptor at the parent; width/height of 0 mean
    // "derive from parent", and image_depth selects the plane.
    imageDesc.mem_object = imageNV12;
    imageFormat.image_channel_data_type = CL_UNORM_INT8;
    imageFormat.image_channel_order = CL_R;
    imageDesc.image_width = 0;
    imageDesc.image_height = 0;
    imageDesc.image_depth = 1; // UV plane

    // Create NV12 UV Plane image
    auto imageUVPlane = createImageWithFlags(CL_MEM_READ_WRITE);
    ASSERT_NE(nullptr, imageUVPlane);

    EXPECT_EQ(true, imageUVPlane->isImageFromImage());
    // Child plane must alias the parent's backing allocation, not get its own.
    EXPECT_EQ(imageNV12->getGraphicsAllocation(), imageUVPlane->getGraphicsAllocation());

    cl_image_desc parentDimensions, planeDimensions;
    parentDimensions = imageNV12->getImageDesc();
    planeDimensions = imageUVPlane->getImageDesc();

    // UV plane is chroma-subsampled: half height, half width of the parent.
    EXPECT_EQ(parentDimensions.image_height / 2, planeDimensions.image_height);
    EXPECT_EQ(parentDimensions.image_width / 2, planeDimensions.image_width);
    // Depth is normalized to 0 on the created plane even though the request used depth=1.
    EXPECT_EQ(0u, planeDimensions.image_depth);
    EXPECT_EQ(parentDimensions.image_row_pitch, planeDimensions.image_row_pitch);
    EXPECT_NE(0u, planeDimensions.image_row_pitch);
    EXPECT_EQ(parentDimensions.image_slice_pitch, planeDimensions.image_slice_pitch);
    EXPECT_EQ(parentDimensions.image_type, planeDimensions.image_type);
    EXPECT_EQ(parentDimensions.image_array_size, planeDimensions.image_array_size);

    computeExpectedOffsets(imageUVPlane);
    computeExpectedOffsets(imageNV12);

    delete imageUVPlane;
    delete imageNV12;
}

// Same UV-plane checks as above, but with a 64x64 parent — a size for which the fixture's
// computeExpectedOffsets() yields a non-zero UV-plane offset and a zero yOffset.
TEST_F(Nv12ImageTest, createNV12UVPlaneImageWithOffsetOfUVPlane) {
    // This size returns offset of UV plane, and 0 yOffset
    imageDesc.image_height = 64; // Valid values multiple of 4
    imageDesc.image_width = 64;  // Valid values multiple of 4

    // Create Parent NV12 image
    auto imageNV12 = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL);
    ASSERT_NE(nullptr, imageNV12);

    // Point the descriptor at the parent and select the UV plane (see previous test).
    imageDesc.mem_object = imageNV12;
    imageFormat.image_channel_data_type = CL_UNORM_INT8;
    imageFormat.image_channel_order = CL_R;
    imageDesc.image_width = 0;
    imageDesc.image_height = 0;
    imageDesc.image_depth = 1; // UV plane

    // Create NV12 UV Plane image
    auto imageUVPlane = createImageWithFlags(CL_MEM_READ_WRITE);
    ASSERT_NE(nullptr, imageUVPlane);

    EXPECT_EQ(true, imageUVPlane->isImageFromImage());
    EXPECT_EQ(imageNV12->getGraphicsAllocation(), imageUVPlane->getGraphicsAllocation());

    cl_image_desc parentDimensions, planeDimensions;
    parentDimensions = imageNV12->getImageDesc();
    planeDimensions = imageUVPlane->getImageDesc();

    EXPECT_EQ(parentDimensions.image_height / 2, planeDimensions.image_height);
    EXPECT_EQ(parentDimensions.image_width / 2, planeDimensions.image_width);
    EXPECT_EQ(0u, planeDimensions.image_depth);
    EXPECT_EQ(parentDimensions.image_row_pitch, planeDimensions.image_row_pitch);
    EXPECT_NE(0u, planeDimensions.image_row_pitch);
    EXPECT_EQ(parentDimensions.image_slice_pitch, planeDimensions.image_slice_pitch);
    EXPECT_EQ(parentDimensions.image_type, planeDimensions.image_type);
    EXPECT_EQ(parentDimensions.image_array_size, planeDimensions.image_array_size);

    computeExpectedOffsets(imageUVPlane);
    computeExpectedOffsets(imageNV12);

    delete imageUVPlane;
    delete imageNV12;
}

// Creating an NV12 image from a host pointer must upload both planes: the mock command
// queue counts enqueueWriteImage calls, and exactly two (Y plane + UV plane) are expected.
HWTEST_F(Nv12ImageTest, checkIfPlanesAreWritten) {
    KernelBinaryHelper kbHelper(KernelBinaryHelper::BUILT_INS);
    auto device = std::unique_ptr<Device>(MockDevice::createWithNewExecutionEnvironment<MockDevice>(nullptr));
    char hostPtr[16 * 16 * 16];

    // Context owns a mock command queue so EnqueueWriteImage calls can be counted.
    auto contextWithMockCmdQ = new MockContext(device.get(), true);
    auto cmdQ = new MockCommandQueueHw<FamilyType>(contextWithMockCmdQ, device.get(), 0);
    contextWithMockCmdQ->overrideSpecialQueueAndDecrementRefCount(cmdQ);

    // Create Parent NV12 image
    cl_mem_flags flags = CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL | CL_MEM_USE_HOST_PTR;
    auto surfaceFormat = Image::getSurfaceFormatFromTable(flags, &imageFormat);
    auto imageNV12 = Image::create(contextWithMockCmdQ, flags, surfaceFormat, &imageDesc, hostPtr, retVal);

    // One write per plane: Y and UV.
    EXPECT_EQ(2u, cmdQ->EnqueueWriteImageCounter);
    ASSERT_NE(nullptr, imageNV12);
    contextWithMockCmdQ->release();
    delete imageNV12;
}

// setImageArg() on an NV12 image must program the RENDER_SURFACE_STATE with the image's
// surface offsets and force the alpha shader channel select to ONE (NV12 has no alpha).
HWTEST_F(Nv12ImageTest, setImageArg) {
    typedef typename FamilyType::RENDER_SURFACE_STATE RENDER_SURFACE_STATE;
    RENDER_SURFACE_STATE surfaceState;

    auto image = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL);
    ASSERT_NE(nullptr, image);

    SurfaceOffsets surfaceOffsets;
    image->getSurfaceOffsets(surfaceOffsets);
    image->setImageArg(&surfaceState, false, 0);

    EXPECT_EQ(surfaceOffsets.xOffset, surfaceState.getXOffset());
    EXPECT_EQ(surfaceOffsets.yOffset, surfaceState.getYOffset());
    EXPECT_EQ(surfaceOffsets.yOffsetForUVplane, surfaceState.getYOffsetForUOrUvPlane());

    // NV 12 image has correct alpha channel == one
    EXPECT_EQ(RENDER_SURFACE_STATE::SHADER_CHANNEL_SELECT_ALPHA_ONE, surfaceState.getShaderChannelSelectAlpha());
    delete image;
}

// setImageArg() on a UV-plane child image must point the surface base address at the
// shared allocation plus the plane's byte offset, and pick the tile mode the parent allows.
HWTEST_F(Nv12ImageTest, setImageArgUVPlaneImageSetsOffsetedSurfaceBaseAddressAndSetsCorrectTileMode) {
    typedef typename FamilyType::RENDER_SURFACE_STATE RENDER_SURFACE_STATE;
    RENDER_SURFACE_STATE surfaceState;

    // Create Parent NV12 image
    auto imageNV12 = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL);
    ASSERT_NE(nullptr, imageNV12);

    imageDesc.mem_object = imageNV12;
    imageFormat.image_channel_data_type = CL_UNORM_INT8;
    imageFormat.image_channel_order = CL_R;
    imageDesc.image_width = 0;
    imageDesc.image_height = 0;
    imageDesc.image_depth = 1; // UV plane

    // Create NV12 UV Plane image
    auto imageUVPlane = createImageWithFlags(CL_MEM_READ_WRITE);
    ASSERT_NE(nullptr, imageUVPlane);
    EXPECT_EQ(imageNV12->getGraphicsAllocation(), imageUVPlane->getGraphicsAllocation());

    SurfaceOffsets surfaceOffsets;
    imageUVPlane->getSurfaceOffsets(surfaceOffsets);
    imageUVPlane->setImageArg(&surfaceState, false, 0);

    // Base address = shared GPU allocation + UV-plane byte offset.
    EXPECT_EQ(imageUVPlane->getGraphicsAllocation()->getGpuAddress() + surfaceOffsets.offset, surfaceState.getSurfaceBaseAddress());

    // Tile mode follows the parent image's tiling capability.
    auto tileMode = RENDER_SURFACE_STATE::TILE_MODE_LINEAR;
    if (imageNV12->allowTiling()) {
        tileMode = RENDER_SURFACE_STATE::TILE_MODE_YMAJOR;
    }
    EXPECT_EQ(tileMode, surfaceState.getTileMode());

    delete imageUVPlane;
    delete imageNV12;
}

// setMediaImageArg() must propagate the NV12 surface offsets into the MEDIA_SURFACE_STATE
// (U/Cb offsets) and set the base address to allocation address + surface offset.
HWTEST_F(Nv12ImageTest, setMediaImageArg) {
    using MEDIA_SURFACE_STATE = typename FamilyType::MEDIA_SURFACE_STATE;
    MEDIA_SURFACE_STATE surfaceState;

    auto image = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL);
    ASSERT_NE(nullptr, image);

    SurfaceOffsets surfaceOffsets;
    image->getSurfaceOffsets(surfaceOffsets);
    image->setMediaImageArg(&surfaceState);

    EXPECT_EQ(surfaceOffsets.xOffset, surfaceState.getXOffsetForUCb());
    // NOTE(review): this compares yOffset against the X-offset getter, duplicating the
    // getter used on the previous line. Looks like it was meant to check a Y getter —
    // confirm against setMediaImageArg's implementation before changing.
    EXPECT_EQ(surfaceOffsets.yOffset, surfaceState.getXOffsetForUCb());
    EXPECT_EQ(surfaceOffsets.yOffsetForUVplane, surfaceState.getYOffsetForUCb());
    EXPECT_EQ(image->getGraphicsAllocation()->getGpuAddress() + surfaceOffsets.offset, surfaceState.getSurfaceBaseAddress());
    delete image;
}

// redescribe() must preserve the surface offsets, both for the parent NV12 image and for a
// UV-plane child image.
TEST_F(Nv12ImageTest, redescribedNV12ImageAndUVPlaneImageHasCorrectOffsets) {
    auto image = createImageWithFlags(CL_MEM_READ_ONLY | CL_MEM_ACCESS_FLAGS_UNRESTRICTED_INTEL);
    ASSERT_NE(nullptr, image);

    // Parent image: redescribed copy keeps identical offsets.
    auto imageRedescribed = image->redescribe();
    ASSERT_NE(nullptr, imageRedescribed);

    SurfaceOffsets imageOffsets, redescribedOffsets;
    image->getSurfaceOffsets(imageOffsets);
    imageRedescribed->getSurfaceOffsets(redescribedOffsets);

    EXPECT_EQ(imageOffsets.xOffset, redescribedOffsets.xOffset);
    EXPECT_EQ(imageOffsets.yOffset, redescribedOffsets.yOffset);
    EXPECT_EQ(imageOffsets.yOffsetForUVplane, redescribedOffsets.yOffsetForUVplane);

    delete imageRedescribed;

    // Now repeat the check for a UV-plane child image of the same parent.
    imageDesc.mem_object = image;
    imageFormat.image_channel_data_type = CL_UNORM_INT8;
    imageFormat.image_channel_order = CL_R;
    imageDesc.image_width = 0;
    imageDesc.image_height = 0;
    imageDesc.image_depth = 1; // UV plane

    // Create NV12 UV Plane image
    auto imageUVPlane = createImageWithFlags(CL_MEM_READ_WRITE);
    ASSERT_NE(nullptr, imageUVPlane);

    imageRedescribed = imageUVPlane->redescribe();
    ASSERT_NE(nullptr, imageRedescribed);

    imageUVPlane->getSurfaceOffsets(imageOffsets);
    imageRedescribed->getSurfaceOffsets(redescribedOffsets);

    EXPECT_EQ(imageOffsets.xOffset, redescribedOffsets.xOffset);
    EXPECT_EQ(imageOffsets.yOffset, redescribedOffsets.yOffset);
    EXPECT_EQ(imageOffsets.yOffsetForUVplane, redescribedOffsets.yOffsetForUVplane);

    delete imageRedescribed;
    delete imageUVPlane;
    delete image;
}

// Heights above the device's CL_DEVICE_PLANAR_YUV_MAX_HEIGHT_INTEL cap must be rejected
// with CL_INVALID_IMAGE_SIZE by Image::validatePlanarYUV.
TEST_F(Nv12ImageTest, invalidPlanarYUVImageHeight) {
    auto pDevice = context.getDevice(0);
    const size_t *maxHeight = nullptr;
    size_t srcSize = 0;
    size_t retSize = 0;

    ASSERT_NE(nullptr, pDevice);
    // Query the device cap; getCap returns a pointer to the cap value via maxHeight.
    pDevice->getCap<CL_DEVICE_PLANAR_YUV_MAX_HEIGHT_INTEL>(reinterpret_cast<const void *&>(maxHeight), srcSize, retSize);

    imageDesc.image_height = *maxHeight + 12; // exceed the cap
    retVal = Image::validatePlanarYUV(&context, flags, &imageDesc, nullptr);
    EXPECT_EQ(CL_INVALID_IMAGE_SIZE, retVal);
}

// Widths above CL_DEVICE_PLANAR_YUV_MAX_WIDTH_INTEL must likewise be rejected.
TEST_F(Nv12ImageTest, invalidPlanarYUVImageWidth) {
    auto pDevice = context.getDevice(0);
    const size_t *maxWidth = nullptr;
    size_t srcSize = 0;
    size_t retSize = 0;

    ASSERT_NE(nullptr, pDevice);
    pDevice->getCap<CL_DEVICE_PLANAR_YUV_MAX_WIDTH_INTEL>(reinterpret_cast<const void *&>(maxWidth), srcSize, retSize);

    imageDesc.image_width = *maxWidth + 12; // exceed the cap
    retVal = Image::validatePlanarYUV(&context, flags, &imageDesc, nullptr);
    EXPECT_EQ(CL_INVALID_IMAGE_SIZE, retVal);
}

// The fixture's default descriptor is within the height cap and must validate cleanly.
TEST_F(Nv12ImageTest, validPlanarYUVImageHeight) {
    retVal = Image::validatePlanarYUV(&context, flags, &imageDesc, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);
}

// The fixture's default descriptor is within the width cap and must validate cleanly.
TEST_F(Nv12ImageTest, validPlanarYUVImageWidth) {
    retVal = Image::validatePlanarYUV(&context, flags, &imageDesc, nullptr);
    EXPECT_EQ(CL_SUCCESS, retVal);
}
/********************************************************************************************************************** This file is part of the Control Toolbox (https://github.com/ethz-adrl/control-toolbox), copyright by ETH Zurich. Licensed under the BSD-2 license (see LICENSE file in main directory) **********************************************************************************************************************/ #include "HyQForwardZero.h" namespace ct { namespace models { namespace HyQ { HyQForwardZero::OUT_TYPE HyQForwardZero::forwardZero(const Eigen::VectorXd& x_in) { double* forwardZero = eval_.data(); v_[0] = 1 / cos(x_in[1]); v_[1] = cos(x_in[2]); v_[2] = 0 - v_[0]; v_[3] = sin(x_in[2]); forwardZero[0] = v_[0] * v_[1] * x_in[18] + v_[2] * v_[3] * x_in[19]; forwardZero[1] = v_[3] * x_in[18] + v_[1] * x_in[19]; v_[4] = sin(x_in[1]); forwardZero[2] = x_in[20] + v_[0] * v_[3] * v_[4] * x_in[19] + v_[2] * v_[1] * v_[4] * x_in[18]; v_[4] = 0.5 * x_in[0]; v_[3] = cos(v_[4]); v_[2] = 0.5 * x_in[1]; v_[1] = cos(v_[2]); v_[0] = v_[3] * v_[1]; v_[5] = 0.5 * x_in[2]; v_[6] = sin(v_[5]); v_[4] = sin(v_[4]); v_[2] = sin(v_[2]); v_[7] = v_[4] * v_[2]; v_[5] = cos(v_[5]); v_[8] = v_[0] * v_[6] + v_[7] * v_[5]; v_[9] = 2. * v_[8]; v_[1] = v_[4] * v_[1]; v_[2] = v_[3] * v_[2]; v_[10] = v_[1] * v_[5] + v_[2] * v_[6]; v_[11] = v_[9] * v_[10]; v_[2] = v_[2] * v_[5] - v_[1] * v_[6]; v_[1] = 2. * v_[2]; v_[5] = v_[0] * v_[5] - v_[7] * v_[6]; v_[7] = v_[1] * v_[5]; v_[6] = v_[1] * v_[10]; v_[0] = v_[9] * v_[5]; v_[8] = v_[9] * v_[8]; v_[1] = v_[1] * v_[2]; forwardZero[3] = (v_[11] + v_[7]) * x_in[23] + (v_[6] - v_[0]) * x_in[22] + (1 - v_[8] - v_[1]) * x_in[21]; v_[2] = v_[9] * v_[2]; v_[9] = 2. 
* v_[10]; v_[5] = v_[9] * v_[5]; v_[9] = v_[9] * v_[10]; forwardZero[4] = (v_[2] - v_[5]) * x_in[23] + (1 - v_[8] - v_[9]) * x_in[22] + (v_[6] + v_[0]) * x_in[21]; forwardZero[5] = (1 - v_[1] - v_[9]) * x_in[23] + (v_[2] + v_[5]) * x_in[22] + (v_[11] - v_[7]) * x_in[21]; v_[9] = cos(x_in[15]); v_[5] = cos(x_in[16]); v_[2] = cos(x_in[17]); v_[1] = -8.81e-05 * v_[2]; v_[7] = sin(x_in[17]); v_[11] = 0 - v_[7]; v_[8] = 8.81e-05 * v_[11]; v_[0] = 0.35 * v_[7]; v_[6] = 8.81e-05 * v_[0]; v_[10] = 2. * v_[6]; v_[12] = -0.35 * v_[2]; v_[13] = 0 - -8.81e-05 * v_[12]; v_[14] = 2. * v_[13]; v_[6] = 0 - v_[6]; v_[15] = -0.000102 + v_[10] * v_[11] + v_[14] * v_[2] - v_[2] * v_[13] - v_[7] * v_[6]; v_[16] = -0.00185880198235362 * v_[0] - 0.414812462825713 * v_[12]; v_[17] = 0.881 * v_[12]; v_[18] = -0.1104774 + v_[17]; v_[19] = v_[16] + v_[18]; v_[20] = 0.880992588508842 * v_[0] - -0.00185880198235362 * v_[12]; v_[21] = 0 - 0.881 * v_[0]; v_[22] = -0.0004405 + v_[21]; v_[23] = v_[20] + v_[22]; v_[18] = 0 - v_[18]; v_[24] = v_[2] * v_[18] + v_[11] * v_[22]; v_[25] = 0.47422804 + v_[19] * v_[2] + v_[23] * v_[7] + v_[24]; v_[20] = v_[20] * v_[0] - v_[16] * v_[12]; v_[16] = 0.089871 + v_[20]; v_[26] = v_[25] / v_[16]; v_[27] = v_[1] * v_[2] + v_[8] * v_[7] - v_[15] * v_[26]; v_[28] = sin(x_in[16]); v_[29] = 0 - v_[28]; v_[30] = -8.81e-05 * v_[7]; v_[31] = 8.81e-05 * v_[2]; v_[6] = 2.1e-05 + v_[10] * v_[2] + v_[14] * v_[7] + v_[11] * v_[13] + v_[2] * v_[6]; v_[14] = v_[30] * v_[2] + v_[31] * v_[7] - v_[6] * v_[26]; v_[13] = v_[5] * v_[27] + v_[29] * v_[14]; v_[18] = v_[7] * v_[18] + v_[2] * v_[22]; v_[23] = -0.0825825 + v_[19] * v_[11] + v_[23] * v_[2] - v_[18]; v_[19] = v_[23] / v_[16]; v_[8] = v_[1] * v_[11] + v_[8] * v_[2] - v_[15] * v_[19]; v_[31] = v_[30] * v_[11] + v_[31] * v_[2] - v_[6] * v_[19]; v_[30] = v_[5] * v_[8] + v_[29] * v_[31]; v_[1] = -0.0004405 * v_[12] + v_[21] * v_[12] - -0.1104774 * v_[0]; v_[17] = 0.026409 + -0.2209548 * v_[12] + v_[17] * v_[12] - v_[20]; v_[12] 
= v_[7] * v_[1] + v_[2] * v_[17]; v_[21] = 0.000468 - -0.000881 * v_[0] - v_[21] * v_[0] - v_[20]; v_[1] = v_[7] * v_[21] + v_[2] * v_[1]; v_[22] = v_[6] / v_[16]; v_[10] = -0.007418 + v_[12] * v_[11] + v_[1] * v_[2] - v_[15] * v_[22]; v_[18] = 0.0825825 + v_[18]; v_[32] = 0 - v_[5]; v_[33] = 0.08 * v_[32]; v_[34] = 0.08 * v_[28]; v_[35] = 0 - 4.027 * v_[34]; v_[24] = -0.47422804 - v_[24]; v_[36] = v_[10] + v_[18] * v_[33] + v_[35] * v_[33] - v_[24] * v_[34]; v_[1] = v_[1] * v_[7] + v_[12] * v_[2]; v_[22] = 0.087136 + v_[20] + v_[1] - v_[6] * v_[22]; v_[12] = 4.027 * v_[33]; v_[37] = -7.4114911576828e-06 * v_[7] + -0.00185880198235362 * v_[2]; v_[38] = -0.00185880198235362 * v_[7] + -0.466187537174287 * v_[2]; v_[39] = v_[37] * v_[7] + v_[38] * v_[2]; v_[19] = 3.56080505133456 - v_[39] - v_[23] * v_[19]; v_[38] = v_[38] * v_[11] + v_[37] * v_[2] - v_[23] * v_[26]; v_[37] = v_[34] * v_[19] - v_[33] * v_[38]; v_[39] = 4.027 + v_[39] - v_[25] * v_[26]; v_[26] = v_[34] * v_[38] - v_[33] * v_[39]; v_[40] = v_[37] * v_[34] - v_[26] * v_[33]; v_[41] = 2. * v_[24] * v_[33] + v_[22] + v_[12] * v_[33] - v_[40]; v_[42] = v_[29] * v_[36] + v_[32] * v_[41]; v_[1] = 0.005495 + v_[17] + v_[21] + v_[20] - v_[1] - v_[15] * v_[15] / v_[16]; v_[21] = v_[1] - 2. 
* v_[18] * v_[34] - v_[35] * v_[34] - v_[40]; v_[36] = v_[29] * v_[21] + v_[32] * v_[36]; v_[17] = -0.022734 + v_[42] * v_[29] + v_[36] * v_[5]; v_[20] = v_[29] * v_[8] + v_[32] * v_[31]; v_[43] = v_[29] * v_[27] + v_[32] * v_[14]; v_[44] = v_[20] * v_[5] + v_[43] * v_[29]; v_[36] = v_[36] * v_[29] + v_[42] * v_[32]; v_[42] = 0.011033 + v_[40] + v_[36]; v_[45] = v_[44] / v_[42]; v_[46] = v_[13] * v_[29] + v_[30] * v_[5] - v_[17] * v_[45]; v_[47] = 0.207 * v_[9]; v_[48] = sin(x_in[15]); v_[49] = 0.3735 * v_[48]; v_[50] = 0 - v_[9]; v_[51] = -0.3735 * v_[9]; v_[52] = v_[48] * v_[49] + v_[50] * v_[51]; v_[12] = v_[12] + v_[24]; v_[53] = 0 - v_[12]; v_[35] = v_[35] + v_[18]; v_[54] = v_[5] * v_[53] + v_[29] * v_[35]; v_[55] = 0.1466472 + v_[54]; v_[56] = v_[55] / v_[42]; v_[57] = 0 - v_[44] * v_[56]; v_[58] = v_[52] * v_[57]; v_[59] = v_[39] - 4.027; v_[60] = v_[29] * v_[38] + v_[32] * v_[59]; v_[61] = v_[19] - 4.027; v_[62] = v_[29] * v_[61] + v_[32] * v_[38]; v_[43] = v_[20] * v_[29] + v_[43] * v_[32]; v_[20] = v_[43] / v_[42]; v_[63] = v_[60] * v_[29] + v_[62] * v_[5] - v_[44] * v_[20]; v_[64] = v_[47] * v_[63]; v_[65] = v_[58] - v_[64]; v_[53] = v_[29] * v_[53] + v_[32] * v_[35]; v_[66] = -0.5824264 - v_[53] - v_[17] * v_[56]; v_[67] = 0.207 * v_[48]; v_[68] = 7.467 - v_[55] * v_[56]; v_[69] = 0 - v_[55] * v_[20]; v_[70] = v_[52] * v_[68] - v_[47] * v_[69]; v_[71] = v_[46] * v_[47] + v_[65] * v_[47] - v_[66] * v_[67] - v_[70] * v_[67]; v_[72] = v_[8] * v_[34] - v_[27] * v_[33]; v_[33] = v_[31] * v_[34] - v_[14] * v_[33]; v_[73] = 0 - v_[33]; v_[74] = -5.1e-05 + v_[29] * v_[72] + v_[5] * v_[73]; v_[56] = 0 - v_[74] * v_[56]; v_[75] = v_[67] * v_[69]; v_[58] = v_[75] - v_[58]; v_[12] = v_[26] + v_[12]; v_[35] = v_[37] + v_[35]; v_[53] = 0.5824264 + v_[12] * v_[29] + v_[35] * v_[5] + v_[53] - v_[74] * v_[45]; v_[62] = v_[62] * v_[29] + v_[60] * v_[32]; v_[61] = 7.467 + v_[59] + v_[61] - v_[62] - v_[44] * v_[45]; v_[59] = v_[67] * v_[63] - v_[52] * v_[61]; v_[45] = 
v_[56] * v_[67] + v_[58] * v_[67] - v_[53] * v_[47] - v_[59] * v_[47]; v_[60] = 0 - v_[9] * v_[71] - v_[48] * v_[45]; v_[37] = cos(x_in[12]); v_[26] = 0 - v_[37]; v_[76] = cos(x_in[13]); v_[77] = sin(x_in[14]); v_[78] = 0.35 * v_[77]; v_[79] = cos(x_in[14]); v_[80] = -0.35 * v_[79]; v_[81] = -0.00370574557885489 * v_[78] - 0.417781802643138 * v_[80]; v_[82] = 0.881 * v_[80]; v_[83] = -0.110125 + v_[82]; v_[84] = v_[81] + v_[83]; v_[85] = 0.880970354035369 * v_[78] - -0.00370574557885489 * v_[80]; v_[86] = 0 - 0.881 * v_[78]; v_[87] = -0.000881 + v_[86]; v_[88] = v_[85] + v_[87]; v_[83] = 0 - v_[83]; v_[89] = 0 - v_[77]; v_[90] = v_[79] * v_[83] + v_[89] * v_[87]; v_[91] = 0.47422804 + v_[84] * v_[79] + v_[88] * v_[77] + v_[90]; v_[85] = v_[85] * v_[78] - v_[81] * v_[80]; v_[81] = 0.089871 + v_[85]; v_[92] = v_[91] / v_[81]; v_[93] = 0 - -0.000102 * v_[92]; v_[94] = sin(x_in[13]); v_[95] = 0 - v_[94]; v_[96] = 0 - 2.1e-05 * v_[92]; v_[97] = v_[76] * v_[93] + v_[95] * v_[96]; v_[83] = v_[77] * v_[83] + v_[79] * v_[87]; v_[88] = -0.0825825 + v_[84] * v_[89] + v_[88] * v_[79] - v_[83]; v_[84] = v_[88] / v_[81]; v_[87] = 0 - -0.000102 * v_[84]; v_[98] = 0 - 2.1e-05 * v_[84]; v_[99] = v_[76] * v_[87] + v_[95] * v_[98]; v_[100] = -0.000881 * v_[80] + v_[86] * v_[80] - -0.110125 * v_[78]; v_[82] = 0.026409 + -0.22025 * v_[80] + v_[82] * v_[80] - v_[85]; v_[80] = v_[77] * v_[100] + v_[79] * v_[82]; v_[86] = 0.000468 - -0.001762 * v_[78] - v_[86] * v_[78] - v_[85]; v_[100] = v_[77] * v_[86] + v_[79] * v_[100]; v_[101] = 2.1e-05 / v_[81]; v_[102] = -0.007418 + v_[80] * v_[89] + v_[100] * v_[79] - -0.000102 * v_[101]; v_[83] = 0.0825825 + v_[83]; v_[103] = -0.08 * v_[76]; v_[104] = -0.08 * v_[94]; v_[105] = -1 * v_[104]; v_[106] = 0 - 4.027 * v_[105]; v_[90] = -0.47422804 - v_[90]; v_[107] = v_[102] + v_[83] * v_[103] + v_[106] * v_[103] - v_[90] * v_[105]; v_[100] = v_[100] * v_[77] + v_[80] * v_[79]; v_[101] = 0.087136 + v_[85] + v_[100] - 2.1e-05 * v_[101]; v_[80] = 4.027 * 
v_[103]; v_[108] = -2.96459646308422e-05 * v_[77] + -0.00370574557885489 * v_[79]; v_[109] = -0.00370574557885489 * v_[77] + -0.463218197356862 * v_[79]; v_[110] = v_[108] * v_[77] + v_[109] * v_[79]; v_[84] = 3.56375215667851 - v_[110] - v_[88] * v_[84]; v_[109] = v_[109] * v_[89] + v_[108] * v_[79] - v_[88] * v_[92]; v_[108] = v_[105] * v_[84] - v_[103] * v_[109]; v_[110] = 4.027 + v_[110] - v_[91] * v_[92]; v_[92] = v_[105] * v_[109] - v_[103] * v_[110]; v_[111] = v_[108] * v_[105] - v_[92] * v_[103]; v_[112] = 2. * v_[90] * v_[103] + v_[101] + v_[80] * v_[103] - v_[111]; v_[113] = v_[94] * v_[107] + v_[76] * v_[112]; v_[100] = 0.005495 + v_[82] + v_[86] + v_[85] - v_[100] - -0.000102 * -0.000102 / v_[81]; v_[86] = v_[100] - 2. * v_[83] * v_[105] - v_[106] * v_[105] - v_[111]; v_[107] = v_[94] * v_[86] + v_[76] * v_[107]; v_[82] = 0.022734 + v_[113] * v_[95] + v_[107] * v_[76]; v_[85] = v_[94] * v_[87] + v_[76] * v_[98]; v_[114] = v_[94] * v_[93] + v_[76] * v_[96]; v_[115] = v_[85] * v_[76] + v_[114] * v_[95]; v_[107] = v_[107] * v_[94] + v_[113] * v_[76]; v_[113] = 0.011033 + v_[111] + v_[107]; v_[116] = v_[115] / v_[113]; v_[117] = v_[97] * v_[95] + v_[99] * v_[76] - v_[82] * v_[116]; v_[118] = -0.207 * v_[37]; v_[119] = -1 * v_[118]; v_[120] = sin(x_in[12]); v_[121] = 0 - v_[120]; v_[122] = 0.3735 * v_[120]; v_[123] = 0.3735 * v_[37]; v_[124] = v_[121] * v_[122] + v_[26] * v_[123]; v_[80] = v_[80] + v_[90]; v_[125] = 0 - v_[80]; v_[106] = v_[106] + v_[83]; v_[126] = v_[76] * v_[125] + v_[95] * v_[106]; v_[127] = 0.1466472 + v_[126]; v_[128] = v_[127] / v_[113]; v_[129] = 0 - v_[115] * v_[128]; v_[130] = v_[124] * v_[129]; v_[131] = v_[110] - 4.027; v_[132] = v_[94] * v_[109] + v_[76] * v_[131]; v_[133] = v_[84] - 4.027; v_[134] = v_[94] * v_[133] + v_[76] * v_[109]; v_[114] = v_[85] * v_[94] + v_[114] * v_[76]; v_[85] = v_[114] / v_[113]; v_[135] = v_[132] * v_[95] + v_[134] * v_[76] - v_[115] * v_[85]; v_[136] = v_[119] * v_[135]; v_[137] = v_[130] - 
v_[136]; v_[125] = v_[94] * v_[125] + v_[76] * v_[106]; v_[138] = 0.5824264 - v_[125] - v_[82] * v_[128]; v_[139] = 0.207 * v_[120]; v_[140] = 7.467 - v_[127] * v_[128]; v_[141] = 0 - v_[127] * v_[85]; v_[142] = v_[124] * v_[140] - v_[119] * v_[141]; v_[143] = v_[117] * v_[119] + v_[137] * v_[119] - v_[138] * v_[139] - v_[142] * v_[139]; v_[144] = v_[87] * v_[105] - v_[93] * v_[103]; v_[105] = v_[98] * v_[105] - v_[96] * v_[103]; v_[103] = 0 - v_[105]; v_[145] = -5.1e-05 + v_[95] * v_[144] + v_[76] * v_[103]; v_[128] = 0 - v_[145] * v_[128]; v_[146] = v_[139] * v_[141]; v_[130] = v_[146] - v_[130]; v_[80] = -1 * (v_[92] + v_[80]); v_[106] = -1 * (v_[108] + v_[106]); v_[125] = -0.5824264 + v_[80] * v_[95] + v_[106] * v_[76] + v_[125] - v_[145] * v_[116]; v_[134] = v_[134] * v_[94] + v_[132] * v_[76]; v_[133] = 7.467 + v_[131] + v_[133] - v_[134] - v_[115] * v_[116]; v_[131] = v_[139] * v_[135] - v_[124] * v_[133]; v_[116] = v_[128] * v_[139] + v_[130] * v_[139] - v_[125] * v_[119] - v_[131] * v_[119]; v_[132] = 0 - v_[26] * v_[143] - v_[121] * v_[116]; v_[108] = cos(x_in[9]); v_[92] = cos(x_in[10]); v_[147] = cos(x_in[11]); v_[148] = 8.81e-05 * v_[147]; v_[149] = sin(x_in[11]); v_[150] = 0 - v_[149]; v_[151] = -8.81e-05 * v_[150]; v_[152] = 0.35 * v_[149]; v_[153] = -8.81e-05 * v_[152]; v_[154] = 2. * v_[153]; v_[155] = -0.35 * v_[147]; v_[156] = 0 - 8.81e-05 * v_[155]; v_[157] = 2. 
* v_[156]; v_[153] = 0 - v_[153]; v_[158] = 0.000102 + v_[154] * v_[150] + v_[157] * v_[147] - v_[147] * v_[156] - v_[149] * v_[153]; v_[159] = 0.00185880198235362 * v_[152] - 0.414812462825713 * v_[155]; v_[160] = 0.881 * v_[155]; v_[161] = -0.1104774 + v_[160]; v_[162] = v_[159] + v_[161]; v_[163] = 0.880992588508842 * v_[152] - 0.00185880198235362 * v_[155]; v_[164] = 0 - 0.881 * v_[152]; v_[165] = 0.0004405 + v_[164]; v_[166] = v_[163] + v_[165]; v_[161] = 0 - v_[161]; v_[167] = v_[147] * v_[161] + v_[150] * v_[165]; v_[168] = 0.47422804 + v_[162] * v_[147] + v_[166] * v_[149] + v_[167]; v_[163] = v_[163] * v_[152] - v_[159] * v_[155]; v_[159] = 0.089871 + v_[163]; v_[169] = v_[168] / v_[159]; v_[170] = v_[148] * v_[147] + v_[151] * v_[149] - v_[158] * v_[169]; v_[171] = sin(x_in[10]); v_[172] = 0 - v_[171]; v_[173] = 8.81e-05 * v_[149]; v_[174] = -8.81e-05 * v_[147]; v_[153] = 2.1e-05 + v_[154] * v_[147] + v_[157] * v_[149] + v_[150] * v_[156] + v_[147] * v_[153]; v_[157] = v_[173] * v_[147] + v_[174] * v_[149] - v_[153] * v_[169]; v_[156] = v_[92] * v_[170] + v_[172] * v_[157]; v_[161] = v_[149] * v_[161] + v_[147] * v_[165]; v_[166] = 0.0825825 + v_[162] * v_[150] + v_[166] * v_[147] - v_[161]; v_[162] = v_[166] / v_[159]; v_[151] = v_[148] * v_[150] + v_[151] * v_[147] - v_[158] * v_[162]; v_[174] = v_[173] * v_[150] + v_[174] * v_[147] - v_[153] * v_[162]; v_[173] = v_[92] * v_[151] + v_[172] * v_[174]; v_[148] = 0.0004405 * v_[155] + v_[164] * v_[155] - -0.1104774 * v_[152]; v_[160] = 0.026409 + -0.2209548 * v_[155] + v_[160] * v_[155] - v_[163]; v_[155] = v_[149] * v_[148] + v_[147] * v_[160]; v_[164] = 0.000468 - 0.000881 * v_[152] - v_[164] * v_[152] - v_[163]; v_[148] = v_[149] * v_[164] + v_[147] * v_[148]; v_[165] = v_[153] / v_[159]; v_[154] = 0.007418 + v_[155] * v_[150] + v_[148] * v_[147] - v_[158] * v_[165]; v_[161] = -0.0825825 + v_[161]; v_[175] = 0 - v_[92]; v_[176] = 0.08 * v_[175]; v_[177] = 0.08 * v_[171]; v_[178] = 0 - 4.027 * v_[177]; 
v_[167] = -0.47422804 - v_[167]; v_[179] = v_[154] + v_[161] * v_[176] + v_[178] * v_[176] - v_[167] * v_[177]; v_[148] = v_[148] * v_[149] + v_[155] * v_[147]; v_[165] = 0.087136 + v_[163] + v_[148] - v_[153] * v_[165]; v_[155] = 4.027 * v_[176]; v_[180] = -7.4114911576828e-06 * v_[149] + 0.00185880198235362 * v_[147]; v_[181] = 0.00185880198235362 * v_[149] + -0.466187537174287 * v_[147]; v_[182] = v_[180] * v_[149] + v_[181] * v_[147]; v_[162] = 3.56080505133456 - v_[182] - v_[166] * v_[162]; v_[181] = v_[181] * v_[150] + v_[180] * v_[147] - v_[166] * v_[169]; v_[180] = v_[177] * v_[162] - v_[176] * v_[181]; v_[182] = 4.027 + v_[182] - v_[168] * v_[169]; v_[169] = v_[177] * v_[181] - v_[176] * v_[182]; v_[183] = v_[180] * v_[177] - v_[169] * v_[176]; v_[184] = 2. * v_[167] * v_[176] + v_[165] + v_[155] * v_[176] - v_[183]; v_[185] = v_[172] * v_[179] + v_[175] * v_[184]; v_[148] = 0.005495 + v_[160] + v_[164] + v_[163] - v_[148] - v_[158] * v_[158] / v_[159]; v_[164] = v_[148] - 2. * v_[161] * v_[177] - v_[178] * v_[177] - v_[183]; v_[179] = v_[172] * v_[164] + v_[175] * v_[179]; v_[160] = 0.022734 + v_[185] * v_[172] + v_[179] * v_[92]; v_[163] = v_[172] * v_[151] + v_[175] * v_[174]; v_[186] = v_[172] * v_[170] + v_[175] * v_[157]; v_[187] = v_[163] * v_[92] + v_[186] * v_[172]; v_[179] = v_[179] * v_[172] + v_[185] * v_[175]; v_[185] = 0.011033 + v_[183] + v_[179]; v_[188] = v_[187] / v_[185]; v_[189] = v_[156] * v_[172] + v_[173] * v_[92] - v_[160] * v_[188]; v_[190] = 0.207 * v_[108]; v_[191] = sin(x_in[9]); v_[192] = -0.3735 * v_[191]; v_[193] = 0 - v_[108]; v_[194] = 0.3735 * v_[108]; v_[195] = v_[191] * v_[192] + v_[193] * v_[194]; v_[155] = v_[155] + v_[167]; v_[196] = 0 - v_[155]; v_[178] = v_[178] + v_[161]; v_[197] = v_[92] * v_[196] + v_[172] * v_[178]; v_[198] = 0.1466472 + v_[197]; v_[199] = v_[198] / v_[185]; v_[200] = 0 - v_[187] * v_[199]; v_[201] = v_[195] * v_[200]; v_[202] = v_[182] - 4.027; v_[203] = v_[172] * v_[181] + v_[175] * v_[202]; 
v_[204] = v_[162] - 4.027; v_[205] = v_[172] * v_[204] + v_[175] * v_[181]; v_[186] = v_[163] * v_[172] + v_[186] * v_[175]; v_[163] = v_[186] / v_[185]; v_[206] = v_[203] * v_[172] + v_[205] * v_[92] - v_[187] * v_[163]; v_[207] = v_[190] * v_[206]; v_[208] = v_[201] - v_[207]; v_[196] = v_[172] * v_[196] + v_[175] * v_[178]; v_[209] = 0.5824264 - v_[196] - v_[160] * v_[199]; v_[210] = 0.207 * v_[191]; v_[211] = 7.467 - v_[198] * v_[199]; v_[212] = 0 - v_[198] * v_[163]; v_[213] = v_[195] * v_[211] - v_[190] * v_[212]; v_[214] = v_[189] * v_[190] + v_[208] * v_[190] - v_[209] * v_[210] - v_[213] * v_[210]; v_[215] = v_[151] * v_[177] - v_[170] * v_[176]; v_[176] = v_[174] * v_[177] - v_[157] * v_[176]; v_[216] = 0 - v_[176]; v_[217] = -5.1e-05 + v_[172] * v_[215] + v_[92] * v_[216]; v_[199] = 0 - v_[217] * v_[199]; v_[218] = v_[210] * v_[212]; v_[201] = v_[218] - v_[201]; v_[155] = v_[169] + v_[155]; v_[178] = v_[180] + v_[178]; v_[196] = -0.5824264 + v_[155] * v_[172] + v_[178] * v_[92] + v_[196] - v_[217] * v_[188]; v_[205] = v_[205] * v_[172] + v_[203] * v_[175]; v_[204] = 7.467 + v_[202] + v_[204] - v_[205] - v_[187] * v_[188]; v_[202] = v_[210] * v_[206] - v_[195] * v_[204]; v_[188] = v_[199] * v_[210] + v_[201] * v_[210] - v_[196] * v_[190] - v_[202] * v_[190]; v_[203] = 0 - v_[108] * v_[214] - v_[191] * v_[188]; v_[180] = cos(x_in[6]); v_[169] = 0 - v_[180]; v_[219] = cos(x_in[7]); v_[220] = cos(x_in[8]); v_[221] = 8.81e-05 * v_[220]; v_[222] = sin(x_in[8]); v_[223] = 0 - v_[222]; v_[224] = -8.81e-05 * v_[223]; v_[225] = 0.35 * v_[222]; v_[226] = -8.81e-05 * v_[225]; v_[227] = 2. * v_[226]; v_[228] = -0.35 * v_[220]; v_[229] = 0 - 8.81e-05 * v_[228]; v_[230] = 2. 
* v_[229]; v_[226] = 0 - v_[226]; v_[231] = 0.000102 + v_[227] * v_[223] + v_[230] * v_[220] - v_[220] * v_[229] - v_[222] * v_[226]; v_[232] = 0.00185880198235362 * v_[225] - 0.414812462825713 * v_[228]; v_[233] = 0.881 * v_[228]; v_[234] = -0.1104774 + v_[233]; v_[235] = v_[232] + v_[234]; v_[236] = 0.880992588508842 * v_[225] - 0.00185880198235362 * v_[228]; v_[237] = 0 - 0.881 * v_[225]; v_[238] = 0.0004405 + v_[237]; v_[239] = v_[236] + v_[238]; v_[234] = 0 - v_[234]; v_[240] = v_[220] * v_[234] + v_[223] * v_[238]; v_[241] = 0.47422804 + v_[235] * v_[220] + v_[239] * v_[222] + v_[240]; v_[236] = v_[236] * v_[225] - v_[232] * v_[228]; v_[232] = 0.089871 + v_[236]; v_[242] = v_[241] / v_[232]; v_[243] = v_[221] * v_[220] + v_[224] * v_[222] - v_[231] * v_[242]; v_[244] = sin(x_in[7]); v_[245] = 0 - v_[244]; v_[246] = 8.81e-05 * v_[222]; v_[247] = -8.81e-05 * v_[220]; v_[226] = 2.1e-05 + v_[227] * v_[220] + v_[230] * v_[222] + v_[223] * v_[229] + v_[220] * v_[226]; v_[230] = v_[246] * v_[220] + v_[247] * v_[222] - v_[226] * v_[242]; v_[229] = v_[219] * v_[243] + v_[245] * v_[230]; v_[234] = v_[222] * v_[234] + v_[220] * v_[238]; v_[239] = 0.0825825 + v_[235] * v_[223] + v_[239] * v_[220] - v_[234]; v_[235] = v_[239] / v_[232]; v_[224] = v_[221] * v_[223] + v_[224] * v_[220] - v_[231] * v_[235]; v_[247] = v_[246] * v_[223] + v_[247] * v_[220] - v_[226] * v_[235]; v_[246] = v_[219] * v_[224] + v_[245] * v_[247]; v_[221] = 0.0004405 * v_[228] + v_[237] * v_[228] - -0.1104774 * v_[225]; v_[233] = 0.026409 + -0.2209548 * v_[228] + v_[233] * v_[228] - v_[236]; v_[228] = v_[222] * v_[221] + v_[220] * v_[233]; v_[237] = 0.000468 - 0.000881 * v_[225] - v_[237] * v_[225] - v_[236]; v_[221] = v_[222] * v_[237] + v_[220] * v_[221]; v_[238] = v_[226] / v_[232]; v_[227] = 0.007418 + v_[228] * v_[223] + v_[221] * v_[220] - v_[231] * v_[238]; v_[234] = -0.0825825 + v_[234]; v_[248] = -0.08 * v_[219]; v_[249] = -0.08 * v_[244]; v_[250] = -1 * v_[249]; v_[251] = 0 - 4.027 * 
v_[250]; v_[240] = -0.47422804 - v_[240]; v_[252] = v_[227] + v_[234] * v_[248] + v_[251] * v_[248] - v_[240] * v_[250]; v_[221] = v_[221] * v_[222] + v_[228] * v_[220]; v_[238] = 0.087136 + v_[236] + v_[221] - v_[226] * v_[238]; v_[228] = 4.027 * v_[248]; v_[253] = -7.4114911576828e-06 * v_[222] + 0.00185880198235362 * v_[220]; v_[254] = 0.00185880198235362 * v_[222] + -0.466187537174287 * v_[220]; v_[255] = v_[253] * v_[222] + v_[254] * v_[220]; v_[235] = 3.56080505133456 - v_[255] - v_[239] * v_[235]; v_[254] = v_[254] * v_[223] + v_[253] * v_[220] - v_[239] * v_[242]; v_[253] = v_[250] * v_[235] - v_[248] * v_[254]; v_[255] = 4.027 + v_[255] - v_[241] * v_[242]; v_[242] = v_[250] * v_[254] - v_[248] * v_[255]; v_[256] = v_[253] * v_[250] - v_[242] * v_[248]; v_[257] = 2. * v_[240] * v_[248] + v_[238] + v_[228] * v_[248] - v_[256]; v_[258] = v_[244] * v_[252] + v_[219] * v_[257]; v_[221] = 0.005495 + v_[233] + v_[237] + v_[236] - v_[221] - v_[231] * v_[231] / v_[232]; v_[237] = v_[221] - 2. 
* v_[234] * v_[250] - v_[251] * v_[250] - v_[256]; v_[252] = v_[244] * v_[237] + v_[219] * v_[252]; v_[233] = -0.022734 + v_[258] * v_[245] + v_[252] * v_[219]; v_[236] = v_[244] * v_[224] + v_[219] * v_[247]; v_[259] = v_[244] * v_[243] + v_[219] * v_[230]; v_[260] = v_[236] * v_[219] + v_[259] * v_[245]; v_[252] = v_[252] * v_[244] + v_[258] * v_[219]; v_[258] = 0.011033 + v_[256] + v_[252]; v_[261] = v_[260] / v_[258]; v_[262] = v_[229] * v_[245] + v_[246] * v_[219] - v_[233] * v_[261]; v_[263] = -0.207 * v_[180]; v_[264] = -1 * v_[263]; v_[265] = sin(x_in[6]); v_[266] = 0 - v_[265]; v_[267] = -0.3735 * v_[265]; v_[268] = -0.3735 * v_[180]; v_[269] = v_[266] * v_[267] + v_[169] * v_[268]; v_[228] = v_[228] + v_[240]; v_[270] = 0 - v_[228]; v_[251] = v_[251] + v_[234]; v_[271] = v_[219] * v_[270] + v_[245] * v_[251]; v_[272] = 0.1466472 + v_[271]; v_[273] = v_[272] / v_[258]; v_[274] = 0 - v_[260] * v_[273]; v_[275] = v_[269] * v_[274]; v_[276] = v_[255] - 4.027; v_[277] = v_[244] * v_[254] + v_[219] * v_[276]; v_[278] = v_[235] - 4.027; v_[279] = v_[244] * v_[278] + v_[219] * v_[254]; v_[259] = v_[236] * v_[244] + v_[259] * v_[219]; v_[236] = v_[259] / v_[258]; v_[280] = v_[277] * v_[245] + v_[279] * v_[219] - v_[260] * v_[236]; v_[281] = v_[264] * v_[280]; v_[282] = v_[275] - v_[281]; v_[270] = v_[244] * v_[270] + v_[219] * v_[251]; v_[283] = -0.5824264 - v_[270] - v_[233] * v_[273]; v_[284] = 0.207 * v_[265]; v_[285] = 7.467 - v_[272] * v_[273]; v_[286] = 0 - v_[272] * v_[236]; v_[287] = v_[269] * v_[285] - v_[264] * v_[286]; v_[288] = v_[262] * v_[264] + v_[282] * v_[264] - v_[283] * v_[284] - v_[287] * v_[284]; v_[289] = v_[224] * v_[250] - v_[243] * v_[248]; v_[250] = v_[247] * v_[250] - v_[230] * v_[248]; v_[248] = 0 - v_[250]; v_[290] = -5.1e-05 + v_[245] * v_[289] + v_[219] * v_[248]; v_[273] = 0 - v_[290] * v_[273]; v_[291] = v_[284] * v_[286]; v_[275] = v_[291] - v_[275]; v_[228] = -1 * (v_[242] + v_[228]); v_[251] = -1 * (v_[253] + v_[251]); v_[270] = 
0.5824264 + v_[228] * v_[245] + v_[251] * v_[219] + v_[270] - v_[290] * v_[261]; v_[279] = v_[279] * v_[244] + v_[277] * v_[219]; v_[278] = 7.467 + v_[276] + v_[278] - v_[279] - v_[260] * v_[261]; v_[276] = v_[284] * v_[280] - v_[269] * v_[278]; v_[261] = v_[273] * v_[284] + v_[275] * v_[284] - v_[270] * v_[264] - v_[276] * v_[264]; v_[277] = 0 - v_[169] * v_[288] - v_[266] * v_[261]; v_[35] = -0.1466472 + v_[12] * v_[32] + v_[35] * v_[29] - v_[54] - v_[74] * v_[20]; v_[12] = v_[74] / v_[42]; v_[54] = 0.144171 + v_[40] - v_[74] * v_[12]; v_[62] = 7.467 + v_[62] - v_[43] * v_[20]; v_[253] = v_[67] * v_[62] - v_[52] * v_[63]; v_[242] = v_[47] * v_[61] - v_[67] * v_[57]; v_[292] = v_[47] * v_[57] - v_[67] * v_[68]; v_[293] = v_[242] * v_[47] - v_[292] * v_[67]; v_[294] = 2. * v_[35] * v_[67] + v_[54] + v_[253] * v_[67] - 2. * v_[53] * v_[52] - v_[59] * v_[52] - v_[293]; v_[36] = 0.134705 + v_[41] + v_[21] + v_[40] - v_[36] - v_[17] * v_[17] / v_[42]; v_[20] = v_[13] * v_[32] + v_[30] * v_[29] - v_[17] * v_[20]; v_[30] = v_[52] * v_[69] - v_[47] * v_[62]; v_[13] = 2. * v_[66] * v_[52] + v_[36] + v_[70] * v_[52] - 2. * v_[20] * v_[47] - v_[30] * v_[47] - v_[293]; v_[106] = -0.1466472 + v_[80] * v_[76] + v_[106] * v_[94] - v_[126] - v_[145] * v_[85]; v_[80] = v_[145] / v_[113]; v_[126] = 0.144171 + v_[111] - v_[145] * v_[80]; v_[134] = 7.467 + v_[134] - v_[114] * v_[85]; v_[21] = v_[139] * v_[134] - v_[124] * v_[135]; v_[41] = v_[119] * v_[133] - v_[139] * v_[129]; v_[40] = v_[119] * v_[129] - v_[139] * v_[140]; v_[295] = v_[41] * v_[119] - v_[40] * v_[139]; v_[296] = 2. * v_[106] * v_[139] + v_[126] + v_[21] * v_[139] - 2. * v_[125] * v_[124] - v_[131] * v_[124] - v_[295]; v_[107] = 0.134705 + v_[112] + v_[86] + v_[111] - v_[107] - v_[82] * v_[82] / v_[113]; v_[85] = v_[97] * v_[76] + v_[99] * v_[94] - v_[82] * v_[85]; v_[99] = v_[124] * v_[141] - v_[119] * v_[134]; v_[97] = 2. * v_[138] * v_[124] + v_[107] + v_[142] * v_[124] - 2. 
* v_[85] * v_[119] - v_[99] * v_[119] - v_[295]; v_[178] = -0.1466472 + v_[155] * v_[175] + v_[178] * v_[172] - v_[197] - v_[217] * v_[163]; v_[155] = v_[217] / v_[185]; v_[197] = 0.144171 + v_[183] - v_[217] * v_[155]; v_[205] = 7.467 + v_[205] - v_[186] * v_[163]; v_[86] = v_[210] * v_[205] - v_[195] * v_[206]; v_[112] = v_[190] * v_[204] - v_[210] * v_[200]; v_[111] = v_[190] * v_[200] - v_[210] * v_[211]; v_[297] = v_[112] * v_[190] - v_[111] * v_[210]; v_[298] = 2. * v_[178] * v_[210] + v_[197] + v_[86] * v_[210] - 2. * v_[196] * v_[195] - v_[202] * v_[195] - v_[297]; v_[179] = 0.134705 + v_[184] + v_[164] + v_[183] - v_[179] - v_[160] * v_[160] / v_[185]; v_[163] = v_[156] * v_[175] + v_[173] * v_[172] - v_[160] * v_[163]; v_[173] = v_[195] * v_[212] - v_[190] * v_[205]; v_[156] = 2. * v_[209] * v_[195] + v_[179] + v_[213] * v_[195] - 2. * v_[163] * v_[190] - v_[173] * v_[190] - v_[297]; v_[251] = -0.1466472 + v_[228] * v_[219] + v_[251] * v_[244] - v_[271] - v_[290] * v_[236]; v_[228] = v_[290] / v_[258]; v_[271] = 0.144171 + v_[256] - v_[290] * v_[228]; v_[279] = 7.467 + v_[279] - v_[259] * v_[236]; v_[164] = v_[284] * v_[279] - v_[269] * v_[280]; v_[184] = v_[264] * v_[278] - v_[284] * v_[274]; v_[183] = v_[264] * v_[274] - v_[284] * v_[285]; v_[299] = v_[184] * v_[264] - v_[183] * v_[284]; v_[300] = 2. * v_[251] * v_[284] + v_[271] + v_[164] * v_[284] - 2. * v_[270] * v_[269] - v_[276] * v_[269] - v_[299]; v_[252] = 0.134705 + v_[257] + v_[237] + v_[256] - v_[252] - v_[233] * v_[233] / v_[258]; v_[236] = v_[229] * v_[219] + v_[246] * v_[244] - v_[233] * v_[236]; v_[246] = v_[269] * v_[286] - v_[264] * v_[279]; v_[229] = 2. * v_[283] * v_[269] + v_[252] + v_[287] * v_[269] - 2. * v_[236] * v_[264] - v_[246] * v_[264] - v_[299]; v_[12] = -3.6e-05 + 2. * v_[33] * v_[29] + 2. 
* v_[72] * v_[5] + v_[32] * v_[72] + v_[29] * v_[73] - v_[17] * v_[12]; v_[67] = v_[56] * v_[52] + v_[12] + v_[20] * v_[67] + v_[30] * v_[67] - v_[46] * v_[52] - v_[35] * v_[47] - v_[65] * v_[52]; v_[52] = v_[48] * v_[67]; v_[73] = (v_[48] * v_[13] + v_[9] * v_[67]) * v_[48] + (v_[52] + v_[9] * v_[294]) * v_[9]; v_[52] = v_[50] * v_[13] + v_[52]; v_[67] = v_[50] * v_[67] + v_[48] * v_[294]; v_[33] = v_[52] * v_[50] + v_[67] * v_[48]; v_[80] = 3.6e-05 + -1 * 2. * v_[105] * v_[95] + -1 * 2. * v_[144] * v_[76] + v_[76] * v_[144] + v_[94] * v_[103] - v_[82] * v_[80]; v_[139] = v_[128] * v_[124] + v_[80] + v_[85] * v_[139] + v_[99] * v_[139] - v_[117] * v_[124] - v_[106] * v_[119] - v_[137] * v_[124]; v_[124] = v_[26] * v_[139]; v_[119] = (v_[121] * v_[97] + v_[124]) * v_[121] + (v_[121] * v_[139] + v_[26] * v_[296]) * v_[26]; v_[139] = v_[26] * v_[97] + v_[120] * v_[139]; v_[124] = v_[124] + v_[120] * v_[296]; v_[103] = v_[139] * v_[26] + v_[124] * v_[120]; v_[155] = 3.6e-05 + 2. * v_[176] * v_[172] + 2. * v_[215] * v_[92] + v_[175] * v_[215] + v_[172] * v_[216] - v_[160] * v_[155]; v_[210] = v_[199] * v_[195] + v_[155] + v_[163] * v_[210] + v_[173] * v_[210] - v_[189] * v_[195] - v_[178] * v_[190] - v_[208] * v_[195]; v_[195] = v_[191] * v_[210]; v_[216] = (v_[191] * v_[156] + v_[108] * v_[210]) * v_[191] + (v_[195] + v_[108] * v_[298]) * v_[108]; v_[195] = v_[193] * v_[156] + v_[195]; v_[210] = v_[193] * v_[210] + v_[191] * v_[298]; v_[176] = v_[195] * v_[193] + v_[210] * v_[191]; v_[228] = -3.6e-05 + -1 * 2. * v_[250] * v_[245] + -1 * 2. 
* v_[289] * v_[219] + v_[219] * v_[289] + v_[244] * v_[248] - v_[233] * v_[228]; v_[284] = v_[273] * v_[269] + v_[228] + v_[236] * v_[284] + v_[246] * v_[284] - v_[262] * v_[269] - v_[251] * v_[264] - v_[282] * v_[269]; v_[269] = v_[169] * v_[284]; v_[264] = (v_[266] * v_[229] + v_[269]) * v_[266] + (v_[266] * v_[284] + v_[169] * v_[300]) * v_[169]; v_[284] = v_[169] * v_[229] + v_[265] * v_[284]; v_[269] = v_[269] + v_[265] * v_[300]; v_[248] = v_[284] * v_[169] + v_[269] * v_[265]; v_[229] = 1.209488 + v_[294] + v_[13] + v_[293] + v_[296] + v_[97] + v_[295] + v_[298] + v_[156] + v_[297] + v_[300] + v_[229] + v_[299] - v_[73] - v_[33] - v_[119] - v_[103] - v_[216] - v_[176] - v_[264] - v_[248]; v_[300] = (0.190812 + v_[60] + v_[132] + v_[203] + v_[277]) / v_[229]; v_[156] = x_in[20] * x_in[21]; v_[298] = x_in[18] * x_in[23]; v_[97] = x_in[18] * x_in[19]; v_[296] = x_in[18] * x_in[20]; v_[13] = v_[9] * x_in[19] + v_[48] * x_in[20]; v_[294] = 0.207 * x_in[20] + x_in[21]; v_[250] = x_in[18] + x_in[33]; v_[289] = -0.207 * v_[48]; v_[215] = v_[51] * x_in[20] + v_[49] * x_in[19] + v_[289] * x_in[18] + v_[48] * x_in[23] + v_[9] * x_in[22]; v_[105] = v_[13] * v_[294] - v_[250] * v_[215]; v_[144] = v_[13] * v_[250]; v_[72] = v_[250] * v_[250]; v_[237] = v_[48] * x_in[19] + v_[50] * x_in[20]; v_[257] = v_[237] * v_[237]; v_[256] = v_[13] + x_in[34]; v_[301] = 0 - v_[28]; v_[302] = -0.3735 * v_[48]; v_[303] = -0.3735 * v_[9]; v_[304] = 0 - v_[9]; v_[305] = v_[302] * x_in[20] + v_[303] * x_in[19] + v_[47] * x_in[18] + v_[304] * x_in[23] + v_[48] * x_in[22]; v_[306] = v_[301] * v_[294] + v_[5] * v_[305] + v_[34] * v_[13]; v_[307] = v_[256] * v_[306]; v_[308] = v_[5] * v_[237] + v_[29] * v_[250]; v_[309] = 0.08 * v_[250] + v_[215]; v_[310] = v_[308] * v_[309]; v_[311] = v_[29] * v_[237] + v_[32] * v_[250]; v_[312] = v_[308] * v_[311]; v_[313] = v_[308] * v_[256]; v_[314] = v_[256] + x_in[35]; v_[315] = 0 - v_[5]; v_[316] = 0 - v_[28]; v_[317] = 0.08 * v_[5]; v_[318] = v_[315] * 
v_[294] + v_[316] * v_[305] + v_[317] * v_[13]; v_[319] = v_[7] * v_[318] + v_[2] * v_[306] + v_[0] * v_[256]; v_[320] = v_[314] * v_[319]; v_[321] = v_[2] * v_[308] + v_[7] * v_[311]; v_[322] = -0.35 * v_[311] + v_[309]; v_[323] = v_[321] * v_[322]; v_[324] = v_[11] * v_[308] + v_[2] * v_[311]; v_[325] = v_[321] * v_[324]; v_[326] = -0.3735 + -0.33 * v_[5] * v_[7] - 0.33 * v_[28] * v_[2] - 0.35 * v_[28]; v_[327] = 0.5 * x_in[0]; v_[328] = cos(v_[327]); v_[329] = 0.5 * x_in[1]; v_[330] = cos(v_[329]); v_[331] = v_[328] * v_[330]; v_[332] = 0.5 * x_in[2]; v_[333] = sin(v_[332]); v_[327] = sin(v_[327]); v_[329] = sin(v_[329]); v_[334] = v_[327] * v_[329]; v_[332] = cos(v_[332]); v_[335] = v_[331] * v_[333] + v_[334] * v_[332]; v_[336] = 2. * v_[335]; v_[329] = v_[328] * v_[329]; v_[327] = v_[327] * v_[330]; v_[330] = v_[329] * v_[332] - v_[327] * v_[333]; v_[328] = v_[336] * v_[330]; v_[327] = v_[327] * v_[332] + v_[329] * v_[333]; v_[329] = 2. * v_[327]; v_[332] = v_[331] * v_[332] - v_[334] * v_[333]; v_[334] = v_[329] * v_[332]; v_[333] = 0.5 * x_in[0]; v_[331] = cos(v_[333]); v_[337] = 0.5 * x_in[1]; v_[338] = sin(v_[337]); v_[339] = v_[331] * v_[338]; v_[340] = 0.5 * x_in[2]; v_[341] = cos(v_[340]); v_[333] = sin(v_[333]); v_[337] = cos(v_[337]); v_[342] = v_[333] * v_[337]; v_[340] = sin(v_[340]); v_[343] = v_[339] * v_[341] - v_[342] * v_[340]; v_[344] = 2. * v_[343]; v_[345] = v_[344] * v_[343]; v_[342] = v_[342] * v_[341] + v_[339] * v_[340]; v_[339] = 2. 
* v_[342]; v_[346] = v_[339] * v_[342]; v_[347] = cos(x_in[15]); v_[348] = 0.33 * v_[347]; v_[349] = sin(x_in[15]); v_[350] = -0.33 * v_[349]; v_[351] = -0.207 + 0.33 * v_[349] * v_[5] * v_[2] + -0.33 * v_[349] * v_[28] * v_[7] + 0.35 * v_[349] * v_[5] + 0.08 * v_[349]; v_[352] = -0.3735 + -0.33 * v_[5] * v_[7] - 0.33 * v_[28] * v_[2] - 0.35 * v_[28]; v_[348] = (v_[348] * v_[5] * v_[7] + 0.33 * v_[347] * v_[28] * v_[2]) * x_in[35] + (v_[348] * v_[28] * v_[2] + 0.33 * v_[347] * v_[5] * v_[7] + 0.35 * v_[347] * v_[28]) * x_in[34] + (0.33 * v_[349] * v_[5] * v_[2] + v_[350] * v_[28] * v_[7] + 0.35 * v_[349] * v_[5] + 0.08 * v_[349]) * x_in[33] + x_in[23] + x_in[18] * v_[351] - x_in[19] * v_[352]; v_[337] = v_[331] * v_[337]; v_[333] = v_[333] * v_[338]; v_[338] = v_[337] * v_[340] + v_[333] * v_[341]; v_[331] = 2. * v_[338]; v_[343] = v_[331] * v_[343]; v_[333] = v_[337] * v_[341] - v_[333] * v_[340]; v_[339] = v_[339] * v_[333]; v_[337] = 0.33 * v_[347] * v_[28] * v_[7] - 0.33 * v_[347] * v_[5] * v_[2] - 0.35 * v_[347] * v_[5] - 0.08 * v_[347]; v_[352] = (-0.33 * v_[349] * v_[5] * v_[7] - 0.33 * v_[349] * v_[28] * v_[2]) * x_in[35] + (v_[350] * v_[5] * v_[7] - 0.33 * v_[349] * v_[28] * v_[2] - 0.35 * v_[349] * v_[28]) * x_in[34] + (0.33 * v_[347] * v_[5] * v_[2] + -0.33 * v_[347] * v_[28] * v_[7] + 0.35 * v_[347] * v_[5] + 0.08 * v_[347]) * x_in[33] + x_in[22] + x_in[20] * v_[352] - x_in[18] * v_[337]; v_[350] = v_[331] * v_[342]; v_[340] = v_[344] * v_[333]; v_[337] = (0.33 * v_[28] * v_[7] - 0.33 * v_[5] * v_[2]) * x_in[35] + (0.33 * v_[28] * v_[7] - 0.33 * v_[5] * v_[2] - 0.35 * v_[5]) * x_in[34] + x_in[21] + x_in[19] * v_[337] - x_in[20] * v_[351]; v_[351] = 0.5 * x_in[0]; v_[341] = cos(v_[351]); v_[353] = 0.5 * x_in[1]; v_[354] = sin(v_[353]); v_[355] = v_[341] * v_[354]; v_[356] = 0.5 * x_in[2]; v_[357] = cos(v_[356]); v_[351] = sin(v_[351]); v_[353] = cos(v_[353]); v_[358] = v_[351] * v_[353]; v_[356] = sin(v_[356]); v_[359] = v_[355] * v_[357] - v_[358] * 
v_[356]; v_[360] = 2. * v_[359]; v_[358] = v_[358] * v_[357] + v_[355] * v_[356]; v_[355] = 2. * v_[358]; v_[353] = v_[341] * v_[353]; v_[354] = v_[351] * v_[354]; v_[361] = 2. * (v_[353] * v_[356] + v_[354] * v_[357]); v_[354] = v_[353] * v_[357] - v_[354] * v_[356]; v_[354] = (1 - v_[360] * v_[359] - v_[355] * v_[358]) * (0.33 * v_[347] * v_[28] * v_[7] - 0.33 * v_[347] * v_[5] * v_[2] - 0.35 * v_[347] * v_[5] - 0.08 * v_[347]) + (v_[361] * v_[359] + v_[355] * v_[354]) * (-0.207 + 0.33 * v_[349] * v_[5] * v_[2] + -0.33 * v_[349] * v_[28] * v_[7] + 0.35 * v_[349] * v_[5] + 0.08 * v_[349]) + (v_[361] * v_[358] - v_[360] * v_[354]) * (-0.3735 + -0.33 * v_[5] * v_[7] - 0.33 * v_[28] * v_[2] - 0.35 * v_[28]) + x_in[5]; v_[361] = 1 / (1 + exp(100. * v_[354])); v_[354] = -1000. * ((1 - v_[345] - v_[346]) * v_[348] + (v_[343] + v_[339]) * v_[352] + (v_[350] - v_[340]) * v_[337]) * v_[361] + 5000. * exp(-100. * (v_[354] + 0.02)); v_[335] = v_[336] * v_[335]; v_[329] = v_[329] * v_[327]; v_[338] = v_[331] * v_[338]; v_[342] = v_[344] * v_[342]; v_[333] = v_[331] * v_[333]; v_[339] = -1000. * ((v_[343] - v_[339]) * v_[348] + (1 - v_[338] - v_[346]) * v_[352] + (v_[342] + v_[333]) * v_[337]) * v_[361]; v_[343] = 2. * v_[330]; v_[346] = v_[343] * v_[327]; v_[331] = v_[336] * v_[332]; v_[333] = -1000. 
* ((v_[350] + v_[340]) * v_[348] + (v_[342] - v_[333]) * v_[352] + (1 - v_[338] - v_[345]) * v_[337]) * v_[361]; v_[342] = (v_[328] + v_[334]) * v_[354] + (1 - v_[335] - v_[329]) * v_[339] + (v_[346] - v_[331]) * v_[333]; v_[338] = -0.207 + 0.33 * v_[48] * v_[5] * v_[2] + -0.33 * v_[48] * v_[28] * v_[7] + 0.35 * v_[48] * v_[5] + 0.08 * v_[48]; v_[327] = v_[336] * v_[327]; v_[332] = v_[343] * v_[332]; v_[343] = v_[343] * v_[330]; v_[331] = (v_[327] - v_[332]) * v_[354] + (v_[346] + v_[331]) * v_[339] + (1 - v_[335] - v_[343]) * v_[333]; v_[346] = v_[326] * v_[342] - v_[338] * v_[331]; v_[335] = 0.33 * v_[9] * v_[28] * v_[7] - 0.33 * v_[9] * v_[5] * v_[2] - 0.35 * v_[9] * v_[5] - 0.08 * v_[9]; v_[343] = (1 - v_[343] - v_[329]) * v_[354] + (v_[328] - v_[334]) * v_[339] + (v_[327] + v_[332]) * v_[333]; v_[326] = v_[335] * v_[331] - v_[326] * v_[343]; v_[332] = 0 - v_[7]; v_[327] = 0.35 * v_[2]; v_[333] = v_[2] * v_[318] + v_[332] * v_[306] + v_[327] * v_[256]; v_[322] = v_[324] * v_[322] - v_[314] * v_[333]; v_[339] = 0.1104774 * (v_[320] - v_[323]) + 0.025941 * v_[325] - v_[48] * v_[346] - v_[9] * v_[326] - (-0.35 * v_[9] * v_[28] - 0.3735 * v_[9]) * v_[343] - (0.35 * v_[48] * v_[28] + 0.3735 * v_[48]) * v_[342] - (0.08 + 0.35 * v_[5] - 0.207 * v_[48]) * v_[331] - -0.0004405 * v_[322]; v_[329] = x_in[47] - v_[339]; v_[354] = v_[324] * v_[314]; v_[334] = (0 - v_[319]) * x_in[35]; v_[328] = v_[333] * x_in[35]; v_[330] = v_[324] * x_in[35]; v_[336] = v_[9] * v_[5]; v_[361] = v_[9] * v_[28]; v_[337] = v_[336] * v_[7] + v_[361] * v_[2]; v_[340] = v_[48] * v_[28]; v_[350] = v_[340] * v_[2]; v_[352] = v_[28] * v_[7] - v_[5] * v_[2]; v_[348] = v_[314] * v_[314]; v_[345] = v_[321] * v_[321]; v_[344] = -0.881 * (v_[323] - v_[320]) + 0.1104774 * v_[325] + 8.81e-05 * v_[354] + 0.414812462825713 * v_[334] + -0.00185880198235362 * v_[328] + -8.81e-05 * v_[330] + (0.1104774 * v_[329]) / 0.026181 - v_[337] * v_[343] - ((0 - v_[48]) * v_[5] * v_[7] - v_[350]) * v_[342] - v_[352] * 
v_[331] - -0.0004405 * (v_[348] + v_[345]); v_[314] = v_[321] * v_[314]; v_[355] = (0 - v_[321]) * x_in[35]; v_[361] = v_[361] * v_[7] - v_[336] * v_[2]; v_[340] = v_[48] * v_[5] * v_[2] - v_[340] * v_[7]; v_[336] = v_[28] * v_[2]; v_[358] = v_[324] * v_[324]; v_[348] = 0.881 * v_[322] + 8.81e-05 * v_[314] + -0.0004405 * v_[325] + -0.00185880198235362 * v_[334] + 0.880992588508842 * v_[328] + 8.81e-05 * v_[355] + (0.0004405 * v_[329]) / 0.026181 - v_[361] * v_[343] - v_[340] * v_[342] - ((0 - v_[5]) * v_[7] - v_[336]) * v_[331] - 0.1104774 * (v_[348] + v_[358]); v_[309] = v_[311] * v_[309] - v_[256] * v_[318]; v_[325] = v_[311] * v_[256]; v_[360] = v_[311] * v_[311]; v_[359] = v_[308] * v_[308]; v_[339] = 0.47422804 * (v_[307] - v_[310]) + 0.081641 * v_[312] + 2.1e-05 * v_[313] + (0.026181 * v_[329]) / 0.026181 + v_[339] + v_[327] * v_[344] + v_[0] * v_[348] - 0.0825825 * v_[309] - -0.000102 * v_[325] - -0.007418 * (v_[360] - v_[359]); v_[349] = x_in[46] - v_[339]; v_[347] = (0 - v_[308]) * x_in[34]; v_[353] = v_[311] * x_in[34]; v_[356] = (0 - v_[306]) * x_in[34]; v_[357] = v_[318] * x_in[34]; v_[256] = v_[256] * v_[256]; v_[310] = -3.146 * (v_[310] - v_[307]) + 0.47422804 * v_[312] + v_[2] * v_[344] + v_[7] * v_[348] + v_[14] * v_[347] + v_[27] * v_[353] + v_[39] * v_[356] + v_[38] * v_[357] + (v_[25] * v_[349]) / v_[16] - 0.0825825 * (v_[256] + v_[359]); v_[309] = 3.146 * v_[309] + 0.0825825 * v_[312] + v_[332] * v_[344] + v_[2] * v_[348] + v_[31] * v_[347] + v_[8] * v_[353] + v_[38] * v_[356] + v_[19] * v_[357] + (v_[23] * v_[349]) / v_[16] - 0.47422804 * (v_[256] + v_[360]); v_[348] = (0 - v_[237]) * x_in[33]; v_[344] = v_[13] * x_in[33]; v_[38] = (0 - v_[305]) * x_in[33]; v_[19] = v_[215] * x_in[33]; v_[307] = v_[237] * v_[13]; v_[39] = v_[250] * v_[305]; v_[294] = v_[237] * v_[294]; v_[250] = v_[237] * v_[250]; v_[335] = v_[338] * v_[343] - v_[335] * v_[342]; v_[333] = v_[321] * v_[333] - v_[324] * v_[319]; v_[352] = 8.81e-05 * v_[322] + 8.81e-05 * v_[328] + 
0.026409 * v_[355] - v_[337] * v_[346] - ((0 - v_[48]) * v_[5] * v_[7] - v_[350]) * v_[326] - v_[352] * v_[335] - (((0.207 - 0.08 * v_[48]) * v_[28] + 0.3735 * v_[48] * v_[5]) * v_[7] + ((0.08 * v_[48] - 0.207) * v_[5] + 0.3735 * v_[48] * v_[28] + 0.35 * v_[48]) * v_[2]) * v_[343] - ((0.3735 * v_[9] * v_[5] - 0.08 * v_[9] * v_[28]) * v_[7] + (0.08 * v_[9] * v_[5] + 0.3735 * v_[9] * v_[28] + 0.35 * v_[9]) * v_[2]) * v_[342] - (-0.207 * v_[9] * v_[5] * v_[7] - 0.207 * v_[9] * v_[28] * v_[2]) * v_[331] - 0.1104774 * v_[333] - 0.025713 * v_[314]; v_[335] = -0.0004405 * v_[333] + 8.81e-05 * (v_[323] - v_[320]) + -0.000227999999999999 * v_[354] + -8.81e-05 * v_[334] + 0.000468 * v_[330] - v_[361] * v_[346] - v_[340] * v_[326] - ((0 - v_[5]) * v_[7] - v_[336]) * v_[335] - (((0.08 * v_[48] - 0.207) * v_[5] + 0.3735 * v_[48] * v_[28] + 0.35 * v_[48]) * v_[7] + ((0.08 * v_[48] - 0.207) * v_[28] - 0.3735 * v_[48] * v_[5]) * v_[2]) * v_[343] - ((0.08 * v_[9] * v_[5] + 0.3735 * v_[9] * v_[28] + 0.35 * v_[9]) * v_[7] + (0.08 * v_[9] * v_[28] - 0.3735 * v_[9] * v_[5]) * v_[2]) * v_[342] - (0.207 * v_[9] * v_[5] * v_[2] - 0.207 * v_[9] * v_[28] * v_[7]) * v_[331]; v_[333] = 0.881 * v_[333] + 0.1104774 * v_[314] + -0.0004405 * v_[354] + -0.1104774 * v_[355] + -0.0004405 * v_[330] - v_[48] * v_[343] - v_[9] * v_[342] - 8.81e-05 * (v_[358] + v_[345]); v_[318] = v_[308] * v_[318] - v_[311] * v_[306]; v_[359] = -0.007418 * v_[325] + -0.000102 * (v_[256] - v_[359]) + v_[2] * v_[352] + v_[7] * v_[335] + -0.35 * v_[333] + v_[22] * v_[347] + v_[10] * v_[353] + v_[14] * v_[356] + v_[31] * v_[357] + (v_[6] * v_[349]) / v_[16] - 0.47422804 * v_[318] - 0.084376 * v_[313] - 2.1e-05 * v_[312]; v_[335] = 0.002735 * v_[325] + 0.0825825 * v_[318] + -0.000102 * v_[312] + v_[11] * v_[352] + v_[2] * v_[335] + v_[10] * v_[347] + v_[1] * v_[353] + v_[27] * v_[356] + v_[8] * v_[357] + (v_[15] * v_[349]) / v_[16] - -0.007418 * v_[313] - 2.1e-05 * (v_[256] - v_[360]); v_[318] = 0.47422804 * v_[313] + 3.146 
* v_[318] + 0.0825825 * v_[325] + v_[333] + v_[24] * v_[347] + v_[18] * v_[353]; v_[333] = v_[13] * v_[13]; v_[325] = 0.009466 * v_[307] + 0.1466472 * (v_[39] - v_[294]) + -5.1e-05 * v_[250] + v_[32] * v_[359] + v_[29] * v_[335] + 0.08 * v_[318] - -0.022734 * v_[144] - -3.6e-05 * (v_[333] - v_[257]); v_[313] = x_in[45] - v_[325]; v_[305] = v_[237] * v_[215] - v_[13] * v_[305]; v_[339] = 0.5824264 * v_[105] + -3.6e-05 * v_[144] + -0.022734 * (v_[72] - v_[257]) + (v_[16] * v_[349]) / v_[16] + v_[339] + v_[317] * v_[310] + v_[34] * v_[309] + v_[54] * v_[348] + v_[12] * v_[344] + v_[56] * v_[38] + v_[53] * v_[19] + (v_[74] * v_[313]) / v_[42] - 0.1466472 * v_[305] - -0.123672 * v_[250] - -5.1e-05 * v_[307]; v_[335] = -0.133138 * v_[144] + 0.5824264 * (v_[294] - v_[39]) + -0.022734 * v_[307] + v_[29] * v_[359] + v_[5] * v_[335] + v_[12] * v_[348] + v_[36] * v_[344] + v_[66] * v_[38] + v_[46] * v_[19] + (v_[17] * v_[313]) / v_[42] - -3.6e-05 * v_[250] - -5.1e-05 * (v_[72] - v_[333]); v_[305] = 3.44 * v_[305] + 0.1466472 * v_[250] + v_[315] * v_[310] + v_[301] * v_[309] + v_[35] * v_[348] + v_[20] * v_[344] + v_[69] * v_[38] + v_[63] * v_[19] + (v_[43] * v_[313]) / v_[42] - 0.5824264 * (v_[333] + v_[257]); v_[318] = 0.1466472 * v_[307] + -3.44 * (v_[294] - v_[39]) + 0.5824264 * v_[144] + v_[318] + v_[56] * v_[348] + v_[66] * v_[344] + v_[68] * v_[38] + v_[57] * v_[19] + (v_[55] * v_[313]) / v_[42]; v_[333] = 3.44 * v_[105] + 0.5824264 * v_[250] + v_[316] * v_[310] + v_[5] * v_[309] + v_[53] * v_[348] + v_[46] * v_[344] + v_[57] * v_[38] + v_[61] * v_[19] + (v_[44] * v_[313]) / v_[42] - 0.1466472 * (v_[72] + v_[333]); v_[250] = v_[26] * x_in[19] + v_[120] * x_in[20]; v_[309] = 0.207 * x_in[20] + -1 * x_in[21]; v_[310] = -1 * x_in[18] + x_in[30]; v_[72] = 0.207 * v_[120]; v_[105] = 0 - v_[37]; v_[294] = v_[123] * x_in[20] + v_[122] * x_in[19] + v_[72] * x_in[18] + v_[120] * x_in[23] + v_[105] * x_in[22]; v_[39] = v_[250] * v_[309] - v_[310] * v_[294]; v_[307] = v_[250] * 
v_[310]; v_[144] = v_[310] * v_[310]; v_[257] = v_[121] * x_in[19] + v_[26] * x_in[20]; v_[359] = v_[257] * v_[257]; v_[12] = -0.08 * v_[76]; v_[36] = v_[76] * v_[257] + v_[94] * v_[310]; v_[54] = -0.08 * v_[310] + -1 * v_[294]; v_[237] = v_[36] * v_[54]; v_[215] = -1 * v_[250] + x_in[31]; v_[13] = 0.3735 * v_[120]; v_[24] = -0.3735 * v_[37]; v_[18] = 0 - v_[37]; v_[352] = 0 - v_[120]; v_[256] = v_[13] * x_in[20] + v_[24] * x_in[19] + v_[118] * x_in[18] + v_[18] * x_in[23] + v_[352] * x_in[22]; v_[360] = v_[94] * v_[309] + v_[76] * v_[256] + v_[104] * v_[250]; v_[312] = v_[215] * v_[360]; v_[1] = v_[95] * v_[257] + v_[76] * v_[310]; v_[10] = v_[36] * v_[1]; v_[8] = v_[79] * v_[36] + v_[77] * v_[1]; v_[27] = -0.35 * v_[1] + v_[54]; v_[11] = v_[8] * v_[27]; v_[22] = v_[215] + x_in[32]; v_[31] = 0 - v_[94]; v_[14] = v_[76] * v_[309] + v_[31] * v_[256] + v_[12] * v_[250]; v_[311] = v_[77] * v_[14] + v_[79] * v_[360] + v_[78] * v_[215]; v_[308] = v_[22] * v_[311]; v_[306] = v_[89] * v_[36] + v_[79] * v_[1]; v_[358] = v_[8] * v_[306]; v_[355] = (0 - v_[311]) * x_in[32]; v_[314] = 0 - v_[77]; v_[345] = 0.35 * v_[79]; v_[330] = v_[79] * v_[14] + v_[314] * v_[360] + v_[345] * v_[215]; v_[354] = v_[330] * x_in[32]; v_[343] = -0.3735 + -0.33 * v_[76] * v_[77] - 0.33 * v_[94] * v_[79] - 0.35 * v_[94]; v_[342] = 0.5 * x_in[0]; v_[336] = cos(v_[342]); v_[340] = 0.5 * x_in[1]; v_[361] = cos(v_[340]); v_[326] = v_[336] * v_[361]; v_[346] = 0.5 * x_in[2]; v_[331] = sin(v_[346]); v_[342] = sin(v_[342]); v_[340] = sin(v_[340]); v_[323] = v_[342] * v_[340]; v_[346] = cos(v_[346]); v_[320] = v_[326] * v_[331] + v_[323] * v_[346]; v_[28] = 2. * v_[320]; v_[340] = v_[336] * v_[340]; v_[342] = v_[342] * v_[361]; v_[361] = v_[340] * v_[346] - v_[342] * v_[331]; v_[336] = v_[28] * v_[361]; v_[342] = v_[342] * v_[346] + v_[340] * v_[331]; v_[340] = 2. 
* v_[342]; v_[346] = v_[326] * v_[346] - v_[323] * v_[331]; v_[323] = v_[340] * v_[346]; v_[331] = 0.5 * x_in[0]; v_[326] = cos(v_[331]); v_[350] = 0.5 * x_in[1]; v_[337] = sin(v_[350]); v_[322] = v_[326] * v_[337]; v_[324] = 0.5 * x_in[2]; v_[321] = cos(v_[324]); v_[331] = sin(v_[331]); v_[350] = cos(v_[350]); v_[319] = v_[331] * v_[350]; v_[324] = sin(v_[324]); v_[338] = v_[322] * v_[321] - v_[319] * v_[324]; v_[362] = 2. * v_[338]; v_[363] = v_[362] * v_[338]; v_[319] = v_[319] * v_[321] + v_[322] * v_[324]; v_[322] = 2. * v_[319]; v_[364] = v_[322] * v_[319]; v_[365] = 0.33 * v_[37]; v_[366] = sin(x_in[12]); v_[367] = 0.207 + 0.33 * v_[366] * v_[94] * v_[77] - 0.33 * v_[366] * v_[76] * v_[79] - 0.35 * v_[366] * v_[76] - 0.08 * v_[366]; v_[368] = -0.3735 + -0.33 * v_[76] * v_[77] - 0.33 * v_[94] * v_[79] - 0.35 * v_[94]; v_[365] = (v_[365] * v_[76] * v_[77] + 0.33 * v_[37] * v_[94] * v_[79]) * x_in[32] + (v_[365] * v_[94] * v_[79] + 0.33 * v_[37] * v_[76] * v_[77] + 0.35 * v_[37] * v_[94]) * x_in[31] + (0.33 * v_[366] * v_[76] * v_[79] + -0.33 * v_[366] * v_[94] * v_[77] + 0.35 * v_[366] * v_[76] + 0.08 * v_[366]) * x_in[30] + x_in[23] + x_in[18] * v_[367] - x_in[19] * v_[368]; v_[350] = v_[326] * v_[350]; v_[331] = v_[331] * v_[337]; v_[337] = v_[350] * v_[324] + v_[331] * v_[321]; v_[326] = 2. 
* v_[337]; v_[338] = v_[326] * v_[338]; v_[331] = v_[350] * v_[321] - v_[331] * v_[324]; v_[322] = v_[322] * v_[331]; v_[350] = 0.33 * v_[366]; v_[324] = 0.33 * v_[37] * v_[94] * v_[77] - 0.33 * v_[37] * v_[76] * v_[79] - 0.35 * v_[37] * v_[76] - 0.08 * v_[37]; v_[350] = (v_[350] * v_[76] * v_[77] + 0.33 * v_[366] * v_[94] * v_[79]) * x_in[32] + (v_[350] * v_[94] * v_[79] + 0.33 * v_[366] * v_[76] * v_[77] + 0.35 * v_[366] * v_[94]) * x_in[31] + (0.33 * v_[37] * v_[94] * v_[77] - 0.33 * v_[37] * v_[76] * v_[79] - 0.35 * v_[37] * v_[76] - 0.08 * v_[37]) * x_in[30] + x_in[22] + x_in[20] * v_[368] - x_in[18] * v_[324]; v_[368] = v_[326] * v_[319]; v_[321] = v_[362] * v_[331]; v_[324] = (0.33 * v_[94] * v_[77] - 0.33 * v_[76] * v_[79]) * x_in[32] + (0.33 * v_[94] * v_[77] - 0.33 * v_[76] * v_[79] - 0.35 * v_[76]) * x_in[31] + x_in[21] + x_in[19] * v_[324] - x_in[20] * v_[367]; v_[367] = 0.5 * x_in[0]; v_[369] = cos(v_[367]); v_[370] = 0.5 * x_in[1]; v_[371] = sin(v_[370]); v_[372] = v_[369] * v_[371]; v_[373] = 0.5 * x_in[2]; v_[374] = cos(v_[373]); v_[367] = sin(v_[367]); v_[370] = cos(v_[370]); v_[375] = v_[367] * v_[370]; v_[373] = sin(v_[373]); v_[376] = v_[372] * v_[374] - v_[375] * v_[373]; v_[377] = 2. * v_[376]; v_[375] = v_[375] * v_[374] + v_[372] * v_[373]; v_[372] = 2. * v_[375]; v_[370] = v_[369] * v_[370]; v_[367] = v_[367] * v_[371]; v_[371] = 2. * (v_[370] * v_[373] + v_[367] * v_[374]); v_[367] = v_[370] * v_[374] - v_[367] * v_[373]; v_[367] = (1 - v_[377] * v_[376] - v_[372] * v_[375]) * (0.33 * v_[37] * v_[94] * v_[77] - 0.33 * v_[37] * v_[76] * v_[79] - 0.35 * v_[37] * v_[76] - 0.08 * v_[37]) + (v_[371] * v_[376] + v_[372] * v_[367]) * (0.207 + 0.33 * v_[366] * v_[94] * v_[77] - 0.33 * v_[366] * v_[76] * v_[79] - 0.35 * v_[366] * v_[76] - 0.08 * v_[366]) + (v_[371] * v_[375] - v_[377] * v_[367]) * (-0.3735 + -0.33 * v_[76] * v_[77] - 0.33 * v_[94] * v_[79] - 0.35 * v_[94]) + x_in[5]; v_[371] = 1 / (1 + exp(100. * v_[367])); v_[367] = -1000. 
* ((1 - v_[363] - v_[364]) * v_[365] + (v_[338] + v_[322]) * v_[350] + (v_[368] - v_[321]) * v_[324]) * v_[371] + 5000. * exp(-100. * (v_[367] + 0.02)); v_[320] = v_[28] * v_[320]; v_[340] = v_[340] * v_[342]; v_[337] = v_[326] * v_[337]; v_[319] = v_[362] * v_[319]; v_[331] = v_[326] * v_[331]; v_[322] = -1000. * ((v_[338] - v_[322]) * v_[365] + (1 - v_[337] - v_[364]) * v_[350] + (v_[319] + v_[331]) * v_[324]) * v_[371]; v_[338] = 2. * v_[361]; v_[364] = v_[338] * v_[342]; v_[326] = v_[28] * v_[346]; v_[331] = -1000. * ((v_[368] + v_[321]) * v_[365] + (v_[319] - v_[331]) * v_[350] + (1 - v_[337] - v_[363]) * v_[324]) * v_[371]; v_[319] = (v_[336] + v_[323]) * v_[367] + (1 - v_[320] - v_[340]) * v_[322] + (v_[364] - v_[326]) * v_[331]; v_[337] = 0.207 + 0.33 * v_[120] * v_[94] * v_[77] - 0.33 * v_[120] * v_[76] * v_[79] - 0.35 * v_[120] * v_[76] - 0.08 * v_[120]; v_[342] = v_[28] * v_[342]; v_[346] = v_[338] * v_[346]; v_[338] = v_[338] * v_[361]; v_[326] = (v_[342] - v_[346]) * v_[367] + (v_[364] + v_[326]) * v_[322] + (1 - v_[320] - v_[338]) * v_[331]; v_[364] = v_[343] * v_[319] - v_[337] * v_[326]; v_[320] = 0.33 * v_[37] * v_[94] * v_[77] - 0.33 * v_[37] * v_[76] * v_[79] - 0.35 * v_[37] * v_[76] - 0.08 * v_[37]; v_[338] = (1 - v_[338] - v_[340]) * v_[367] + (v_[336] - v_[323]) * v_[322] + (v_[342] + v_[346]) * v_[331]; v_[343] = v_[320] * v_[326] - v_[343] * v_[338]; v_[27] = v_[306] * v_[27] - v_[22] * v_[330]; v_[346] = 0.110125 * (v_[308] - v_[11]) + 0.025941 * v_[358] - (0 - v_[120]) * v_[364] - v_[37] * v_[343] - (-0.35 * v_[37] * v_[94] - 0.3735 * v_[37]) * v_[338] - (-0.35 * v_[120] * v_[94] - 0.3735 * v_[120]) * v_[319] - (0.08 + 0.35 * v_[76] - 0.207 * v_[120]) * v_[326] - -0.000881 * v_[27]; v_[342] = x_in[44] - v_[346]; v_[331] = v_[37] * v_[76]; v_[322] = v_[37] * v_[94]; v_[340] = v_[331] * v_[77] + v_[322] * v_[79]; v_[367] = v_[120] * v_[76]; v_[323] = v_[120] * v_[94]; v_[336] = v_[367] * v_[77] + v_[323] * v_[79]; v_[361] = v_[94] * v_[77] - 
v_[76] * v_[79]; v_[28] = v_[22] * v_[22]; v_[308] = -0.881 * (v_[11] - v_[308]) + 0.110125 * v_[358] + 0.417781802643138 * v_[355] + -0.00370574557885489 * v_[354] + (0.110125 * v_[342]) / 0.026181 - v_[340] * v_[338] - v_[336] * v_[319] - v_[361] * v_[326] - -0.000881 * (v_[28] + v_[8] * v_[8]); v_[322] = v_[322] * v_[77] - v_[331] * v_[79]; v_[323] = v_[323] * v_[77] - v_[367] * v_[79]; v_[367] = v_[94] * v_[79]; v_[28] = 0.881 * v_[27] + -0.000881 * v_[358] + -0.00370574557885489 * v_[355] + 0.880970354035369 * v_[354] + (0.000881 * v_[342]) / 0.026181 - v_[322] * v_[338] - v_[323] * v_[319] - ((0 - v_[76]) * v_[77] - v_[367]) * v_[326] - 0.110125 * (v_[28] + v_[306] * v_[306]); v_[27] = (0 - v_[36]) * x_in[31]; v_[358] = v_[1] * x_in[31]; v_[331] = (0 - v_[360]) * x_in[31]; v_[11] = v_[14] * x_in[31]; v_[371] = v_[36] * v_[215]; v_[54] = v_[1] * v_[54] - v_[215] * v_[14]; v_[324] = v_[1] * v_[215]; v_[321] = v_[1] * v_[1]; v_[368] = v_[36] * v_[36]; v_[346] = 0.47422804 * (v_[312] - v_[237]) + 0.081641 * v_[10] + 2.1e-05 * v_[371] + (0.026181 * v_[342]) / 0.026181 + v_[346] + v_[345] * v_[308] + v_[78] * v_[28] - 0.0825825 * v_[54] - -0.000102 * v_[324] - -0.007418 * (v_[321] - v_[368]); v_[350] = x_in[43] - v_[346]; v_[215] = v_[215] * v_[215]; v_[312] = -3.146 * (v_[237] - v_[312]) + 0.47422804 * v_[10] + v_[79] * v_[308] + v_[77] * v_[28] + v_[96] * v_[27] + v_[93] * v_[358] + v_[110] * v_[331] + v_[109] * v_[11] + (v_[91] * v_[350]) / v_[81] - 0.0825825 * (v_[215] + v_[368]); v_[54] = 3.146 * v_[54] + 0.0825825 * v_[10] + v_[314] * v_[308] + v_[79] * v_[28] + v_[98] * v_[27] + v_[87] * v_[358] + v_[109] * v_[331] + v_[84] * v_[11] + (v_[88] * v_[350]) / v_[81] - 0.47422804 * (v_[215] + v_[321]); v_[28] = (0 - v_[257]) * x_in[30]; v_[308] = v_[250] * x_in[30]; v_[109] = (0 - v_[256]) * x_in[30]; v_[84] = v_[294] * x_in[30]; v_[237] = v_[257] * v_[250]; v_[110] = v_[310] * v_[256]; v_[309] = v_[257] * v_[309]; v_[310] = v_[257] * v_[310]; v_[365] = (0 - 
v_[8]) * x_in[32]; v_[320] = v_[337] * v_[338] - v_[320] * v_[319]; v_[337] = 0.207 * v_[37]; v_[330] = v_[8] * v_[330] - v_[306] * v_[311]; v_[8] = v_[8] * v_[22]; v_[361] = 0.026409 * v_[365] - v_[340] * v_[364] - v_[336] * v_[343] - v_[361] * v_[320] - (((0.08 * v_[120] - 0.207) * v_[94] - 0.3735 * v_[120] * v_[76]) * v_[77] + ((0.207 - 0.08 * v_[120]) * v_[76] + -0.3735 * v_[120] * v_[94] - 0.35 * v_[120]) * v_[79]) * v_[338] - ((0.3735 * v_[37] * v_[76] - 0.08 * v_[37] * v_[94]) * v_[77] + (0.08 * v_[37] * v_[76] + 0.3735 * v_[37] * v_[94] + 0.35 * v_[37]) * v_[79]) * v_[319] - (v_[337] * v_[76] * v_[77] + 0.207 * v_[37] * v_[94] * v_[79]) * v_[326] - 0.110125 * v_[330] - 0.025713 * v_[8]; v_[22] = v_[306] * v_[22]; v_[306] = v_[306] * x_in[32]; v_[337] = -0.000227999999999999 * v_[22] + -0.000881 * v_[330] + 0.000468 * v_[306] - v_[322] * v_[364] - v_[323] * v_[343] - ((0 - v_[76]) * v_[77] - v_[367]) * v_[320] - (((0.207 - 0.08 * v_[120]) * v_[76] + -0.3735 * v_[120] * v_[94] - 0.35 * v_[120]) * v_[77] + ((0.207 - 0.08 * v_[120]) * v_[94] + 0.3735 * v_[120] * v_[76]) * v_[79]) * v_[338] - ((0.08 * v_[37] * v_[76] + 0.3735 * v_[37] * v_[94] + 0.35 * v_[37]) * v_[77] + (0.08 * v_[37] * v_[94] - 0.3735 * v_[37] * v_[76]) * v_[79]) * v_[319] - (v_[337] * v_[94] * v_[77] - 0.207 * v_[37] * v_[76] * v_[79]) * v_[326]; v_[306] = 0.110125 * v_[8] + 0.881 * v_[330] + -0.000881 * v_[22] + -0.110125 * v_[365] + -0.000881 * v_[306] - (0 - v_[120]) * v_[338] - v_[37] * v_[319]; v_[14] = v_[36] * v_[14] - v_[1] * v_[360]; v_[368] = -0.007418 * v_[324] + -0.000102 * (v_[215] - v_[368]) + v_[79] * v_[361] + v_[77] * v_[337] + -0.35 * v_[306] + v_[101] * v_[27] + v_[102] * v_[358] + v_[96] * v_[331] + v_[98] * v_[11] + (2.1e-05 * v_[350]) / v_[81] - 0.47422804 * v_[14] - 0.084376 * v_[371] - 2.1e-05 * v_[10]; v_[337] = 0.002735 * v_[324] + 0.0825825 * v_[14] + -0.000102 * v_[10] + v_[89] * v_[361] + v_[79] * v_[337] + v_[102] * v_[27] + v_[100] * v_[358] + v_[93] * v_[331] + 
v_[87] * v_[11] + (-0.000102 * v_[350]) / v_[81] - -0.007418 * v_[371] - 2.1e-05 * (v_[215] - v_[321]); v_[14] = 0.47422804 * v_[371] + 3.146 * v_[14] + 0.0825825 * v_[324] + v_[306] + v_[90] * v_[27] + v_[83] * v_[358]; v_[306] = v_[250] * v_[250]; v_[324] = 0.009466 * v_[237] + 0.1466472 * (v_[110] - v_[309]) + -5.1e-05 * v_[310] + v_[76] * v_[368] + v_[94] * v_[337] + -0.08 * v_[14] - 0.022734 * v_[307] - 3.6e-05 * (v_[306] - v_[359]); v_[371] = x_in[42] - v_[324]; v_[256] = v_[257] * v_[294] - v_[250] * v_[256]; v_[346] = -0.5824264 * v_[39] + 3.6e-05 * v_[307] + 0.022734 * (v_[144] - v_[359]) + v_[12] * v_[312] + v_[104] * v_[54] + -1 * (v_[346] + (v_[81] * v_[350]) / v_[81]) + v_[126] * v_[28] + v_[80] * v_[308] + v_[128] * v_[109] + v_[125] * v_[84] + (v_[145] * v_[371]) / v_[113] - 0.1466472 * v_[256] - -0.123672 * v_[310] - -5.1e-05 * v_[237]; v_[337] = -0.133138 * v_[307] + -0.5824264 * (v_[309] - v_[110]) + 0.022734 * v_[237] + v_[95] * v_[368] + v_[76] * v_[337] + v_[80] * v_[28] + v_[107] * v_[308] + v_[138] * v_[109] + v_[117] * v_[84] + (v_[82] * v_[371]) / v_[113] - 3.6e-05 * v_[310] - -5.1e-05 * (v_[144] - v_[306]); v_[256] = 3.44 * v_[256] + 0.1466472 * v_[310] + v_[76] * v_[312] + v_[94] * v_[54] + v_[106] * v_[28] + v_[85] * v_[308] + v_[141] * v_[109] + v_[135] * v_[84] + (v_[114] * v_[371]) / v_[113] - -0.5824264 * (v_[306] + v_[359]); v_[14] = 0.1466472 * v_[237] + -3.44 * (v_[309] - v_[110]) + -0.5824264 * v_[307] + -1 * v_[14] + v_[128] * v_[28] + v_[138] * v_[308] + v_[140] * v_[109] + v_[129] * v_[84] + (v_[127] * v_[371]) / v_[113]; v_[306] = 3.44 * v_[39] + -0.5824264 * v_[310] + v_[31] * v_[312] + v_[76] * v_[54] + v_[125] * v_[28] + v_[117] * v_[308] + v_[129] * v_[109] + v_[133] * v_[84] + (v_[115] * v_[371]) / v_[113] - 0.1466472 * (v_[144] + v_[306]); v_[310] = v_[108] * x_in[19] + v_[191] * x_in[20]; v_[54] = 0.207 * x_in[20] + x_in[21]; v_[312] = x_in[18] + x_in[27]; v_[144] = -0.207 * v_[191]; v_[39] = v_[194] * x_in[20] + 
v_[192] * x_in[19] + v_[144] * x_in[18] + v_[191] * x_in[23] + v_[108] * x_in[22]; v_[309] = v_[310] * v_[54] - v_[312] * v_[39]; v_[110] = v_[310] * v_[312]; v_[237] = v_[312] * v_[312]; v_[307] = v_[191] * x_in[19] + v_[193] * x_in[20]; v_[359] = v_[307] * v_[307]; v_[368] = v_[310] + x_in[28]; v_[80] = 0 - v_[171]; v_[107] = 0.3735 * v_[191]; v_[126] = 0.3735 * v_[108]; v_[257] = 0 - v_[108]; v_[294] = v_[107] * x_in[20] + v_[126] * x_in[19] + v_[190] * x_in[18] + v_[257] * x_in[23] + v_[191] * x_in[22]; v_[250] = v_[80] * v_[54] + v_[92] * v_[294] + v_[177] * v_[310]; v_[90] = v_[368] * v_[250]; v_[83] = v_[92] * v_[307] + v_[172] * v_[312]; v_[361] = 0.08 * v_[312] + v_[39]; v_[215] = v_[83] * v_[361]; v_[321] = v_[172] * v_[307] + v_[175] * v_[312]; v_[10] = v_[83] * v_[321]; v_[100] = v_[83] * v_[368]; v_[102] = v_[368] + x_in[29]; v_[87] = 0 - v_[92]; v_[93] = 0 - v_[171]; v_[89] = 0.08 * v_[92]; v_[101] = v_[87] * v_[54] + v_[93] * v_[294] + v_[89] * v_[310]; v_[98] = v_[149] * v_[101] + v_[147] * v_[250] + v_[152] * v_[368]; v_[96] = v_[102] * v_[98]; v_[1] = v_[147] * v_[83] + v_[149] * v_[321]; v_[360] = -0.35 * v_[321] + v_[361]; v_[36] = v_[1] * v_[360]; v_[22] = v_[150] * v_[83] + v_[147] * v_[321]; v_[8] = v_[1] * v_[22]; v_[330] = 0.3735 + -0.33 * v_[92] * v_[149] - 0.33 * v_[171] * v_[147] - 0.35 * v_[171]; v_[365] = 0.5 * x_in[0]; v_[338] = cos(v_[365]); v_[319] = 0.5 * x_in[1]; v_[37] = cos(v_[319]); v_[320] = v_[338] * v_[37]; v_[367] = 0.5 * x_in[2]; v_[323] = sin(v_[367]); v_[365] = sin(v_[365]); v_[319] = sin(v_[319]); v_[322] = v_[365] * v_[319]; v_[367] = cos(v_[367]); v_[343] = v_[320] * v_[323] + v_[322] * v_[367]; v_[364] = 2. * v_[343]; v_[319] = v_[338] * v_[319]; v_[365] = v_[365] * v_[37]; v_[37] = v_[319] * v_[367] - v_[365] * v_[323]; v_[338] = v_[364] * v_[37]; v_[365] = v_[365] * v_[367] + v_[319] * v_[323]; v_[319] = 2. 
* v_[365]; v_[367] = v_[320] * v_[367] - v_[322] * v_[323]; v_[322] = v_[319] * v_[367]; v_[323] = 0.5 * x_in[1]; v_[320] = sin(v_[323]); v_[326] = v_[341] * v_[320]; v_[336] = 0.5 * x_in[2]; v_[340] = cos(v_[336]); v_[323] = cos(v_[323]); v_[311] = v_[351] * v_[323]; v_[336] = sin(v_[336]); v_[363] = v_[326] * v_[340] - v_[311] * v_[336]; v_[362] = 2. * v_[363]; v_[372] = v_[362] * v_[363]; v_[311] = v_[311] * v_[340] + v_[326] * v_[336]; v_[326] = 2. * v_[311]; v_[375] = v_[326] * v_[311]; v_[377] = 0.33 * v_[108]; v_[376] = sin(x_in[11]); v_[366] = sin(x_in[10]); v_[370] = -0.33 * v_[191]; v_[373] = -0.207 + 0.33 * v_[191] * v_[92] * v_[147] + -0.33 * v_[191] * v_[366] * v_[376] + 0.35 * v_[191] * v_[92] + 0.08 * v_[191]; v_[374] = 0.3735 + -0.33 * v_[92] * v_[376] - 0.33 * v_[366] * v_[147] - 0.35 * v_[366]; v_[377] = (v_[377] * v_[92] * v_[376] + 0.33 * v_[108] * v_[366] * v_[147]) * x_in[29] + (v_[377] * v_[366] * v_[147] + 0.33 * v_[108] * v_[92] * v_[376] + 0.35 * v_[108] * v_[366]) * x_in[28] + (0.33 * v_[191] * v_[92] * v_[147] + v_[370] * v_[366] * v_[376] + 0.35 * v_[191] * v_[92] + 0.08 * v_[191]) * x_in[27] + x_in[23] + x_in[18] * v_[373] - x_in[19] * v_[374]; v_[323] = v_[341] * v_[323]; v_[320] = v_[351] * v_[320]; v_[351] = v_[323] * v_[336] + v_[320] * v_[340]; v_[341] = 2. 
* v_[351]; v_[363] = v_[341] * v_[363]; v_[320] = v_[323] * v_[340] - v_[320] * v_[336]; v_[326] = v_[326] * v_[320]; v_[323] = 0.33 * v_[108] * v_[366] * v_[376] - 0.33 * v_[108] * v_[92] * v_[147] - 0.35 * v_[108] * v_[92] - 0.08 * v_[108]; v_[374] = (-0.33 * v_[191] * v_[92] * v_[376] - 0.33 * v_[191] * v_[366] * v_[147]) * x_in[29] + (v_[370] * v_[92] * v_[376] - 0.33 * v_[191] * v_[366] * v_[147] - 0.35 * v_[191] * v_[366]) * x_in[28] + (0.33 * v_[108] * v_[92] * v_[147] + -0.33 * v_[108] * v_[366] * v_[376] + 0.35 * v_[108] * v_[92] + 0.08 * v_[108]) * x_in[27] + x_in[22] + x_in[20] * v_[374] - x_in[18] * v_[323]; v_[370] = v_[341] * v_[311]; v_[336] = v_[362] * v_[320]; v_[323] = (0.33 * v_[366] * v_[376] - 0.33 * v_[92] * v_[147]) * x_in[29] + (0.33 * v_[366] * v_[376] - 0.33 * v_[92] * v_[147] - 0.35 * v_[92]) * x_in[28] + x_in[21] + x_in[19] * v_[323] - x_in[20] * v_[373]; v_[373] = 0.5 * x_in[1]; v_[340] = sin(v_[373]); v_[369] = v_[3] * v_[340]; v_[378] = 0.5 * x_in[2]; v_[379] = cos(v_[378]); v_[373] = cos(v_[373]); v_[380] = v_[4] * v_[373]; v_[378] = sin(v_[378]); v_[381] = v_[369] * v_[379] - v_[380] * v_[378]; v_[382] = 2. * v_[381]; v_[380] = v_[380] * v_[379] + v_[369] * v_[378]; v_[369] = 2. * v_[380]; v_[373] = v_[3] * v_[373]; v_[340] = v_[4] * v_[340]; v_[4] = 2. * (v_[373] * v_[378] + v_[340] * v_[379]); v_[340] = v_[373] * v_[379] - v_[340] * v_[378]; v_[340] = (1 - v_[382] * v_[381] - v_[369] * v_[380]) * (0.33 * v_[108] * v_[366] * v_[376] - 0.33 * v_[108] * v_[92] * v_[147] - 0.35 * v_[108] * v_[92] - 0.08 * v_[108]) + (v_[4] * v_[381] + v_[369] * v_[340]) * (-0.207 + 0.33 * v_[191] * v_[92] * v_[147] + -0.33 * v_[191] * v_[366] * v_[376] + 0.35 * v_[191] * v_[92] + 0.08 * v_[191]) + (v_[4] * v_[380] - v_[382] * v_[340]) * (0.3735 + -0.33 * v_[92] * v_[376] - 0.33 * v_[366] * v_[147] - 0.35 * v_[366]) + x_in[5]; v_[4] = 1 / (1 + exp(100. * v_[340])); v_[340] = -1000. 
* ((1 - v_[372] - v_[375]) * v_[377] + (v_[363] + v_[326]) * v_[374] + (v_[370] - v_[336]) * v_[323]) * v_[4] + 5000. * exp(-100. * (v_[340] + 0.02)); v_[343] = v_[364] * v_[343]; v_[319] = v_[319] * v_[365]; v_[351] = v_[341] * v_[351]; v_[311] = v_[362] * v_[311]; v_[320] = v_[341] * v_[320]; v_[326] = -1000. * ((v_[363] - v_[326]) * v_[377] + (1 - v_[351] - v_[375]) * v_[374] + (v_[311] + v_[320]) * v_[323]) * v_[4]; v_[363] = 2. * v_[37]; v_[375] = v_[363] * v_[365]; v_[341] = v_[364] * v_[367]; v_[320] = -1000. * ((v_[370] + v_[336]) * v_[377] + (v_[311] - v_[320]) * v_[374] + (1 - v_[351] - v_[372]) * v_[323]) * v_[4]; v_[311] = (v_[338] + v_[322]) * v_[340] + (1 - v_[343] - v_[319]) * v_[326] + (v_[375] - v_[341]) * v_[320]; v_[351] = -0.207 + 0.33 * v_[191] * v_[92] * v_[147] + -0.33 * v_[191] * v_[171] * v_[149] + 0.35 * v_[191] * v_[92] + 0.08 * v_[191]; v_[365] = v_[364] * v_[365]; v_[367] = v_[363] * v_[367]; v_[363] = v_[363] * v_[37]; v_[341] = (v_[365] - v_[367]) * v_[340] + (v_[375] + v_[341]) * v_[326] + (1 - v_[343] - v_[363]) * v_[320]; v_[375] = v_[330] * v_[311] - v_[351] * v_[341]; v_[343] = 0.33 * v_[108] * v_[171] * v_[149] - 0.33 * v_[108] * v_[92] * v_[147] - 0.35 * v_[108] * v_[92] - 0.08 * v_[108]; v_[363] = (1 - v_[363] - v_[319]) * v_[340] + (v_[338] - v_[322]) * v_[326] + (v_[365] + v_[367]) * v_[320]; v_[330] = v_[343] * v_[341] - v_[330] * v_[363]; v_[367] = 0 - v_[149]; v_[365] = 0.35 * v_[147]; v_[320] = v_[147] * v_[101] + v_[367] * v_[250] + v_[365] * v_[368]; v_[360] = v_[22] * v_[360] - v_[102] * v_[320]; v_[326] = 0.1104774 * (v_[96] - v_[36]) + 0.025941 * v_[8] - v_[191] * v_[375] - v_[108] * v_[330] - (0.3735 * v_[108] - 0.35 * v_[108] * v_[171]) * v_[363] - (0.35 * v_[191] * v_[171] - 0.3735 * v_[191]) * v_[311] - (0.08 + 0.35 * v_[92] - 0.207 * v_[191]) * v_[341] - 0.0004405 * v_[360]; v_[319] = x_in[41] - v_[326]; v_[340] = v_[22] * v_[102]; v_[322] = (0 - v_[98]) * x_in[29]; v_[338] = v_[320] * x_in[29]; v_[37] = v_[22] 
* x_in[29]; v_[364] = v_[108] * v_[92]; v_[4] = v_[108] * v_[171]; v_[323] = v_[364] * v_[149] + v_[4] * v_[147]; v_[336] = v_[191] * v_[171]; v_[370] = v_[336] * v_[147]; v_[374] = v_[171] * v_[149] - v_[92] * v_[147]; v_[377] = v_[102] * v_[102]; v_[372] = v_[1] * v_[1]; v_[362] = -0.881 * (v_[36] - v_[96]) + 0.1104774 * v_[8] + -8.81e-05 * v_[340] + 0.414812462825713 * v_[322] + 0.00185880198235362 * v_[338] + 8.81e-05 * v_[37] + (0.1104774 * v_[319]) / 0.026181 - v_[323] * v_[363] - ((0 - v_[191]) * v_[92] * v_[149] - v_[370]) * v_[311] - v_[374] * v_[341] - 0.0004405 * (v_[377] + v_[372]); v_[102] = v_[1] * v_[102]; v_[369] = (0 - v_[1]) * x_in[29]; v_[4] = v_[4] * v_[149] - v_[364] * v_[147]; v_[336] = v_[191] * v_[92] * v_[147] - v_[336] * v_[149]; v_[364] = (0 - v_[92]) * v_[149] - v_[171] * v_[147]; v_[380] = v_[22] * v_[22]; v_[377] = 0.881 * v_[360] + -8.81e-05 * v_[102] + 0.0004405 * v_[8] + 0.00185880198235362 * v_[322] + 0.880992588508842 * v_[338] + -8.81e-05 * v_[369] + (-0.0004405 * v_[319]) / 0.026181 - v_[4] * v_[363] - v_[336] * v_[311] - v_[364] * v_[341] - 0.1104774 * (v_[377] + v_[380]); v_[361] = v_[321] * v_[361] - v_[368] * v_[101]; v_[8] = v_[321] * v_[368]; v_[382] = v_[321] * v_[321]; v_[381] = v_[83] * v_[83]; v_[326] = 0.47422804 * (v_[90] - v_[215]) + 0.081641 * v_[10] + 2.1e-05 * v_[100] + (0.026181 * v_[319]) / 0.026181 + v_[326] + v_[365] * v_[362] + v_[152] * v_[377] - -0.0825825 * v_[361] - 0.000102 * v_[8] - 0.007418 * (v_[382] - v_[381]); v_[366] = x_in[40] - v_[326]; v_[376] = (0 - v_[83]) * x_in[28]; v_[373] = v_[321] * x_in[28]; v_[378] = (0 - v_[250]) * x_in[28]; v_[379] = v_[101] * x_in[28]; v_[368] = v_[368] * v_[368]; v_[215] = -3.146 * (v_[215] - v_[90]) + 0.47422804 * v_[10] + v_[147] * v_[362] + v_[149] * v_[377] + v_[157] * v_[376] + v_[170] * v_[373] + v_[182] * v_[378] + v_[181] * v_[379] + (v_[168] * v_[366]) / v_[159] - -0.0825825 * (v_[368] + v_[381]); v_[361] = 3.146 * v_[361] + -0.0825825 * v_[10] + v_[367] * 
v_[362] + v_[147] * v_[377] + v_[174] * v_[376] + v_[151] * v_[373] + v_[181] * v_[378] + v_[162] * v_[379] + (v_[166] * v_[366]) / v_[159] - 0.47422804 * (v_[368] + v_[382]); v_[377] = (0 - v_[307]) * x_in[27]; v_[362] = v_[310] * x_in[27]; v_[181] = (0 - v_[294]) * x_in[27]; v_[162] = v_[39] * x_in[27]; v_[90] = v_[307] * v_[310]; v_[182] = v_[312] * v_[294]; v_[54] = v_[307] * v_[54]; v_[312] = v_[307] * v_[312]; v_[343] = v_[351] * v_[363] - v_[343] * v_[311]; v_[320] = v_[1] * v_[320] - v_[22] * v_[98]; v_[374] = -8.81e-05 * v_[360] + -8.81e-05 * v_[338] + 0.026409 * v_[369] - v_[323] * v_[375] - ((0 - v_[191]) * v_[92] * v_[149] - v_[370]) * v_[330] - v_[374] * v_[343] - (((0.207 - 0.08 * v_[191]) * v_[171] - 0.3735 * v_[191] * v_[92]) * v_[149] + ((0.08 * v_[191] - 0.207) * v_[92] + -0.3735 * v_[191] * v_[171] + 0.35 * v_[191]) * v_[147]) * v_[363] - ((-0.08 * v_[108] * v_[171] - 0.3735 * v_[108] * v_[92]) * v_[149] + (0.08 * v_[108] * v_[92] + -0.3735 * v_[108] * v_[171] + 0.35 * v_[108]) * v_[147]) * v_[311] - (-0.207 * v_[108] * v_[92] * v_[149] - 0.207 * v_[108] * v_[171] * v_[147]) * v_[341] - 0.1104774 * v_[320] - 0.025713 * v_[102]; v_[343] = 0.0004405 * v_[320] + -8.81e-05 * (v_[36] - v_[96]) + -0.000227999999999999 * v_[340] + 8.81e-05 * v_[322] + 0.000468 * v_[37] - v_[4] * v_[375] - v_[336] * v_[330] - v_[364] * v_[343] - (((0.08 * v_[191] - 0.207) * v_[92] + -0.3735 * v_[191] * v_[171] + 0.35 * v_[191]) * v_[149] + ((0.08 * v_[191] - 0.207) * v_[171] + 0.3735 * v_[191] * v_[92]) * v_[147]) * v_[363] - ((0.08 * v_[108] * v_[92] + -0.3735 * v_[108] * v_[171] + 0.35 * v_[108]) * v_[149] + (0.08 * v_[108] * v_[171] + 0.3735 * v_[108] * v_[92]) * v_[147]) * v_[311] - (0.207 * v_[108] * v_[92] * v_[147] - 0.207 * v_[108] * v_[171] * v_[149]) * v_[341]; v_[320] = 0.881 * v_[320] + 0.1104774 * v_[102] + 0.0004405 * v_[340] + -0.1104774 * v_[369] + 0.0004405 * v_[37] - v_[191] * v_[363] - v_[108] * v_[311] - -8.81e-05 * (v_[380] + v_[372]); v_[101] = 
v_[83] * v_[101] - v_[321] * v_[250]; v_[381] = 0.007418 * v_[8] + 0.000102 * (v_[368] - v_[381]) + v_[147] * v_[374] + v_[149] * v_[343] + -0.35 * v_[320] + v_[165] * v_[376] + v_[154] * v_[373] + v_[157] * v_[378] + v_[174] * v_[379] + (v_[153] * v_[366]) / v_[159] - 0.47422804 * v_[101] - 0.084376 * v_[100] - 2.1e-05 * v_[10]; v_[343] = 0.002735 * v_[8] + -0.0825825 * v_[101] + 0.000102 * v_[10] + v_[150] * v_[374] + v_[147] * v_[343] + v_[154] * v_[376] + v_[148] * v_[373] + v_[170] * v_[378] + v_[151] * v_[379] + (v_[158] * v_[366]) / v_[159] - 0.007418 * v_[100] - 2.1e-05 * (v_[368] - v_[382]); v_[101] = 0.47422804 * v_[100] + 3.146 * v_[101] + -0.0825825 * v_[8] + v_[320] + v_[167] * v_[376] + v_[161] * v_[373]; v_[320] = v_[310] * v_[310]; v_[8] = 0.009466 * v_[90] + 0.1466472 * (v_[182] - v_[54]) + -5.1e-05 * v_[312] + v_[175] * v_[381] + v_[172] * v_[343] + 0.08 * v_[101] - 0.022734 * v_[110] - 3.6e-05 * (v_[320] - v_[359]); v_[100] = x_in[39] - v_[8]; v_[294] = v_[307] * v_[39] - v_[310] * v_[294]; v_[326] = -0.5824264 * v_[309] + 3.6e-05 * v_[110] + 0.022734 * (v_[237] - v_[359]) + (v_[159] * v_[366]) / v_[159] + v_[326] + v_[89] * v_[215] + v_[177] * v_[361] + v_[197] * v_[377] + v_[155] * v_[362] + v_[199] * v_[181] + v_[196] * v_[162] + (v_[217] * v_[100]) / v_[185] - 0.1466472 * v_[294] - -0.123672 * v_[312] - -5.1e-05 * v_[90]; v_[343] = -0.133138 * v_[110] + -0.5824264 * (v_[54] - v_[182]) + 0.022734 * v_[90] + v_[172] * v_[381] + v_[92] * v_[343] + v_[155] * v_[377] + v_[179] * v_[362] + v_[209] * v_[181] + v_[189] * v_[162] + (v_[160] * v_[100]) / v_[185] - 3.6e-05 * v_[312] - -5.1e-05 * (v_[237] - v_[320]); v_[294] = 3.44 * v_[294] + 0.1466472 * v_[312] + v_[87] * v_[215] + v_[80] * v_[361] + v_[178] * v_[377] + v_[163] * v_[362] + v_[212] * v_[181] + v_[206] * v_[162] + (v_[186] * v_[100]) / v_[185] - -0.5824264 * (v_[320] + v_[359]); v_[101] = 0.1466472 * v_[90] + -3.44 * (v_[54] - v_[182]) + -0.5824264 * v_[110] + v_[101] + v_[199] * v_[377] 
+ v_[209] * v_[362] + v_[211] * v_[181] + v_[200] * v_[162] + (v_[198] * v_[100]) / v_[185]; v_[320] = 3.44 * v_[309] + -0.5824264 * v_[312] + v_[93] * v_[215] + v_[92] * v_[361] + v_[196] * v_[377] + v_[189] * v_[362] + v_[200] * v_[181] + v_[204] * v_[162] + (v_[187] * v_[100]) / v_[185] - 0.1466472 * (v_[237] + v_[320]); v_[312] = v_[169] * x_in[19] + v_[265] * x_in[20]; v_[361] = 0.207 * x_in[20] + -1 * x_in[21]; v_[215] = -1 * x_in[18] + x_in[24]; v_[237] = 0.207 * v_[265]; v_[309] = 0 - v_[180]; v_[54] = v_[268] * x_in[20] + v_[267] * x_in[19] + v_[237] * x_in[18] + v_[265] * x_in[23] + v_[309] * x_in[22]; v_[182] = v_[312] * v_[361] - v_[215] * v_[54]; v_[90] = v_[312] * v_[215]; v_[110] = v_[215] * v_[215]; v_[359] = v_[266] * x_in[19] + v_[169] * x_in[20]; v_[381] = v_[359] * v_[359]; v_[155] = -0.08 * v_[219]; v_[179] = v_[219] * v_[359] + v_[244] * v_[215]; v_[197] = -0.08 * v_[215] + -1 * v_[54]; v_[307] = v_[179] * v_[197]; v_[39] = -1 * v_[312] + x_in[25]; v_[310] = -0.3735 * v_[265]; v_[167] = 0.3735 * v_[180]; v_[161] = 0 - v_[180]; v_[374] = 0 - v_[265]; v_[368] = v_[310] * x_in[20] + v_[167] * x_in[19] + v_[263] * x_in[18] + v_[161] * x_in[23] + v_[374] * x_in[22]; v_[382] = v_[244] * v_[361] + v_[219] * v_[368] + v_[249] * v_[312]; v_[10] = v_[39] * v_[382]; v_[148] = v_[245] * v_[359] + v_[219] * v_[215]; v_[154] = v_[179] * v_[148]; v_[151] = v_[220] * v_[179] + v_[222] * v_[148]; v_[170] = -0.35 * v_[148] + v_[197]; v_[150] = v_[151] * v_[170]; v_[165] = v_[39] + x_in[26]; v_[174] = 0 - v_[244]; v_[157] = v_[219] * v_[361] + v_[174] * v_[368] + v_[155] * v_[312]; v_[321] = v_[222] * v_[157] + v_[220] * v_[382] + v_[225] * v_[39]; v_[83] = v_[165] * v_[321]; v_[250] = v_[223] * v_[179] + v_[220] * v_[148]; v_[380] = v_[151] * v_[250]; v_[369] = v_[250] * v_[165]; v_[102] = (0 - v_[321]) * x_in[26]; v_[372] = 0 - v_[222]; v_[37] = 0.35 * v_[220]; v_[340] = v_[220] * v_[157] + v_[372] * v_[382] + v_[37] * v_[39]; v_[363] = v_[340] * x_in[26]; 
v_[311] = v_[250] * x_in[26]; v_[364] = 0.3735 + -0.33 * v_[219] * v_[222] - 0.33 * v_[244] * v_[220] - 0.35 * v_[244]; v_[336] = 0.5 * x_in[0]; v_[4] = cos(v_[336]); v_[330] = 0.5 * x_in[1]; v_[375] = cos(v_[330]); v_[341] = v_[4] * v_[375]; v_[36] = 0.5 * x_in[2]; v_[96] = sin(v_[36]); v_[336] = sin(v_[336]); v_[330] = sin(v_[330]); v_[171] = v_[336] * v_[330]; v_[36] = cos(v_[36]); v_[370] = v_[341] * v_[96] + v_[171] * v_[36]; v_[323] = 2. * v_[370]; v_[330] = v_[4] * v_[330]; v_[336] = v_[336] * v_[375]; v_[375] = v_[330] * v_[36] - v_[336] * v_[96]; v_[4] = v_[323] * v_[375]; v_[336] = v_[336] * v_[36] + v_[330] * v_[96]; v_[330] = 2. * v_[336]; v_[36] = v_[341] * v_[36] - v_[171] * v_[96]; v_[171] = v_[330] * v_[36]; v_[96] = 0.5 * x_in[0]; v_[341] = cos(v_[96]); v_[360] = 0.5 * x_in[1]; v_[22] = sin(v_[360]); v_[1] = v_[341] * v_[22]; v_[98] = 0.5 * x_in[2]; v_[351] = cos(v_[98]); v_[96] = sin(v_[96]); v_[360] = cos(v_[360]); v_[3] = v_[96] * v_[360]; v_[98] = sin(v_[98]); v_[383] = v_[1] * v_[351] - v_[3] * v_[98]; v_[384] = 2. * v_[383]; v_[385] = v_[384] * v_[383]; v_[3] = v_[3] * v_[351] + v_[1] * v_[98]; v_[1] = 2. * v_[3]; v_[386] = v_[1] * v_[3]; v_[387] = 0.33 * v_[180]; v_[388] = sin(x_in[8]); v_[389] = sin(x_in[7]); v_[390] = sin(x_in[6]); v_[391] = 0.207 + 0.33 * v_[390] * v_[389] * v_[388] - 0.33 * v_[390] * v_[219] * v_[220] - 0.35 * v_[390] * v_[219] - 0.08 * v_[390]; v_[392] = 0.3735 + -0.33 * v_[219] * v_[388] - 0.33 * v_[389] * v_[220] - 0.35 * v_[389]; v_[387] = (v_[387] * v_[219] * v_[388] + 0.33 * v_[180] * v_[389] * v_[220]) * x_in[26] + (v_[387] * v_[389] * v_[220] + 0.33 * v_[180] * v_[219] * v_[388] + 0.35 * v_[180] * v_[389]) * x_in[25] + (0.33 * v_[390] * v_[219] * v_[220] + -0.33 * v_[390] * v_[389] * v_[388] + 0.35 * v_[390] * v_[219] + 0.08 * v_[390]) * x_in[24] + x_in[23] + x_in[18] * v_[391] - x_in[19] * v_[392]; v_[360] = v_[341] * v_[360]; v_[96] = v_[96] * v_[22]; v_[22] = v_[360] * v_[98] + v_[96] * v_[351]; v_[341] = 2. 
* v_[22]; v_[383] = v_[341] * v_[383]; v_[96] = v_[360] * v_[351] - v_[96] * v_[98]; v_[1] = v_[1] * v_[96]; v_[360] = 0.33 * v_[390]; v_[98] = 0.33 * v_[180] * v_[389] * v_[388] - 0.33 * v_[180] * v_[219] * v_[220] - 0.35 * v_[180] * v_[219] - 0.08 * v_[180]; v_[360] = (v_[360] * v_[219] * v_[388] + 0.33 * v_[390] * v_[389] * v_[220]) * x_in[26] + (v_[360] * v_[389] * v_[220] + 0.33 * v_[390] * v_[219] * v_[388] + 0.35 * v_[390] * v_[389]) * x_in[25] + (0.33 * v_[180] * v_[389] * v_[388] - 0.33 * v_[180] * v_[219] * v_[220] - 0.35 * v_[180] * v_[219] - 0.08 * v_[180]) * x_in[24] + x_in[22] + x_in[20] * v_[392] - x_in[18] * v_[98]; v_[392] = v_[341] * v_[3]; v_[351] = v_[384] * v_[96]; v_[98] = (0.33 * v_[389] * v_[388] - 0.33 * v_[219] * v_[220]) * x_in[26] + (0.33 * v_[389] * v_[388] - 0.33 * v_[219] * v_[220] - 0.35 * v_[219]) * x_in[25] + x_in[21] + x_in[19] * v_[98] - x_in[20] * v_[391]; v_[391] = 0.5 * x_in[0]; v_[393] = cos(v_[391]); v_[394] = 0.5 * x_in[1]; v_[395] = sin(v_[394]); v_[396] = v_[393] * v_[395]; v_[397] = 0.5 * x_in[2]; v_[398] = cos(v_[397]); v_[391] = sin(v_[391]); v_[394] = cos(v_[394]); v_[399] = v_[391] * v_[394]; v_[397] = sin(v_[397]); v_[400] = v_[396] * v_[398] - v_[399] * v_[397]; v_[401] = 2. * v_[400]; v_[399] = v_[399] * v_[398] + v_[396] * v_[397]; v_[396] = 2. * v_[399]; v_[394] = v_[393] * v_[394]; v_[391] = v_[391] * v_[395]; v_[395] = 2. * (v_[394] * v_[397] + v_[391] * v_[398]); v_[391] = v_[394] * v_[398] - v_[391] * v_[397]; v_[391] = (1 - v_[401] * v_[400] - v_[396] * v_[399]) * (0.33 * v_[180] * v_[389] * v_[388] - 0.33 * v_[180] * v_[219] * v_[220] - 0.35 * v_[180] * v_[219] - 0.08 * v_[180]) + (v_[395] * v_[400] + v_[396] * v_[391]) * (0.207 + 0.33 * v_[390] * v_[389] * v_[388] - 0.33 * v_[390] * v_[219] * v_[220] - 0.35 * v_[390] * v_[219] - 0.08 * v_[390]) + (v_[395] * v_[399] - v_[401] * v_[391]) * (0.3735 + -0.33 * v_[219] * v_[388] - 0.33 * v_[389] * v_[220] - 0.35 * v_[389]) + x_in[5]; v_[395] = 1 / (1 + exp(100. 
* v_[391])); v_[391] = -1000. * ((1 - v_[385] - v_[386]) * v_[387] + (v_[383] + v_[1]) * v_[360] + (v_[392] - v_[351]) * v_[98]) * v_[395] + 5000. * exp(-100. * (v_[391] + 0.02)); v_[370] = v_[323] * v_[370]; v_[330] = v_[330] * v_[336]; v_[22] = v_[341] * v_[22]; v_[3] = v_[384] * v_[3]; v_[96] = v_[341] * v_[96]; v_[1] = -1000. * ((v_[383] - v_[1]) * v_[387] + (1 - v_[22] - v_[386]) * v_[360] + (v_[3] + v_[96]) * v_[98]) * v_[395]; v_[383] = 2. * v_[375]; v_[386] = v_[383] * v_[336]; v_[341] = v_[323] * v_[36]; v_[96] = -1000. * ((v_[392] + v_[351]) * v_[387] + (v_[3] - v_[96]) * v_[360] + (1 - v_[22] - v_[385]) * v_[98]) * v_[395]; v_[3] = (v_[4] + v_[171]) * v_[391] + (1 - v_[370] - v_[330]) * v_[1] + (v_[386] - v_[341]) * v_[96]; v_[22] = 0.207 + 0.33 * v_[265] * v_[244] * v_[222] - 0.33 * v_[265] * v_[219] * v_[220] - 0.35 * v_[265] * v_[219] - 0.08 * v_[265]; v_[336] = v_[323] * v_[336]; v_[36] = v_[383] * v_[36]; v_[383] = v_[383] * v_[375]; v_[341] = (v_[336] - v_[36]) * v_[391] + (v_[386] + v_[341]) * v_[1] + (1 - v_[370] - v_[383]) * v_[96]; v_[386] = v_[364] * v_[3] - v_[22] * v_[341]; v_[370] = 0.33 * v_[180] * v_[244] * v_[222] - 0.33 * v_[180] * v_[219] * v_[220] - 0.35 * v_[180] * v_[219] - 0.08 * v_[180]; v_[383] = (1 - v_[383] - v_[330]) * v_[391] + (v_[4] - v_[171]) * v_[1] + (v_[336] + v_[36]) * v_[96]; v_[364] = v_[370] * v_[341] - v_[364] * v_[383]; v_[170] = v_[250] * v_[170] - v_[165] * v_[340]; v_[36] = 0.1104774 * (v_[83] - v_[150]) + 0.025941 * v_[380] - (0 - v_[265]) * v_[386] - v_[180] * v_[364] - (0.3735 * v_[180] - 0.35 * v_[180] * v_[244]) * v_[383] - (0.3735 * v_[265] - 0.35 * v_[265] * v_[244]) * v_[3] - (0.08 + 0.35 * v_[219] - 0.207 * v_[265]) * v_[341] - 0.0004405 * v_[170]; v_[336] = x_in[38] - v_[36]; v_[96] = v_[180] * v_[219]; v_[1] = v_[180] * v_[244]; v_[330] = v_[96] * v_[222] + v_[1] * v_[220]; v_[391] = v_[265] * v_[219]; v_[171] = v_[265] * v_[244]; v_[4] = v_[171] * v_[220]; v_[375] = v_[244] * v_[222] - v_[219] * 
v_[220]; v_[323] = v_[165] * v_[165]; v_[395] = v_[151] * v_[151]; v_[98] = -0.881 * (v_[150] - v_[83]) + 0.1104774 * v_[380] + -8.81e-05 * v_[369] + 0.414812462825713 * v_[102] + 0.00185880198235362 * v_[363] + 8.81e-05 * v_[311] + (0.1104774 * v_[336]) / 0.026181 - v_[330] * v_[383] - (v_[391] * v_[222] + v_[4]) * v_[3] - v_[375] * v_[341] - 0.0004405 * (v_[323] + v_[395]); v_[165] = v_[151] * v_[165]; v_[351] = (0 - v_[151]) * x_in[26]; v_[1] = v_[1] * v_[222] - v_[96] * v_[220]; v_[171] = v_[171] * v_[222]; v_[96] = v_[250] * v_[250]; v_[323] = 0.881 * v_[170] + -8.81e-05 * v_[165] + 0.0004405 * v_[380] + 0.00185880198235362 * v_[102] + 0.880992588508842 * v_[363] + -8.81e-05 * v_[351] + (-0.0004405 * v_[336]) / 0.026181 - v_[1] * v_[383] - (v_[171] - v_[391] * v_[220]) * v_[3] - ((0 - v_[219]) * v_[222] - v_[244] * v_[220]) * v_[341] - 0.1104774 * (v_[323] + v_[96]); v_[391] = (0 - v_[179]) * x_in[25]; v_[380] = v_[148] * x_in[25]; v_[392] = (0 - v_[382]) * x_in[25]; v_[360] = v_[157] * x_in[25]; v_[387] = v_[179] * v_[39]; v_[197] = v_[148] * v_[197] - v_[39] * v_[157]; v_[385] = v_[148] * v_[39]; v_[384] = v_[148] * v_[148]; v_[396] = v_[179] * v_[179]; v_[36] = 0.47422804 * (v_[10] - v_[307]) + 0.081641 * v_[154] + 2.1e-05 * v_[387] + (0.026181 * v_[336]) / 0.026181 + v_[36] + v_[37] * v_[98] + v_[225] * v_[323] - -0.0825825 * v_[197] - 0.000102 * v_[385] - 0.007418 * (v_[384] - v_[396]); v_[399] = x_in[37] - v_[36]; v_[39] = v_[39] * v_[39]; v_[10] = -3.146 * (v_[307] - v_[10]) + 0.47422804 * v_[154] + v_[220] * v_[98] + v_[222] * v_[323] + v_[230] * v_[391] + v_[243] * v_[380] + v_[255] * v_[392] + v_[254] * v_[360] + (v_[241] * v_[399]) / v_[232] - -0.0825825 * (v_[39] + v_[396]); v_[197] = 3.146 * v_[197] + -0.0825825 * v_[154] + v_[372] * v_[98] + v_[220] * v_[323] + v_[247] * v_[391] + v_[224] * v_[380] + v_[254] * v_[392] + v_[235] * v_[360] + (v_[239] * v_[399]) / v_[232] - 0.47422804 * (v_[39] + v_[384]); v_[323] = (0 - v_[359]) * x_in[24]; v_[98] 
= v_[312] * x_in[24]; v_[254] = (0 - v_[368]) * x_in[24]; v_[235] = v_[54] * x_in[24]; v_[307] = v_[359] * v_[312]; v_[255] = v_[215] * v_[368]; v_[361] = v_[359] * v_[361]; v_[215] = v_[359] * v_[215]; v_[401] = v_[265] * v_[219]; v_[370] = v_[22] * v_[383] - v_[370] * v_[3]; v_[22] = 0.207 * v_[180]; v_[340] = v_[151] * v_[340] - v_[250] * v_[321]; v_[375] = -8.81e-05 * v_[170] + -8.81e-05 * v_[363] + 0.026409 * v_[351] - v_[330] * v_[386] - (v_[401] * v_[222] + v_[4]) * v_[364] - v_[375] * v_[370] - (((0.08 * v_[265] - 0.207) * v_[244] + 0.3735 * v_[265] * v_[219]) * v_[222] + ((0.207 - 0.08 * v_[265]) * v_[219] + 0.3735 * v_[265] * v_[244] - 0.35 * v_[265]) * v_[220]) * v_[383] - ((-0.08 * v_[180] * v_[244] - 0.3735 * v_[180] * v_[219]) * v_[222] + (0.08 * v_[180] * v_[219] + -0.3735 * v_[180] * v_[244] + 0.35 * v_[180]) * v_[220]) * v_[3] - (v_[22] * v_[219] * v_[222] + 0.207 * v_[180] * v_[244] * v_[220]) * v_[341] - 0.1104774 * v_[340] - 0.025713 * v_[165]; v_[22] = 0.0004405 * v_[340] + -8.81e-05 * (v_[150] - v_[83]) + -0.000227999999999999 * v_[369] + 8.81e-05 * v_[102] + 0.000468 * v_[311] - v_[1] * v_[386] - (v_[171] - v_[401] * v_[220]) * v_[364] - ((0 - v_[219]) * v_[222] - v_[244] * v_[220]) * v_[370] - (((0.207 - 0.08 * v_[265]) * v_[219] + 0.3735 * v_[265] * v_[244] - 0.35 * v_[265]) * v_[222] + ((0.207 - 0.08 * v_[265]) * v_[244] - 0.3735 * v_[265] * v_[219]) * v_[220]) * v_[383] - ((0.08 * v_[180] * v_[219] + -0.3735 * v_[180] * v_[244] + 0.35 * v_[180]) * v_[222] + (0.08 * v_[180] * v_[244] + 0.3735 * v_[180] * v_[219]) * v_[220]) * v_[3] - (v_[22] * v_[244] * v_[222] - 0.207 * v_[180] * v_[219] * v_[220]) * v_[341]; v_[340] = 0.881 * v_[340] + 0.1104774 * v_[165] + 0.0004405 * v_[369] + -0.1104774 * v_[351] + 0.0004405 * v_[311] - (0 - v_[265]) * v_[383] - v_[180] * v_[3] - -8.81e-05 * (v_[96] + v_[395]); v_[157] = v_[179] * v_[157] - v_[148] * v_[382]; v_[396] = 0.007418 * v_[385] + 0.000102 * (v_[39] - v_[396]) + v_[220] * v_[375] + v_[222] * 
v_[22] + -0.35 * v_[340] + v_[238] * v_[391] + v_[227] * v_[380] + v_[230] * v_[392] + v_[247] * v_[360] + (v_[226] * v_[399]) / v_[232] - 0.47422804 * v_[157] - 0.084376 * v_[387] - 2.1e-05 * v_[154]; v_[22] = 0.002735 * v_[385] + -0.0825825 * v_[157] + 0.000102 * v_[154] + v_[223] * v_[375] + v_[220] * v_[22] + v_[227] * v_[391] + v_[221] * v_[380] + v_[243] * v_[392] + v_[224] * v_[360] + (v_[231] * v_[399]) / v_[232] - 0.007418 * v_[387] - 2.1e-05 * (v_[39] - v_[384]); v_[157] = 0.47422804 * v_[387] + 3.146 * v_[157] + -0.0825825 * v_[385] + v_[340] + v_[240] * v_[391] + v_[234] * v_[380]; v_[340] = v_[312] * v_[312]; v_[385] = 0.009466 * v_[307] + 0.1466472 * (v_[255] - v_[361]) + -5.1e-05 * v_[215] + v_[219] * v_[396] + v_[244] * v_[22] + -0.08 * v_[157] - -0.022734 * v_[90] - -3.6e-05 * (v_[340] - v_[381]); v_[387] = x_in[36] - v_[385]; v_[368] = v_[359] * v_[54] - v_[312] * v_[368]; v_[36] = 0.5824264 * v_[182] + -3.6e-05 * v_[90] + -0.022734 * (v_[110] - v_[381]) + v_[155] * v_[10] + v_[249] * v_[197] + -1 * (v_[36] + (v_[232] * v_[399]) / v_[232]) + v_[271] * v_[323] + v_[228] * v_[98] + v_[273] * v_[254] + v_[270] * v_[235] + (v_[290] * v_[387]) / v_[258] - 0.1466472 * v_[368] - -0.123672 * v_[215] - -5.1e-05 * v_[307]; v_[22] = -0.133138 * v_[90] + 0.5824264 * (v_[361] - v_[255]) + -0.022734 * v_[307] + v_[245] * v_[396] + v_[219] * v_[22] + v_[228] * v_[323] + v_[252] * v_[98] + v_[283] * v_[254] + v_[262] * v_[235] + (v_[233] * v_[387]) / v_[258] - -3.6e-05 * v_[215] - -5.1e-05 * (v_[110] - v_[340]); v_[368] = 3.44 * v_[368] + 0.1466472 * v_[215] + v_[219] * v_[10] + v_[244] * v_[197] + v_[251] * v_[323] + v_[236] * v_[98] + v_[286] * v_[254] + v_[280] * v_[235] + (v_[259] * v_[387]) / v_[258] - 0.5824264 * (v_[340] + v_[381]); v_[157] = 0.1466472 * v_[307] + -3.44 * (v_[361] - v_[255]) + 0.5824264 * v_[90] + -1 * v_[157] + v_[273] * v_[323] + v_[283] * v_[98] + v_[285] * v_[254] + v_[274] * v_[235] + (v_[272] * v_[387]) / v_[258]; v_[340] = 3.44 * 
v_[182] + 0.5824264 * v_[215] + v_[174] * v_[10] + v_[219] * v_[197] + v_[270] * v_[323] + v_[262] * v_[98] + v_[274] * v_[254] + v_[278] * v_[235] + (v_[260] * v_[387]) / v_[258] - 0.1466472 * (v_[110] + v_[340]); v_[215] = x_in[19] * x_in[23] - x_in[20] * x_in[22]; v_[197] = x_in[19] * x_in[20]; v_[10] = x_in[19] * x_in[19]; v_[110] = x_in[18] * x_in[18]; v_[67] = v_[52] * v_[48] + v_[67] * v_[9]; v_[124] = v_[139] * v_[121] + v_[124] * v_[26]; v_[210] = v_[195] * v_[191] + v_[210] * v_[108]; v_[269] = v_[284] * v_[266] + v_[269] * v_[169]; v_[277] = 0.190812 + v_[60] + v_[132] + v_[203] + v_[277]; v_[45] = v_[50] * v_[45] + v_[48] * v_[71]; v_[116] = v_[26] * v_[116] + v_[120] * v_[143]; v_[188] = v_[193] * v_[188] + v_[191] * v_[214]; v_[261] = v_[169] * v_[261] + v_[265] * v_[288]; v_[288] = (-0.00571 + v_[45] + v_[116] + v_[188] + v_[261]) / v_[229]; v_[214] = 0.012668 + v_[67] + v_[124] + v_[210] + v_[269] - v_[277] * v_[288]; v_[143] = x_in[20] * x_in[20]; v_[71] = x_in[18] * x_in[22] - x_in[19] * x_in[21]; v_[261] = -0.00571 + v_[45] + v_[116] + v_[188] + v_[261]; v_[385] = (-0.0053433 * v_[71] + 2.0678571 * (v_[298] - v_[156]) + 0.473273 * v_[197] + 0.190812 * v_[97] + (v_[42] * v_[313]) / v_[42] + v_[325] + v_[289] * v_[318] + v_[47] * v_[333] + v_[72] * v_[14] + v_[118] * v_[306] + -1 * (v_[324] + (v_[113] * v_[371]) / v_[113]) + (v_[185] * v_[100]) / v_[185] + v_[8] + v_[144] * v_[101] + v_[190] * v_[320] + v_[237] * v_[157] + v_[263] * v_[340] + -1 * (v_[385] + (v_[258] * v_[387]) / v_[258]) - -0.00571 * v_[296] - 0.012668 * (v_[143] - v_[10])) / v_[229]; v_[264] = 5.5837 + v_[293] + v_[73] + v_[295] + v_[119] + v_[297] + v_[216] + v_[299] + v_[264] - v_[261] * v_[288]; v_[216] = (2.0678571 * v_[215] + -0.00571 * v_[197] + 0.190812 * (v_[143] - v_[110]) + v_[9] * v_[339] + v_[48] * v_[335] + v_[49] * v_[318] + v_[303] * v_[333] + v_[26] * v_[346] + v_[121] * v_[337] + v_[122] * v_[14] + v_[24] * v_[306] + v_[108] * v_[326] + v_[191] * v_[343] + 
v_[192] * v_[101] + v_[126] * v_[320] + v_[169] * v_[36] + v_[266] * v_[22] + v_[267] * v_[157] + v_[167] * v_[340] - -1.1915559 * v_[71] - 4.847485 * v_[296] - 0.012668 * v_[97] - v_[261] * v_[385]) / v_[264]; v_[269] = (0.012668 + v_[67] + v_[124] + v_[210] + v_[269] - v_[261] * v_[300]) / v_[264]; v_[248] = 6.056973 + v_[293] + v_[33] + v_[295] + v_[103] + v_[297] + v_[176] + v_[299] + v_[248] - v_[214] * v_[269] - v_[277] * v_[300]; v_[22] = (-1.1915559 * (v_[156] - v_[298]) + 4.374212 * v_[97] + 0.012668 * v_[296] + v_[48] * v_[339] + v_[50] * v_[335] + 0.207 * v_[305] + v_[51] * v_[318] + v_[302] * v_[333] + v_[120] * v_[346] + v_[26] * v_[337] + 0.207 * v_[256] + v_[123] * v_[14] + v_[13] * v_[306] + v_[191] * v_[326] + v_[193] * v_[343] + 0.207 * v_[294] + v_[194] * v_[101] + v_[107] * v_[320] + v_[265] * v_[36] + v_[169] * v_[22] + 0.207 * v_[368] + v_[268] * v_[157] + v_[310] * v_[340] - -0.0053433 * v_[215] - 0.190812 * v_[197] - -0.00571 * (v_[10] - v_[110]) - v_[214] * v_[216] - v_[277] * v_[385]) / v_[248]; v_[75] = v_[64] - v_[75]; v_[65] = v_[46] + v_[65] - v_[75]; v_[59] = v_[59] + v_[53]; v_[53] = v_[50] * v_[65] + v_[48] * v_[59]; v_[70] = v_[70] + v_[66]; v_[58] = v_[56] + v_[58] - v_[75]; v_[56] = v_[50] * v_[70] + v_[48] * v_[58]; v_[66] = v_[53] * v_[48] + v_[56] * v_[9]; v_[146] = v_[136] - v_[146]; v_[137] = v_[117] + v_[137] - v_[146]; v_[131] = v_[131] + v_[125]; v_[125] = v_[26] * v_[137] + v_[120] * v_[131]; v_[142] = v_[142] + v_[138]; v_[130] = v_[128] + v_[130] - v_[146]; v_[128] = v_[26] * v_[142] + v_[120] * v_[130]; v_[138] = v_[125] * v_[121] + v_[128] * v_[26]; v_[218] = v_[207] - v_[218]; v_[208] = v_[189] + v_[208] - v_[218]; v_[202] = v_[202] + v_[196]; v_[196] = v_[193] * v_[208] + v_[191] * v_[202]; v_[213] = v_[213] + v_[209]; v_[201] = v_[199] + v_[201] - v_[218]; v_[199] = v_[193] * v_[213] + v_[191] * v_[201]; v_[209] = v_[196] * v_[191] + v_[199] * v_[108]; v_[291] = v_[281] - v_[291]; v_[282] = v_[262] + v_[282] - 
v_[291]; v_[276] = v_[276] + v_[270]; v_[270] = v_[169] * v_[282] + v_[265] * v_[276]; v_[287] = v_[287] + v_[283]; v_[275] = v_[273] + v_[275] - v_[291]; v_[273] = v_[169] * v_[287] + v_[265] * v_[275]; v_[283] = v_[270] * v_[266] + v_[273] * v_[169]; v_[130] = v_[121] * v_[142] + v_[26] * v_[130]; v_[131] = v_[121] * v_[137] + v_[26] * v_[131]; v_[137] = v_[130] * v_[26] + v_[131] * v_[121] + v_[146]; v_[58] = v_[48] * v_[70] + v_[9] * v_[58]; v_[59] = v_[48] * v_[65] + v_[9] * v_[59]; v_[65] = v_[58] * v_[9] + v_[59] * v_[48] + v_[75]; v_[201] = v_[191] * v_[213] + v_[108] * v_[201]; v_[202] = v_[191] * v_[208] + v_[108] * v_[202]; v_[208] = v_[201] * v_[108] + v_[202] * v_[191] + v_[218]; v_[275] = v_[266] * v_[287] + v_[169] * v_[275]; v_[276] = v_[266] * v_[282] + v_[169] * v_[276]; v_[282] = v_[275] * v_[169] + v_[276] * v_[266] + v_[291]; v_[253] = v_[253] + v_[35]; v_[292] = v_[292] + v_[253]; v_[30] = v_[30] + v_[20]; v_[242] = v_[242] + v_[30]; v_[253] = 0 - v_[253]; v_[20] = v_[50] * v_[253] + v_[48] * v_[30]; v_[35] = v_[292] * v_[9] + v_[242] * v_[48] - v_[20]; v_[21] = v_[21] + v_[106]; v_[40] = -1 * (v_[40] + v_[21]); v_[99] = v_[99] + v_[85]; v_[41] = -1 * (v_[41] + v_[99]); v_[21] = 0 - v_[21]; v_[85] = v_[26] * v_[21] + v_[120] * v_[99]; v_[106] = v_[40] * v_[26] + v_[41] * v_[121] - v_[85]; v_[86] = v_[86] + v_[178]; v_[111] = v_[111] + v_[86]; v_[173] = v_[173] + v_[163]; v_[112] = v_[112] + v_[173]; v_[86] = 0 - v_[86]; v_[163] = v_[193] * v_[86] + v_[191] * v_[173]; v_[178] = v_[111] * v_[108] + v_[112] * v_[191] - v_[163]; v_[164] = v_[164] + v_[251]; v_[183] = -1 * (v_[183] + v_[164]); v_[246] = v_[246] + v_[236]; v_[184] = -1 * (v_[184] + v_[246]); v_[164] = 0 - v_[164]; v_[236] = v_[169] * v_[164] + v_[265] * v_[246]; v_[251] = v_[183] * v_[169] + v_[184] * v_[266] - v_[236]; v_[287] = (-2.0678571 + v_[35] + v_[106] + v_[178] + v_[251]) / v_[229]; v_[213] = (v_[137] + v_[65] + v_[208] + v_[282] - v_[261] * v_[287]) / v_[264]; v_[70] = 
(-1.1915559 + v_[66] + v_[138] + v_[209] + v_[283] - v_[214] * v_[213] - v_[277] * v_[287]) / v_[248]; v_[251] = -2.0678571 + v_[35] + v_[106] + v_[178] + v_[251]; v_[282] = v_[137] + v_[65] + v_[208] + v_[282] - v_[251] * v_[288]; v_[283] = -1.1915559 + v_[66] + v_[138] + v_[209] + v_[283] - v_[282] * v_[269] - v_[251] * v_[300]; v_[141] = 0 - v_[141]; v_[209] = v_[26] * v_[141] + v_[120] * v_[135]; v_[69] = 0 - v_[69]; v_[138] = v_[50] * v_[69] + v_[48] * v_[63]; v_[212] = 0 - v_[212]; v_[66] = v_[193] * v_[212] + v_[191] * v_[206]; v_[286] = 0 - v_[286]; v_[208] = v_[169] * v_[286] + v_[265] * v_[280]; v_[65] = (v_[146] + v_[75] + v_[218] + v_[291]) / v_[229]; v_[137] = (2.0678571 + v_[20] + v_[85] + v_[163] + v_[236] - v_[261] * v_[65]) / v_[264]; v_[253] = v_[48] * v_[253] + v_[9] * v_[30]; v_[30] = 0 - v_[253]; v_[21] = v_[121] * v_[21] + v_[26] * v_[99]; v_[99] = 0 - v_[21]; v_[86] = v_[191] * v_[86] + v_[108] * v_[173]; v_[173] = 0 - v_[86]; v_[164] = v_[266] * v_[164] + v_[169] * v_[246]; v_[246] = 0 - v_[164]; v_[178] = (0.0053433 + v_[30] + v_[99] + v_[173] + v_[246] - v_[214] * v_[137] - v_[277] * v_[65]) / v_[248]; v_[106] = v_[209] + v_[138] + v_[66] + v_[208] - v_[282] * v_[137] - v_[251] * v_[65] - v_[283] * v_[178]; v_[35] = v_[146] + v_[75] + v_[218] + v_[291]; v_[236] = 2.0678571 + v_[20] + v_[85] + v_[163] + v_[236] - v_[35] * v_[288]; v_[246] = 0.0053433 + v_[30] + v_[99] + v_[173] + v_[246] - v_[236] * v_[269] - v_[35] * v_[300]; v_[68] = v_[68] - v_[62]; v_[61] = v_[61] - v_[62]; v_[140] = v_[140] - v_[134]; v_[133] = v_[133] - v_[134]; v_[211] = v_[211] - v_[205]; v_[204] = v_[204] - v_[205]; v_[285] = v_[285] - v_[279]; v_[278] = v_[278] - v_[279]; v_[173] = v_[48] * v_[57]; v_[99] = (v_[48] * v_[61] + v_[9] * v_[57]) * v_[48] + (v_[173] + v_[9] * v_[68]) * v_[9]; v_[173] = v_[50] * v_[61] + v_[173]; v_[57] = v_[50] * v_[57] + v_[48] * v_[68]; v_[30] = v_[173] * v_[50] + v_[57] * v_[48]; v_[163] = v_[26] * v_[129]; v_[85] = (v_[121] * 
v_[133] + v_[163]) * v_[121] + (v_[121] * v_[129] + v_[26] * v_[140]) * v_[26]; v_[129] = v_[26] * v_[133] + v_[120] * v_[129]; v_[163] = v_[163] + v_[120] * v_[140]; v_[20] = v_[129] * v_[26] + v_[163] * v_[120]; v_[142] = v_[191] * v_[200]; v_[262] = (v_[191] * v_[204] + v_[108] * v_[200]) * v_[191] + (v_[142] + v_[108] * v_[211]) * v_[108]; v_[142] = v_[193] * v_[204] + v_[142]; v_[200] = v_[193] * v_[200] + v_[191] * v_[211]; v_[281] = v_[142] * v_[193] + v_[200] * v_[191]; v_[189] = v_[169] * v_[274]; v_[207] = (v_[266] * v_[278] + v_[189]) * v_[266] + (v_[266] * v_[274] + v_[169] * v_[285]) * v_[169]; v_[274] = v_[169] * v_[278] + v_[265] * v_[274]; v_[189] = v_[189] + v_[265] * v_[285]; v_[117] = v_[274] * v_[169] + v_[189] * v_[265]; v_[278] = 53.433 + v_[68] + v_[61] + v_[62] + v_[140] + v_[133] + v_[134] + v_[211] + v_[204] + v_[205] + v_[285] + v_[278] + v_[279] - v_[99] - v_[30] - v_[85] - v_[20] - v_[262] - v_[281] - v_[207] - v_[117] - v_[236] * v_[137] - v_[35] * v_[65] - v_[246] * v_[178]; v_[215] = (53.433 * v_[215] + 2.0678571 * v_[296] + -0.0053433 * v_[97] + v_[305] + -1 * v_[256] + v_[294] + -1 * v_[368] - -1.1915559 * (v_[143] + v_[10]) - v_[236] * v_[216] - v_[35] * v_[385] - v_[246] * v_[22]) / v_[278]; v_[208] = (v_[209] + v_[138] + v_[66] + v_[208] - v_[236] * v_[213] - v_[35] * v_[287] - v_[246] * v_[70]) / v_[278]; v_[207] = 53.433 + v_[62] + v_[99] + v_[134] + v_[85] + v_[205] + v_[262] + v_[279] + v_[207] - v_[282] * v_[213] - v_[251] * v_[287] - v_[283] * v_[70] - v_[106] * v_[208]; v_[143] = (-53.433 * (v_[298] - v_[156]) + -1.1915559 * v_[97] + 2.0678571 * v_[197] + v_[9] * v_[318] + v_[48] * v_[333] + v_[105] * v_[14] + v_[352] * v_[306] + v_[108] * v_[101] + v_[191] * v_[320] + v_[309] * v_[157] + v_[374] * v_[340] - -0.0053433 * (v_[143] + v_[110]) - v_[282] * v_[216] - v_[251] * v_[385] - v_[283] * v_[22] - v_[106] * v_[215]) / v_[207]; v_[163] = v_[129] * v_[121] + v_[163] * v_[26]; v_[57] = v_[173] * v_[48] + v_[57] * v_[9]; 
v_[200] = v_[142] * v_[191] + v_[200] * v_[108]; v_[189] = v_[274] * v_[266] + v_[189] * v_[169]; v_[59] = v_[59] * v_[50] + v_[58] * v_[48]; v_[131] = v_[131] * v_[26] + v_[130] * v_[120]; v_[202] = v_[202] * v_[193] + v_[201] * v_[191]; v_[276] = v_[276] * v_[169] + v_[275] * v_[265]; v_[253] = v_[292] * v_[48] + v_[242] * v_[50] + v_[253]; v_[21] = v_[40] * v_[120] + v_[41] * v_[26] + v_[21]; v_[86] = v_[111] * v_[191] + v_[112] * v_[193] + v_[86]; v_[164] = v_[183] * v_[265] + v_[184] * v_[169] + v_[164]; v_[229] = (-0.0053433 + v_[253] + v_[21] + v_[86] + v_[164]) / v_[229]; v_[264] = (1.1915559 + v_[59] + v_[131] + v_[202] + v_[276] - v_[261] * v_[229]) / v_[264]; v_[128] = v_[128] * v_[120] + v_[125] * v_[26] + v_[146]; v_[56] = v_[56] * v_[48] + v_[53] * v_[50] + v_[75]; v_[199] = v_[199] * v_[191] + v_[196] * v_[193] + v_[218]; v_[273] = v_[273] * v_[265] + v_[270] * v_[169] + v_[291]; v_[248] = (v_[128] + v_[56] + v_[199] + v_[273] - v_[214] * v_[264] - v_[277] * v_[229]) / v_[248]; v_[141] = 0 - v_[26] * v_[135] - v_[121] * v_[141]; v_[69] = 0 - v_[9] * v_[63] - v_[48] * v_[69]; v_[212] = 0 - v_[108] * v_[206] - v_[191] * v_[212]; v_[286] = 0 - v_[169] * v_[280] - v_[266] * v_[286]; v_[278] = (v_[141] + v_[69] + v_[212] + v_[286] - v_[236] * v_[264] - v_[35] * v_[229] - v_[246] * v_[248]) / v_[278]; v_[207] = (v_[163] + v_[57] + v_[200] + v_[189] - v_[282] * v_[264] - v_[251] * v_[229] - v_[283] * v_[248] - v_[106] * v_[278]) / v_[207]; v_[164] = -0.0053433 + v_[253] + v_[21] + v_[86] + v_[164]; v_[276] = 1.1915559 + v_[59] + v_[131] + v_[202] + v_[276] - v_[164] * v_[288]; v_[273] = v_[128] + v_[56] + v_[199] + v_[273] - v_[276] * v_[269] - v_[164] * v_[300]; v_[286] = v_[141] + v_[69] + v_[212] + v_[286] - v_[276] * v_[137] - v_[164] * v_[65] - v_[273] * v_[178]; v_[189] = v_[163] + v_[57] + v_[200] + v_[189] - v_[276] * v_[213] - v_[164] * v_[287] - v_[273] * v_[70] - v_[286] * v_[208]; v_[189] = (53.433 * v_[71] + -1.1915559 * v_[296] + -0.0053433 * 
v_[197] + v_[48] * v_[318] + v_[304] * v_[333] + v_[120] * v_[14] + v_[18] * v_[306] + v_[191] * v_[101] + v_[257] * v_[320] + v_[265] * v_[157] + v_[161] * v_[340] - 2.0678571 * (v_[10] + v_[110]) - v_[276] * v_[216] - v_[164] * v_[385] - v_[273] * v_[22] - v_[286] * v_[215] - v_[189] * v_[143]) / (53.433 + v_[62] + v_[30] + v_[134] + v_[20] + v_[205] + v_[281] + v_[279] + v_[117] - v_[276] * v_[264] - v_[164] * v_[229] - v_[273] * v_[248] - v_[286] * v_[278] - v_[189] * v_[207]); v_[207] = v_[143] - v_[207] * v_[189]; v_[278] = v_[215] - v_[278] * v_[189] - v_[208] * v_[207]; v_[248] = v_[22] - v_[70] * v_[207] - v_[178] * v_[278] - v_[248] * v_[189]; forwardZero[20] = 0 - v_[248]; v_[264] = v_[216] - v_[137] * v_[278] - v_[269] * v_[248] - v_[213] * v_[207] - v_[264] * v_[189]; forwardZero[18] = v_[300] * v_[248] + v_[288] * v_[264] + v_[65] * v_[278] + v_[287] * v_[207] + v_[229] * v_[189] - v_[385]; forwardZero[19] = 0 - v_[264]; v_[278] = 0 - v_[278]; v_[264] = 0.5 * x_in[0]; v_[248] = cos(v_[264]); v_[229] = 0.5 * x_in[1]; v_[65] = cos(v_[229]); v_[287] = v_[248] * v_[65]; v_[385] = 0.5 * x_in[2]; v_[288] = sin(v_[385]); v_[264] = sin(v_[264]); v_[229] = sin(v_[229]); v_[300] = v_[264] * v_[229]; v_[385] = cos(v_[385]); v_[137] = 2. * (v_[287] * v_[288] + v_[300] * v_[385]); v_[264] = v_[264] * v_[65]; v_[229] = v_[248] * v_[229]; v_[248] = v_[264] * v_[385] + v_[229] * v_[288]; v_[229] = v_[229] * v_[385] - v_[264] * v_[288]; v_[264] = 2. * v_[229]; v_[385] = v_[287] * v_[385] - v_[300] * v_[288]; forwardZero[21] = v_[278] + -9.81 * (v_[137] * v_[248] - v_[264] * v_[385]); v_[207] = 0 - v_[207]; v_[300] = 2. 
* v_[248]; forwardZero[22] = v_[207] + -9.81 * (v_[137] * v_[229] + v_[300] * v_[385]); v_[189] = 0 - v_[189]; forwardZero[23] = v_[189] + -9.81 * (1 - v_[264] * v_[229] - v_[300] * v_[248]); v_[300] = -1 * forwardZero[18]; v_[323] = v_[265] * forwardZero[20] + v_[169] * forwardZero[19] + v_[323]; v_[98] = v_[169] * forwardZero[20] + v_[266] * forwardZero[19] + v_[98]; v_[266] = -1 * v_[278]; v_[169] = 0.207 * forwardZero[20] + v_[266]; v_[235] = v_[310] * forwardZero[20] + v_[167] * forwardZero[19] + v_[263] * forwardZero[18] + v_[161] * v_[189] + v_[374] * v_[207] + v_[235]; forwardZero[24] = (v_[387] - v_[258] * v_[300] - v_[290] * v_[323] - v_[233] * v_[98] - v_[259] * v_[169] - v_[272] * (v_[268] * forwardZero[20] + v_[267] * forwardZero[19] + v_[237] * forwardZero[18] + v_[265] * v_[189] + v_[309] * v_[207] + v_[254]) - v_[260] * v_[235]) / v_[258]; v_[387] = -1 * v_[323]; v_[254] = v_[300] + forwardZero[24]; v_[392] = v_[219] * v_[169] + v_[174] * v_[235] + v_[155] * v_[323] + v_[392]; v_[235] = v_[244] * v_[169] + v_[219] * v_[235] + v_[249] * v_[323] + v_[360]; forwardZero[25] = (v_[399] - v_[232] * v_[387] - v_[226] * (v_[219] * v_[254] + v_[245] * v_[98] + v_[391]) - v_[231] * (v_[244] * v_[254] + v_[219] * v_[98] + v_[380]) - v_[241] * v_[392] - v_[239] * v_[235]) / v_[232]; v_[387] = v_[387] + forwardZero[25]; forwardZero[26] = (v_[336] - 0.1104774 * (v_[220] * v_[392] + v_[372] * v_[235] + v_[37] * v_[387] + v_[102]) - -0.0004405 * (v_[222] * v_[392] + v_[220] * v_[235] + v_[225] * v_[387] + v_[363]) - 0.026181 * v_[387]) / 0.026181; v_[377] = v_[191] * forwardZero[20] + v_[108] * forwardZero[19] + v_[377]; v_[362] = v_[193] * forwardZero[20] + v_[191] * forwardZero[19] + v_[362]; v_[193] = 0.207 * forwardZero[20] + v_[278]; v_[162] = v_[107] * forwardZero[20] + v_[126] * forwardZero[19] + v_[190] * forwardZero[18] + v_[257] * v_[189] + v_[191] * v_[207] + v_[162]; forwardZero[27] = (v_[100] - v_[185] * forwardZero[18] - v_[217] * v_[377] - v_[160] * 
v_[362] - v_[186] * v_[193] - v_[198] * (v_[194] * forwardZero[20] + v_[192] * forwardZero[19] + v_[144] * forwardZero[18] + v_[191] * v_[189] + v_[108] * v_[207] + v_[181]) - v_[187] * v_[162]) / v_[185]; v_[100] = forwardZero[18] + forwardZero[27]; v_[378] = v_[87] * v_[193] + v_[93] * v_[162] + v_[89] * v_[377] + v_[378]; v_[162] = v_[80] * v_[193] + v_[92] * v_[162] + v_[177] * v_[377] + v_[379]; forwardZero[28] = (v_[366] - v_[159] * v_[377] - v_[153] * (v_[175] * v_[100] + v_[172] * v_[362] + v_[376]) - v_[158] * (v_[172] * v_[100] + v_[92] * v_[362] + v_[373]) - v_[168] * v_[378] - v_[166] * v_[162]) / v_[159]; v_[377] = v_[377] + forwardZero[28]; forwardZero[29] = (v_[319] - 0.1104774 * (v_[147] * v_[378] + v_[367] * v_[162] + v_[365] * v_[377] + v_[322]) - -0.0004405 * (v_[149] * v_[378] + v_[147] * v_[162] + v_[152] * v_[377] + v_[338]) - 0.026181 * v_[377]) / 0.026181; v_[28] = v_[120] * forwardZero[20] + v_[26] * forwardZero[19] + v_[28]; v_[308] = v_[26] * forwardZero[20] + v_[121] * forwardZero[19] + v_[308]; v_[266] = 0.207 * forwardZero[20] + v_[266]; v_[84] = v_[13] * forwardZero[20] + v_[24] * forwardZero[19] + v_[118] * forwardZero[18] + v_[18] * v_[189] + v_[352] * v_[207] + v_[84]; forwardZero[30] = (v_[371] - v_[113] * v_[300] - v_[145] * v_[28] - v_[82] * v_[308] - v_[114] * v_[266] - v_[127] * (v_[123] * forwardZero[20] + v_[122] * forwardZero[19] + v_[72] * forwardZero[18] + v_[120] * v_[189] + v_[105] * v_[207] + v_[109]) - v_[115] * v_[84]) / v_[113]; v_[371] = -1 * v_[28]; v_[300] = v_[300] + forwardZero[30]; v_[331] = v_[76] * v_[266] + v_[31] * v_[84] + v_[12] * v_[28] + v_[331]; v_[84] = v_[94] * v_[266] + v_[76] * v_[84] + v_[104] * v_[28] + v_[11]; forwardZero[31] = (v_[350] - v_[81] * v_[371] - 2.1e-05 * (v_[76] * v_[300] + v_[95] * v_[308] + v_[27]) - -0.000102 * (v_[94] * v_[300] + v_[76] * v_[308] + v_[358]) - v_[91] * v_[331] - v_[88] * v_[84]) / v_[81]; v_[371] = v_[371] + forwardZero[31]; forwardZero[32] = (v_[342] - 0.110125 
* (v_[79] * v_[331] + v_[314] * v_[84] + v_[345] * v_[371] + v_[355]) - 0.000881 * (v_[77] * v_[331] + v_[79] * v_[84] + v_[78] * v_[371] + v_[354]) - 0.026181 * v_[371]) / 0.026181; v_[348] = v_[48] * forwardZero[20] + v_[9] * forwardZero[19] + v_[348]; v_[344] = v_[50] * forwardZero[20] + v_[48] * forwardZero[19] + v_[344]; v_[278] = 0.207 * forwardZero[20] + v_[278]; v_[19] = v_[302] * forwardZero[20] + v_[303] * forwardZero[19] + v_[47] * forwardZero[18] + v_[304] * v_[189] + v_[48] * v_[207] + v_[19]; forwardZero[33] = (v_[313] - v_[42] * forwardZero[18] - v_[74] * v_[348] - v_[17] * v_[344] - v_[43] * v_[278] - v_[55] * (v_[51] * forwardZero[20] + v_[49] * forwardZero[19] + v_[289] * forwardZero[18] + v_[48] * v_[189] + v_[9] * v_[207] + v_[38]) - v_[44] * v_[19]) / v_[42]; v_[189] = forwardZero[18] + forwardZero[33]; v_[356] = v_[315] * v_[278] + v_[316] * v_[19] + v_[317] * v_[348] + v_[356]; v_[19] = v_[301] * v_[278] + v_[5] * v_[19] + v_[34] * v_[348] + v_[357]; forwardZero[34] = (v_[349] - v_[16] * v_[348] - v_[6] * (v_[32] * v_[189] + v_[29] * v_[344] + v_[347]) - v_[15] * (v_[29] * v_[189] + v_[5] * v_[344] + v_[353]) - v_[25] * v_[356] - v_[23] * v_[19]) / v_[16]; v_[348] = v_[348] + forwardZero[34]; forwardZero[35] = (v_[329] - 0.1104774 * (v_[2] * v_[356] + v_[332] * v_[19] + v_[327] * v_[348] + v_[334]) - 0.0004405 * (v_[7] * v_[356] + v_[2] * v_[19] + v_[0] * v_[348] + v_[328]) - 0.026181 * v_[348]) / 0.026181; // dependent variables without operations forwardZero[6] = x_in[24]; forwardZero[7] = x_in[25]; forwardZero[8] = x_in[26]; forwardZero[9] = x_in[27]; forwardZero[10] = x_in[28]; forwardZero[11] = x_in[29]; forwardZero[12] = x_in[30]; forwardZero[13] = x_in[31]; forwardZero[14] = x_in[32]; forwardZero[15] = x_in[33]; forwardZero[16] = x_in[34]; forwardZero[17] = x_in[35]; return eval_; } } } }
/**
 * MegBA is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2021 Megvii Inc. All rights reserved.
 *
 **/

#include <thrust/copy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/transform.h>

#include "operator/jet_vector.h"
#include "operator/jet_vector_math_impl.h"

namespace MegBA {
namespace math {
namespace impl {
namespace {
namespace TT {
// Gradient of a product: d(f * g) = f' * g + f * g', evaluated element-wise
// on a (f_res, f_grad, g_res, g_grad) tuple.
template <typename T>
struct JetVectorMulJetVectorV {
  __host__ __device__ T operator()(thrust::tuple<T, T, T, T> zip) {
    T fRes, fGrad, gRes, gGrad;
    thrust::tie(fRes, fGrad, gRes, gGrad) = zip;
    return fRes * gGrad + fGrad * gRes;
  }
};

// Element-wise reciprocal; divisions below are rewritten as multiplications
// by a precomputed 1/g.
template <typename T>
struct Inverse : public thrust::unary_function<T, T> {
  __host__ __device__ T operator()(T value) { return T(1.) / value; }
};

// Gradient of a quotient: d(f / g) = (f' - (f / g) * g') / g,
// expressed with the reciprocal of g already computed.
template <typename T>
struct JetVectorDivJetVectorV {
  __host__ __device__ T operator()(thrust::tuple<T, T, T, T> zip) {
    T fRes, fGrad, gResInv, gGrad;
    thrust::tie(fRes, fGrad, gResInv, gGrad) = zip;
    return (fGrad - fRes * gResInv * gGrad) * gResInv;
  }
};

// Quotient gradient when the numerator carries no gradient:
// d(f / g) = -f * g' / g^2.
template <typename T>
struct ScalarVectorDivJetVectorV {
  __host__ __device__ T operator()(thrust::tuple<T, T, T> zip) {
    T fRes, gResInv, gGrad;
    thrust::tie(fRes, gResInv, gGrad) = zip;
    return -fRes * gResInv * gGrad * gResInv;
  }
};

// Value part of (scalar - jet vector).
template <typename T>
struct ScalarSubJetVector : public thrust::unary_function<T, T> {
  T scalar;
  explicit ScalarSubJetVector(T scalar) : scalar(scalar) {}
  __host__ __device__ T operator()(T rhs) { return scalar - rhs; }
};

// Value part of (scalar / jet vector).
template <typename T>
struct ScalarDivJetVectorA : public thrust::unary_function<T, T> {
  T scalar;
  explicit ScalarDivJetVectorA(T scalar) : scalar(scalar) {}
  __host__ __device__ T operator()(T rhs) { return scalar / rhs; }
};

// Gradient part of (scalar / jet vector): d(c / g) = -g' * c / g^2.
template <typename T>
struct ScalarDivJetVectorV : public thrust::binary_function<T, T, T> {
  T scalar;
  explicit ScalarDivJetVectorV(T scalar) : scalar(scalar) {}
  __host__ __device__ T operator()(T res, T grad) {
    return -grad * scalar / (res * res);
  }
};

// Sign of the value part; multiplying by it implements abs() and its
// (sub)gradient. Zero maps to -1, matching the original convention.
template <typename T>
struct AbsMask : public thrust::unary_function<T, T> {
  __host__ __device__ T operator()(T value) {
    return value > 0. ? T(1.) : T(-1.);
  }
};

template <typename T>
struct Sin : public thrust::unary_function<T, T> {
  __host__ __device__ T operator()(T value) { return std::sin(value); }
};

// Chain rule for sin's input gradient inside cos(): d(cos f) = -sin(f) * f'.
template <typename T>
struct NegativeSinMul : public thrust::binary_function<T, T, T> {
  __host__ __device__ T operator()(T res, T grad) {
    return -std::sin(res) * grad;
  }
};

template <typename T>
struct Cos : public thrust::unary_function<T, T> {
  __host__ __device__ T operator()(T value) { return std::cos(value); }
};

// Chain rule inside sin(): d(sin f) = cos(f) * f'.
template <typename T>
struct CosMul : public thrust::binary_function<T, T, T> {
  __host__ __device__ T operator()(T res, T grad) {
    return std::cos(res) * grad;
  }
};

template <typename T>
struct Sqrt : public thrust::unary_function<T, T> {
  __host__ __device__ T operator()(T value) { return std::sqrt(value); }
};

// d(sqrt f) = f' / (2 * sqrt(f)); receives the already-computed sqrt.
template <typename T>
struct SqrtJetVectorV : public thrust::binary_function<T, T, T> {
  __host__ __device__ T operator()(T sqrtedRes, T grad) {
    return T(0.5) * grad / sqrtedRes;
  }
};
}  // namespace TT
}  // namespace

// NOTE: throughout this file the gradient part is written before the value
// part (or vice versa) deliberately — `out` may alias `f` or `g`, so the
// original statement order is preserved in every function.

// (jet + jet): grads add element-wise, values add element-wise.
template <typename T>
void jetVectorAddJetVectorCPU(const MegBA::JetVector<T> &f,
                              const MegBA::JetVector<T> &g,
                              MegBA::JetVector<T> *out) {
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(f.getCPUGrad()[idx].begin(), f.getCPUGrad()[idx].end(),
                      g.getCPUGrad()[idx].begin(),
                      out->getCPUGrad()[idx].begin(), thrust::plus<T>());
  }
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    g.getCPURes().begin(), out->getCPURes().begin(),
                    thrust::plus<T>());
}

// (jet + scalar-vector): the gradient comes solely from f.
template <typename T>
void jetVectorAddScalarVectorCPU(const MegBA::JetVector<T> &f,
                                 const MegBA::JetVector<T> &g,
                                 MegBA::JetVector<T> *out) {
  out->getCPUGrad() = f.getCPUGrad();
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    g.getCPURes().begin(), out->getCPURes().begin(),
                    thrust::plus<T>());
}

// (scalar-vector + scalar-vector): values only, no gradients involved.
template <typename T>
void scalarVectorAddScalarVectorCPU(const MegBA::JetVector<T> &f,
                                    const MegBA::JetVector<T> &g,
                                    MegBA::JetVector<T> *out) {
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    g.getCPURes().begin(), out->getCPURes().begin(),
                    thrust::plus<T>());
}

// Dispatch on which operands carry gradients. Addition commutes, so the
// (scalar, jet) case reuses the (jet, scalar) kernel with swapped arguments.
template <typename T>
void vectorAddVectorCPU(const MegBA::JetVector<T> &f,
                        const MegBA::JetVector<T> &g,
                        MegBA::JetVector<T> *out) {
  const bool fHasGrad = f.getGradShape() != 0;
  const bool gHasGrad = g.getGradShape() != 0;
  if (fHasGrad && gHasGrad) {
    jetVectorAddJetVectorCPU(f, g, out);
  } else if (fHasGrad) {
    jetVectorAddScalarVectorCPU(f, g, out);
  } else if (gHasGrad) {
    jetVectorAddScalarVectorCPU(g, f, out);
  } else {
    scalarVectorAddScalarVectorCPU(f, g, out);
  }
}

template void vectorAddVectorCPU<double>(const MegBA::JetVector<double> &f,
                                         const MegBA::JetVector<double> &g,
                                         MegBA::JetVector<double> *out);

template void vectorAddVectorCPU<float>(const MegBA::JetVector<float> &f,
                                        const MegBA::JetVector<float> &g,
                                        MegBA::JetVector<float> *out);

// (jet - jet): grads and values subtract element-wise.
template <typename T>
void JetVector_minus_JetVector_CPU(const MegBA::JetVector<T> &f,
                                   const MegBA::JetVector<T> &g,
                                   MegBA::JetVector<T> *out) {
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(f.getCPUGrad()[idx].begin(), f.getCPUGrad()[idx].end(),
                      g.getCPUGrad()[idx].begin(),
                      out->getCPUGrad()[idx].begin(), thrust::minus<T>());
  }
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    g.getCPURes().begin(), out->getCPURes().begin(),
                    thrust::minus<T>());
}

// (jet - scalar-vector): gradient is f's, unchanged.
template <typename T>
void jetVectorSubScalarVectorCPU(const MegBA::JetVector<T> &f,
                                 const MegBA::JetVector<T> &g,
                                 MegBA::JetVector<T> *out) {
  out->getCPUGrad() = f.getCPUGrad();
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    g.getCPURes().begin(), out->getCPURes().begin(),
                    thrust::minus<T>());
}

// (scalar-vector - jet): gradient is -g'.
template <typename T>
void scalarVectorSubJetVectorCPU(const MegBA::JetVector<T> &f,
                                 const MegBA::JetVector<T> &g,
                                 MegBA::JetVector<T> *out) {
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(g.getCPUGrad()[idx].begin(), g.getCPUGrad()[idx].end(),
                      out->getCPUGrad()[idx].begin(), thrust::negate<T>());
  }
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    g.getCPURes().begin(), out->getCPURes().begin(),
                    thrust::minus<T>());
}

// (scalar-vector - scalar-vector): values only.
template <typename T>
void scalarVectorSubScalarVectorCPU(const MegBA::JetVector<T> &f,
                                    const MegBA::JetVector<T> &g,
                                    MegBA::JetVector<T> *out) {
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    g.getCPURes().begin(), out->getCPURes().begin(),
                    thrust::minus<T>());
}

// Dispatch for subtraction. Unlike addition it does not commute, so the
// (scalar, jet) case needs its own kernel.
template <typename T>
void vectorSubVectorCPU(const MegBA::JetVector<T> &f,
                        const MegBA::JetVector<T> &g,
                        MegBA::JetVector<T> *out) {
  const bool fHasGrad = f.getGradShape() != 0;
  const bool gHasGrad = g.getGradShape() != 0;
  if (fHasGrad && gHasGrad) {
    JetVector_minus_JetVector_CPU(f, g, out);
  } else if (fHasGrad) {
    jetVectorSubScalarVectorCPU(f, g, out);
  } else if (gHasGrad) {
    scalarVectorSubJetVectorCPU(f, g, out);
  } else {
    scalarVectorSubScalarVectorCPU(f, g, out);
  }
}

template void vectorSubVectorCPU<double>(const MegBA::JetVector<double> &f,
                                         const MegBA::JetVector<double> &g,
                                         MegBA::JetVector<double> *out);

template void vectorSubVectorCPU<float>(const MegBA::JetVector<float> &f,
                                        const MegBA::JetVector<float> &g,
                                        MegBA::JetVector<float> *out);

// (jet * jet): grads via the product rule, then values. Gradients are
// computed first because they read both operands' value parts.
template <typename T>
void jetVectorMulJetVectorCPU(const MegBA::JetVector<T> &f,
                              const MegBA::JetVector<T> &g,
                              MegBA::JetVector<T> *out) {
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(
        thrust::make_zip_iterator(thrust::make_tuple(
            f.getCPURes().begin(), f.getCPUGrad()[idx].begin(),
            g.getCPURes().begin(), g.getCPUGrad()[idx].begin())),
        thrust::make_zip_iterator(thrust::make_tuple(
            f.getCPURes().end(), f.getCPUGrad()[idx].end(),
            g.getCPURes().end(), g.getCPUGrad()[idx].end())),
        out->getCPUGrad()[idx].begin(), TT::JetVectorMulJetVectorV<T>());
  }
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    g.getCPURes().begin(), out->getCPURes().begin(),
                    thrust::multiplies<T>());
}

// (jet * scalar-vector): d(f * c) = f' * c.
template <typename T>
void jetVectorMulScalarVectorCPU(const MegBA::JetVector<T> &f,
                                 const MegBA::JetVector<T> &g,
                                 MegBA::JetVector<T> *out) {
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(f.getCPUGrad()[idx].begin(), f.getCPUGrad()[idx].end(),
                      g.getCPURes().begin(), out->getCPUGrad()[idx].begin(),
                      thrust::multiplies<T>());
  }
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    g.getCPURes().begin(), out->getCPURes().begin(),
                    thrust::multiplies<T>());
}

// (scalar-vector * scalar-vector): values only.
template <typename T>
void scalarVectorMulScalarVectorCPU(const MegBA::JetVector<T> &f,
                                    const MegBA::JetVector<T> &g,
                                    MegBA::JetVector<T> *out) {
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    g.getCPURes().begin(), out->getCPURes().begin(),
                    thrust::multiplies<T>());
}

// Dispatch for multiplication; multiplication commutes, so the (scalar, jet)
// case reuses the (jet, scalar) kernel with swapped arguments.
template <typename T>
void vectorMulVectorCPU(const MegBA::JetVector<T> &f,
                        const MegBA::JetVector<T> &g,
                        MegBA::JetVector<T> *out) {
  const bool fHasGrad = f.getGradShape() != 0;
  const bool gHasGrad = g.getGradShape() != 0;
  if (fHasGrad && gHasGrad) {
    jetVectorMulJetVectorCPU(f, g, out);
  } else if (fHasGrad) {
    jetVectorMulScalarVectorCPU(f, g, out);
  } else if (gHasGrad) {
    jetVectorMulScalarVectorCPU(g, f, out);
  } else {
    scalarVectorMulScalarVectorCPU(f, g, out);
  }
}

template void vectorMulVectorCPU<double>(const MegBA::JetVector<double> &f,
                                         const MegBA::JetVector<double> &g,
                                         MegBA::JetVector<double> *out);

template void vectorMulVectorCPU<float>(const MegBA::JetVector<float> &f,
                                        const MegBA::JetVector<float> &g,
                                        MegBA::JetVector<float> *out);

// (jet / jet): precompute 1/g once, then grads, then values.
template <typename T>
void jetVectorDivJetVectorCPU(const MegBA::JetVector<T> &f,
                              const MegBA::JetVector<T> &g,
                              MegBA::JetVector<T> *out) {
  std::vector<T> gInverse(f.getCPURes().size());
  thrust::transform(g.getCPURes().begin(), g.getCPURes().end(),
                    gInverse.begin(), TT::Inverse<T>());
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(
        thrust::make_zip_iterator(thrust::make_tuple(
            f.getCPURes().begin(), f.getCPUGrad()[idx].begin(),
            gInverse.begin(), g.getCPUGrad()[idx].begin())),
        thrust::make_zip_iterator(thrust::make_tuple(
            f.getCPURes().end(), f.getCPUGrad()[idx].end(),
            gInverse.end(), g.getCPUGrad()[idx].end())),
        out->getCPUGrad()[idx].begin(), TT::JetVectorDivJetVectorV<T>());
  }
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    gInverse.begin(), out->getCPURes().begin(),
                    thrust::multiplies<T>());
}

// (jet / scalar-vector): d(f / c) = f' / c, done via multiplication by 1/c.
template <typename T>
void jetVectorDivScalarVectorCPU(const MegBA::JetVector<T> &f,
                                 const MegBA::JetVector<T> &g,
                                 MegBA::JetVector<T> *out) {
  std::vector<T> gInverse(f.getCPURes().size());
  thrust::transform(g.getCPURes().begin(), g.getCPURes().end(),
                    gInverse.begin(), TT::Inverse<T>());
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(f.getCPUGrad()[idx].begin(), f.getCPUGrad()[idx].end(),
                      gInverse.begin(), out->getCPUGrad()[idx].begin(),
                      thrust::multiplies<T>());
  }
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    gInverse.begin(), out->getCPURes().begin(),
                    thrust::multiplies<T>());
}

// (scalar-vector / jet): numerator has no gradient; use d(f/g) = -f g'/g^2.
template <typename T>
void scalarVectorDivJetVectorCPU(const MegBA::JetVector<T> &f,
                                 const MegBA::JetVector<T> &g,
                                 MegBA::JetVector<T> *out) {
  std::vector<T> gInverse(f.getCPURes().size());
  thrust::transform(g.getCPURes().begin(), g.getCPURes().end(),
                    gInverse.begin(), TT::Inverse<T>());
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(
        thrust::make_zip_iterator(thrust::make_tuple(
            f.getCPURes().begin(), gInverse.begin(),
            g.getCPUGrad()[idx].begin())),
        thrust::make_zip_iterator(thrust::make_tuple(
            f.getCPURes().end(), gInverse.end(), g.getCPUGrad()[idx].end())),
        out->getCPUGrad()[idx].begin(), TT::ScalarVectorDivJetVectorV<T>());
  }
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    gInverse.begin(), out->getCPURes().begin(),
                    thrust::multiplies<T>());
}

// (scalar-vector / scalar-vector): plain element-wise division.
template <typename T>
void scalarVectorDivScalarVectorCPU(const MegBA::JetVector<T> &f,
                                    const MegBA::JetVector<T> &g,
                                    MegBA::JetVector<T> *out) {
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    g.getCPURes().begin(), out->getCPURes().begin(),
                    thrust::divides<T>());
}

// Dispatch for division; not commutative, so all four cases are distinct.
template <typename T>
void vectorDivVectorCPU(const MegBA::JetVector<T> &f,
                        const MegBA::JetVector<T> &g,
                        MegBA::JetVector<T> *out) {
  const bool fHasGrad = f.getGradShape() != 0;
  const bool gHasGrad = g.getGradShape() != 0;
  if (fHasGrad && gHasGrad) {
    jetVectorDivJetVectorCPU(f, g, out);
  } else if (fHasGrad) {
    jetVectorDivScalarVectorCPU(f, g, out);
  } else if (gHasGrad) {
    scalarVectorDivJetVectorCPU(f, g, out);
  } else {
    scalarVectorDivScalarVectorCPU(f, g, out);
  }
}

template void vectorDivVectorCPU<double>(const MegBA::JetVector<double> &f,
                                         const MegBA::JetVector<double> &g,
                                         MegBA::JetVector<double> *out);

template void vectorDivVectorCPU<float>(const MegBA::JetVector<float> &f,
                                        const MegBA::JetVector<float> &g,
                                        MegBA::JetVector<float> *out);

// (jet + constant): shifts the value part; gradient is copied through.
template <typename T>
void jetVectorAddScalarCPU(const MegBA::JetVector<T> &f, T g,
                           MegBA::JetVector<T> *out) {
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    thrust::make_constant_iterator(g),
                    out->getCPURes().begin(), thrust::plus<T>());
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx)
    thrust::copy(f.getCPUGrad()[idx].begin(), f.getCPUGrad()[idx].end(),
                 out->getCPUGrad()[idx].begin());
}

template void jetVectorAddScalarCPU<double>(const MegBA::JetVector<double> &f,
                                            double g,
                                            MegBA::JetVector<double> *out);

template void jetVectorAddScalarCPU<float>(const MegBA::JetVector<float> &f,
                                           float g,
                                           MegBA::JetVector<float> *out);

// (jet - constant): shifts the value part; gradient is copied through.
template <typename T>
void jetVectorSubScalarCPU(const MegBA::JetVector<T> &f, T g,
                           MegBA::JetVector<T> *out) {
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    thrust::make_constant_iterator(g),
                    out->getCPURes().begin(), thrust::minus<T>());
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx)
    thrust::copy(f.getCPUGrad()[idx].begin(), f.getCPUGrad()[idx].end(),
                 out->getCPUGrad()[idx].begin());
}

template void jetVectorSubScalarCPU<double>(const MegBA::JetVector<double> &f,
                                            double g,
                                            MegBA::JetVector<double> *out);

template void jetVectorSubScalarCPU<float>(const MegBA::JetVector<float> &f,
                                           float g,
                                           MegBA::JetVector<float> *out);

// (jet * constant): both parts scale by g.
template <typename T>
void jetVectorMulScalarCPU(const MegBA::JetVector<T> &f, T g,
                           MegBA::JetVector<T> *out) {
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(f.getCPUGrad()[idx].begin(), f.getCPUGrad()[idx].end(),
                      thrust::make_constant_iterator(g),
                      out->getCPUGrad()[idx].begin(),
                      thrust::multiplies<T>());
  }
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    thrust::make_constant_iterator(g),
                    out->getCPURes().begin(), thrust::multiplies<T>());
}

template void jetVectorMulScalarCPU<double>(const MegBA::JetVector<double> &f,
                                            double g,
                                            MegBA::JetVector<double> *out);

template void jetVectorMulScalarCPU<float>(const MegBA::JetVector<float> &f,
                                           float g,
                                           MegBA::JetVector<float> *out);

// (jet / constant): implemented as multiplication by the reciprocal.
template <typename T>
void jetVectorDivScalarCPU(const MegBA::JetVector<T> &f, T g,
                           MegBA::JetVector<T> *out) {
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(f.getCPUGrad()[idx].begin(), f.getCPUGrad()[idx].end(),
                      thrust::make_constant_iterator(T(1.) / g),
                      out->getCPUGrad()[idx].begin(),
                      thrust::multiplies<T>());
  }
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    thrust::make_constant_iterator(T(1.) / g),
                    out->getCPURes().begin(), thrust::multiplies<T>());
}

template void jetVectorDivScalarCPU<double>(const MegBA::JetVector<double> &f,
                                            double g,
                                            MegBA::JetVector<double> *out);

template void jetVectorDivScalarCPU<float>(const MegBA::JetVector<float> &f,
                                           float g,
                                           MegBA::JetVector<float> *out);

// (constant - jet): gradient flips sign, value part is f - g.
template <typename T>
void scalarSubJetVectorCPU(T f, const JetVector<T> &g, JetVector<T> *out) {
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(g.getCPUGrad()[idx].begin(), g.getCPUGrad()[idx].end(),
                      out->getCPUGrad()[idx].begin(), thrust::negate<T>());
  }
  thrust::transform(g.getCPURes().begin(), g.getCPURes().end(),
                    out->getCPURes().begin(), TT::ScalarSubJetVector<T>(f));
}

template void scalarSubJetVectorCPU<double>(double f,
                                            const MegBA::JetVector<double> &g,
                                            MegBA::JetVector<double> *out);

template void scalarSubJetVectorCPU<float>(float f,
                                           const MegBA::JetVector<float> &g,
                                           MegBA::JetVector<float> *out);

// (constant / jet): gradients first since they read g's value part, which
// `out` may alias; then the value part f / g.
template <typename T>
void scalarDivJetVectorCPU(T f, const JetVector<T> &g, JetVector<T> *out) {
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(g.getCPURes().begin(), g.getCPURes().end(),
                      g.getCPUGrad()[idx].begin(),
                      out->getCPUGrad()[idx].begin(),
                      TT::ScalarDivJetVectorV<T>(f));
  }
  thrust::transform(g.getCPURes().begin(), g.getCPURes().end(),
                    out->getCPURes().begin(), TT::ScalarDivJetVectorA<T>(f));
}

template void scalarDivJetVectorCPU<double>(double f,
                                            const MegBA::JetVector<double> &g,
                                            MegBA::JetVector<double> *out);

template void scalarDivJetVectorCPU<float>(float f,
                                           const MegBA::JetVector<float> &g,
                                           MegBA::JetVector<float> *out);

// abs(f): multiply value and gradient by sign(f).
template <typename T>
void absJetVectorCPU(const MegBA::JetVector<T> &f, MegBA::JetVector<T> *out) {
  std::vector<T> signMask(f.getCPURes().size());
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    signMask.begin(), TT::AbsMask<T>());
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(f.getCPUGrad()[idx].begin(), f.getCPUGrad()[idx].end(),
                      signMask.begin(), out->getCPUGrad()[idx].begin(),
                      thrust::multiplies<T>());
  }
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    signMask.begin(), out->getCPURes().begin(),
                    thrust::multiplies<T>());
}

template void absJetVectorCPU<double>(const MegBA::JetVector<double> &f,
                                      MegBA::JetVector<double> *out);

template void absJetVectorCPU<float>(const JetVector<float> &f,
                                     JetVector<float> *out);

// cos(f): gradients first (they read f's value part, which `out` may alias),
// then the value part.
template <typename T>
void cosJetVectorCPU(const MegBA::JetVector<T> &f, MegBA::JetVector<T> *out) {
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                      f.getCPUGrad()[idx].begin(),
                      out->getCPUGrad()[idx].begin(),
                      TT::NegativeSinMul<T>());
  }
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    out->getCPURes().begin(), TT::Cos<T>());
}

template void cosJetVectorCPU<double>(const MegBA::JetVector<double> &f,
                                      MegBA::JetVector<double> *out);

template void cosJetVectorCPU<float>(const JetVector<float> &f,
                                     JetVector<float> *out);

// sin(f): same ordering rationale as cos.
template <typename T>
void sinJetVectorCPU(const MegBA::JetVector<T> &f, MegBA::JetVector<T> *out) {
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                      f.getCPUGrad()[idx].begin(),
                      out->getCPUGrad()[idx].begin(), TT::CosMul<T>());
  }
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    out->getCPURes().begin(), TT::Sin<T>());
}

template void sinJetVectorCPU<double>(const MegBA::JetVector<double> &f,
                                      MegBA::JetVector<double> *out);

template void sinJetVectorCPU<float>(const MegBA::JetVector<float> &f,
                                     MegBA::JetVector<float> *out);

// sqrt(f): the value part is computed FIRST so the gradient pass can reuse
// the already-computed square roots (d sqrt(f) = f' / (2 sqrt(f))).
template <typename T>
void sqrtJetVectorCPU(const MegBA::JetVector<T> &f,
                      MegBA::JetVector<T> *out) {
  thrust::transform(f.getCPURes().begin(), f.getCPURes().end(),
                    out->getCPURes().begin(), TT::Sqrt<T>());
  const unsigned int gradShape = out->getGradShape();
  for (unsigned int idx = 0; idx < gradShape; ++idx) {
    thrust::transform(out->getCPURes().begin(), out->getCPURes().end(),
                      f.getCPUGrad()[idx].begin(),
                      out->getCPUGrad()[idx].begin(),
                      TT::SqrtJetVectorV<T>());
  }
}

template void sqrtJetVectorCPU<double>(const MegBA::JetVector<double> &f,
                                       MegBA::JetVector<double> *out);

template void sqrtJetVectorCPU<float>(const MegBA::JetVector<float> &f,
                                      MegBA::JetVector<float> *out);
}  // namespace impl
}  // namespace math
}  // namespace MegBA
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; fill-column: 100 -*- */ /* * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #pragma once #include <atomic> #include <string> #include <vector> #include <functional> #include "Protocol.hpp" #include "StringVector.hpp" #include "Log.hpp" /// The payload type used to send/receive data. class Message { public: enum class Type { Text, JSON, Binary }; enum class Dir { In, Out }; /// Construct a text message. /// message must include the full first-line. Message(const std::string& message, const enum Dir dir) : _forwardToken(getForwardToken(message.data(), message.size())), _data(copyDataAfterOffset(message.data(), message.size(), _forwardToken.size())), _tokens(StringVector::tokenize(_data.data(), _data.size())), _id(makeId(dir)), _type(detectType()) { LOG_TRC("Message " << abbr()); } /// Construct a message from a string with type and /// reserve extra space (total, including message). /// message must include the full first-line. Message(const std::string& message, const enum Dir dir, const size_t reserve) : _forwardToken(getForwardToken(message.data(), message.size())), _data(copyDataAfterOffset(message.data(), message.size(), _forwardToken.size())), _tokens(StringVector::tokenize(message.data() + _forwardToken.size(), message.size() - _forwardToken.size())), _id(makeId(dir)), _type(detectType()) { _data.reserve(std::max(reserve, message.size())); LOG_TRC("Message " << abbr()); } /// Construct a message from a character array with type. /// Note: p must include the full first-line. 
Message(const char* p, const size_t len, const enum Dir dir) : _forwardToken(getForwardToken(p, len)), _data(copyDataAfterOffset(p, len, _forwardToken.size())), _tokens(StringVector::tokenize(_data.data(), _data.size())), _id(makeId(dir)), _type(detectType()) { LOG_TRC("Message " << abbr()); } size_t size() const { return _data.size(); } const std::vector<char>& data() const { return _data; } const StringVector& tokens() const { return _tokens; } const std::string& forwardToken() const { return _forwardToken; } std::string firstToken() const { return _tokens[0]; } bool firstTokenMatches(const std::string& target) const { return _tokens[0] == target; } std::string operator[](size_t index) const { return _tokens[index]; } const std::string& firstLine() { assignFirstLineIfEmpty(); return _firstLine; } bool getTokenInteger(const std::string& name, int& value) { return COOLProtocol::getTokenInteger(_tokens, name, value); } /// Return the abbreviated message for logging purposes. std::string abbr() const { return _id + ' ' + COOLProtocol::getAbbreviatedMessage(_data.data(), _data.size()); } const std::string& id() const { return _id; } /// Returns the json part of the message, if any. std::string jsonString() const { if (_tokens.size() > 1 && _tokens[1].size() && _tokens[1][0] == '{') { const size_t firstTokenSize = _tokens[0].size(); return std::string(_data.data() + firstTokenSize, _data.size() - firstTokenSize); } return std::string(); } /// Append more data to the message. void append(const char* p, const size_t len) { const size_t curSize = _data.size(); _data.resize(curSize + len); std::memcpy(_data.data() + curSize, p, len); } /// Returns true if and only if the payload is considered Binary. 
bool isBinary() const { return _type == Type::Binary; } /// Allows some in-line re-writing of the message void rewriteDataBody(const std::function<bool (std::vector<char> &)>& func) { // Make sure _firstLine is assigned before we change _data assignFirstLineIfEmpty(); if (func(_data)) { // Check - just the body. assert(_firstLine == COOLProtocol::getFirstLine(_data.data(), _data.size())); assert(_type == detectType()); } } private: /// Constructs a unique ID. static std::string makeId(const enum Dir dir) { static std::atomic<unsigned> Counter; return (dir == Dir::In ? 'i' : 'o') + std::to_string(++Counter); } void assignFirstLineIfEmpty() { if(_firstLine.empty()) { _firstLine = COOLProtocol::getFirstLine(_data.data(), _data.size()); } } Type detectType() const { if (_tokens.equals(0, "tile:") || _tokens.equals(0, "tilecombine:") || _tokens.equals(0, "renderfont:") || _tokens.equals(0, "rendersearchresult:") || _tokens.equals(0, "windowpaint:")) { return Type::Binary; } if (_data.size() > 0 && _data[_data.size() - 1] == '}') { return Type::JSON; } // All others are plain text. return Type::Text; } std::string getForwardToken(const char* buffer, int length) { std::string forward = COOLProtocol::getFirstToken(buffer, length); return (forward.find('-') != std::string::npos ? forward : std::string()); } std::vector<char> copyDataAfterOffset(const char *p, size_t len, size_t fromOffset) { if (!p || fromOffset >= len) return std::vector<char>(); size_t i; for (i = fromOffset; i < len; ++i) { if (p[i] != ' ') break; } if (i < len) return std::vector<char>(p + i, p + len); else return std::vector<char>(); } private: const std::string _forwardToken; std::vector<char> _data; const StringVector _tokens; const std::string _id; std::string _firstLine; const Type _type; }; /* vim:set shiftwidth=4 softtabstop=4 expandtab: */
/*
 * Copyright (c) 2015-2022 The Khronos Group Inc.
 * Copyright (c) 2015-2022 Valve Corporation
 * Copyright (c) 2015-2022 LunarG, Inc.
 * Copyright (c) 2015-2022 Google, Inc.
 * Modifications Copyright (C) 2020 Advanced Micro Devices, Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Author: Chia-I Wu <olvaffe@gmail.com>
 * Author: Chris Forbes <chrisf@ijw.co.nz>
 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
 * Author: Mark Lobodzinski <mark@lunarg.com>
 * Author: Mike Stroyan <mike@LunarG.com>
 * Author: Tobin Ehlis <tobine@google.com>
 * Author: Tony Barbour <tony@LunarG.com>
 * Author: Cody Northrop <cnorthrop@google.com>
 * Author: Dave Houlton <daveh@lunarg.com>
 * Author: Jeremy Kniager <jeremyk@lunarg.com>
 * Author: Shannon McPherson <shannon@lunarg.com>
 * Author: John Zulauf <jzulauf@lunarg.com>
 * Author: Tobias Hector <tobias.hector@amd.com>
 */

#include "cast_utils.h"
#include "layer_validation_tests.h"
#include "core_validation_error_enums.h"

// Each non-solid polygonMode must trigger VUID 01413 when fillModeNonSolid is
// disabled, and FILL_RECTANGLE_NV must trigger VUID 01414 without its extension.
TEST_F(VkLayerTest, PSOPolygonModeInvalid) {
    TEST_DESCRIPTION("Attempt to use invalid polygon fill modes.");
    VkPhysicalDeviceFeatures device_features = {};
    device_features.fillModeNonSolid = VK_FALSE;
    // The sacrificial device object
    ASSERT_NO_FATAL_FAILURE(Init(&device_features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkPipelineRasterizationStateCreateInfo rs_ci = LvlInitStruct<VkPipelineRasterizationStateCreateInfo>();
    rs_ci.lineWidth = 1.0f;
    rs_ci.rasterizerDiscardEnable = VK_TRUE;

    // The lambda captures rs_ci by reference, so each assignment below feeds a
    // fresh polygonMode into the next OneshotTest.
    auto set_polygonMode = [&](CreatePipelineHelper &helper) { helper.rs_state_ci_ = rs_ci; };

    // Set polygonMode to POINT while the non-solid fill mode feature is disabled.
    // Introduce failure by setting unsupported polygon mode
    rs_ci.polygonMode = VK_POLYGON_MODE_POINT;
    CreatePipelineHelper::OneshotTest(*this, set_polygonMode, kErrorBit,
                                      "VUID-VkPipelineRasterizationStateCreateInfo-polygonMode-01413");

    // Set polygonMode to LINE while the non-solid fill mode feature is disabled.
    // Introduce failure by setting unsupported polygon mode
    rs_ci.polygonMode = VK_POLYGON_MODE_LINE;
    CreatePipelineHelper::OneshotTest(*this, set_polygonMode, kErrorBit,
                                      "VUID-VkPipelineRasterizationStateCreateInfo-polygonMode-01413");

    // Set polygonMode to FILL_RECTANGLE_NV while the extension is not enabled.
    // Introduce failure by setting unsupported polygon mode
    rs_ci.polygonMode = VK_POLYGON_MODE_FILL_RECTANGLE_NV;
    CreatePipelineHelper::OneshotTest(*this, set_polygonMode, kErrorBit,
                                      "VUID-VkPipelineRasterizationStateCreateInfo-polygonMode-01414");
}

// Binding a garbage (non-handle) pipeline must fail parameter validation.
TEST_F(VkLayerTest, PipelineNotBound) {
    TEST_DESCRIPTION("Pass in an invalid pipeline object handle into a Vulkan API call.");

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindPipeline-pipeline-parameter");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Fabricate a handle that was never returned by the driver.
    VkPipeline badPipeline = CastToHandle<VkPipeline, uintptr_t>(0xbaadb1be);

    m_commandBuffer->begin();
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, badPipeline);

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, PipelineWrongBindPointGraphics) {
    TEST_DESCRIPTION("Bind a compute pipeline in the graphics bind point");

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindPipeline-pipelineBindPoint-00779");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    CreateComputePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.InitState();
    pipe.CreateComputePipeline();

    m_commandBuffer->begin();
    // Compute pipeline + GRAPHICS bind point -> VUID 00779.
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);

    m_errorMonitor->VerifyFound();
}

// Positive test: a minimal compute dispatch must not produce validation errors.
TEST_F(VkLayerTest, PipelineBasicCompute) {
    TEST_DESCRIPTION("Bind a compute pipeline (no subpasses)");
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());

    const char *cs = R"glsl(#version 450
    layout(local_size_x=1) in;
    layout(set=0, binding=0) uniform block { vec4 x; };
    void main(){
        vec4 v = 2.0 * x;
    }
    )glsl";

    CreateComputePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.cs_ = layer_data::make_unique<VkShaderObj>(this, cs, VK_SHADER_STAGE_COMPUTE_BIT);
    pipe.InitState();
    pipe.CreateComputePipeline();

    // Back the shader's uniform block (set=0, binding=0) with a real buffer.
    VkBufferObj buffer;
    auto bci = LvlInitStruct<VkBufferCreateInfo>();
    bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    bci.size = 1024;
    buffer.init(*m_device, bci);
    pipe.descriptor_set_->WriteDescriptorBufferInfo(0, buffer.handle(), 0, 1024);
    pipe.descriptor_set_->UpdateDescriptorSets();

    m_commandBuffer->begin();
    vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_layout_.handle(), 0, 1,
                              &pipe.descriptor_set_->set_, 0, nullptr);
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_);
    vk::CmdDispatch(m_commandBuffer->handle(), 1, 1, 1);
    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkLayerTest, PipelineWrongBindPointCompute) {
    TEST_DESCRIPTION("Bind a graphics pipeline in the compute bind point");

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindPipeline-pipelineBindPoint-00780");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    CreatePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.InitState();
    pipe.CreateGraphicsPipeline();

    m_commandBuffer->begin();
    // Graphics pipeline + COMPUTE bind point -> VUID 00780.
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_);

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, PipelineWrongBindPointRayTracing) {
    TEST_DESCRIPTION("Bind a graphics pipeline in the ray-tracing bind point");

    // VK_NV_ray_tracing requires get_physical_device_properties2 at instance level.
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_RAY_TRACING_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_NV_RAY_TRACING_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME);
    } else {
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_NV_RAY_TRACING_EXTENSION_NAME);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState());

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindPipeline-pipelineBindPoint-02392");

    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    if (!EnableDeviceProfileLayer()) {
        printf("%s Failed to enable device profile layer.\n", kSkipPrefix);
        return;
    }

    CreatePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.InitState();
    pipe.CreateGraphicsPipeline();

    m_commandBuffer->begin();
    // Graphics pipeline + RAY_TRACING_NV bind point -> VUID 02392.
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_RAY_TRACING_NV, pipe.pipeline_);

    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineBadVertexAttributeFormat) {
    TEST_DESCRIPTION("Test that pipeline validation catches invalid vertex attribute formats");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkVertexInputBindingDescription input_binding;
    memset(&input_binding, 0, sizeof(input_binding));
    VkVertexInputAttributeDescription input_attribs;
    memset(&input_attribs, 0, sizeof(input_attribs));

    // Pick a really bad format for this purpose and make sure it should fail
    input_attribs.format = VK_FORMAT_BC2_UNORM_BLOCK;
    VkFormatProperties format_props = m_device->format_properties(input_attribs.format);
    if ((format_props.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != 0) {
        // If the implementation happens to allow this format for vertex
        // buffers, the negative test cannot run.
        printf("%s Format unsuitable for test; skipped.\n", kSkipPrefix);
        return;
    }

    input_attribs.location = 0;

    auto set_info = [&](CreatePipelineHelper &helper) {
        helper.vi_ci_.pVertexBindingDescriptions = &input_binding;
        helper.vi_ci_.vertexBindingDescriptionCount = 1;
        helper.vi_ci_.pVertexAttributeDescriptions = &input_attribs;
        helper.vi_ci_.vertexAttributeDescriptionCount = 1;
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkVertexInputAttributeDescription-format-00623");
}

// With independentBlend disabled, all color attachments must use identical
// blend state; att_state1/att_state2 below differ in blendEnable.
TEST_F(VkLayerTest, DisabledIndependentBlend) {
    TEST_DESCRIPTION(
        "Generate INDEPENDENT_BLEND by disabling independent blend and then specifying different blend states for two "
        "attachments");
    VkPhysicalDeviceFeatures features = {};
    features.independentBlend = VK_FALSE;
    ASSERT_NO_FATAL_FAILURE(Init(&features));

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineColorBlendStateCreateInfo-pAttachments-00605");

    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendDummy();
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);

    VkPipelineObj pipeline(m_device);
    // Create a renderPass with two color attachments
    VkAttachmentReference attachments[2] = {};
    attachments[0].layout = VK_IMAGE_LAYOUT_GENERAL;
    attachments[1].attachment = 1;
    attachments[1].layout = VK_IMAGE_LAYOUT_GENERAL;

    VkSubpassDescription subpass = {};
    subpass.pColorAttachments = attachments;
    subpass.colorAttachmentCount = 2;

    VkRenderPassCreateInfo rpci = LvlInitStruct<VkRenderPassCreateInfo>();
    rpci.subpassCount = 1;
    rpci.pSubpasses = &subpass;
    rpci.attachmentCount = 2;

    VkAttachmentDescription attach_desc[2] = {};
    attach_desc[0].format = VK_FORMAT_B8G8R8A8_UNORM;
    attach_desc[0].samples = VK_SAMPLE_COUNT_1_BIT;
    attach_desc[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    attach_desc[0].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
    attach_desc[1].format = VK_FORMAT_B8G8R8A8_UNORM;
    attach_desc[1].samples = VK_SAMPLE_COUNT_1_BIT;
    attach_desc[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    attach_desc[1].finalLayout = VK_IMAGE_LAYOUT_GENERAL;

    rpci.pAttachments = attach_desc;

    VkRenderPass renderpass;
    vk::CreateRenderPass(m_device->device(), &rpci, NULL, &renderpass);
    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT);
    pipeline.AddShader(&vs);

    // Two differing blend states on a device without independentBlend.
    VkPipelineColorBlendAttachmentState att_state1 = {}, att_state2 = {};
    att_state1.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
    att_state1.blendEnable = VK_TRUE;
    att_state2.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
    att_state2.blendEnable = VK_FALSE;
    pipeline.AddColorAttachment(0, att_state1);
    pipeline.AddColorAttachment(1, att_state2);
    pipeline.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderpass);

    m_errorMonitor->VerifyFound();

    vk::DestroyRenderPass(m_device->device(), renderpass, NULL);
}

TEST_F(VkLayerTest, BlendingOnFormatWithoutBlendingSupport) {
    TEST_DESCRIPTION("Test that blending is not enabled with a format not support blending");
    VkPhysicalDeviceFeatures features = {};
    features.independentBlend = VK_FALSE;
    ASSERT_NO_FATAL_FAILURE(Init(&features));

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06041");

    // Scan core formats for one usable as a color attachment but lacking
    // COLOR_ATTACHMENT_BLEND support.
    VkFormat non_blending_format = VK_FORMAT_UNDEFINED;
    for (uint32_t i = 1; i <= VK_FORMAT_ASTC_12x12_SRGB_BLOCK; i++) {
        VkFormatProperties format_props = m_device->format_properties(static_cast<VkFormat>(i));
        if ((format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT) &&
            !(format_props.optimalTilingFeatures & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BLEND_BIT)) {
            non_blending_format = static_cast<VkFormat>(i);
            break;
        }
    }

    if (non_blending_format == VK_FORMAT_UNDEFINED) {
        printf("%s Unable to find a color attachment format with no blending support. Skipping test.\n", kSkipPrefix);
        return;
    }

    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendDummy();
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);

    VkPipelineObj pipeline(m_device);
    // Create a renderPass with a single color attachment using the non-blending format
    VkAttachmentReference attachment = {};
    attachment.layout = VK_IMAGE_LAYOUT_GENERAL;

    VkSubpassDescription subpass = {};
    subpass.pColorAttachments = &attachment;
    subpass.colorAttachmentCount = 1;

    VkRenderPassCreateInfo rpci = LvlInitStruct<VkRenderPassCreateInfo>();
    rpci.subpassCount = 1;
    rpci.pSubpasses = &subpass;
    rpci.attachmentCount = 1;

    VkAttachmentDescription attach_desc = {};
    attach_desc.format = non_blending_format;
    attach_desc.samples = VK_SAMPLE_COUNT_1_BIT;
    attach_desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL;

    rpci.pAttachments = &attach_desc;

    VkRenderPass rp;
    vk::CreateRenderPass(m_device->device(), &rpci, NULL, &rp);
    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT);
    pipeline.AddShader(&vs);

    // Enabling blend on a format without blend support -> VUID 06041.
    VkPipelineColorBlendAttachmentState att_state = {};
    att_state.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
    att_state.blendEnable = VK_TRUE;
    pipeline.AddColorAttachment(0, att_state);
    pipeline.CreateVKPipeline(descriptorSet.GetPipelineLayout(), rp);

    m_errorMonitor->VerifyFound();

    vk::DestroyRenderPass(m_device->device(), rp, NULL);
}

// Is the Pipeline compatible with the expectations of the Renderpass/subpasses?
// A pipeline rendering to a color attachment must supply pColorBlendState.
TEST_F(VkLayerTest, PipelineRenderpassCompatibility) {
    TEST_DESCRIPTION(
        "Create a graphics pipeline that is incompatible with the requirements of its contained Renderpass/subpasses.");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkPipelineColorBlendAttachmentState att_state1 = {};
    att_state1.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
    att_state1.blendEnable = VK_TRUE;

    auto set_info = [&](CreatePipelineHelper &helper) {
        helper.cb_attachments_[0] = att_state1;
        // Null color blend state with a color attachment present -> VUID 06044.
        helper.gp_ci_.pColorBlendState = nullptr;
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06044");
}

TEST_F(VkLayerTest, PointSizeFailure) {
    TEST_DESCRIPTION("Create a pipeline using TOPOLOGY_POINT_LIST but do not set PointSize in vertex shader.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    ASSERT_NO_FATAL_FAILURE(InitViewport());

    // Create VS declaring PointSize but not writing to it
    const char NoPointSizeVertShader[] = R"glsl(
        #version 450
        vec2 vertices[3];
        out gl_PerVertex
        {
            vec4 gl_Position;
            float gl_PointSize;
        };
        void main() {
            vertices[0] = vec2(-1.0, -1.0);
            vertices[1] = vec2( 1.0, -1.0);
            vertices[2] = vec2( 0.0,  1.0);
            gl_Position = vec4(vertices[gl_VertexIndex % 3], 0.0, 1.0);
        }
    )glsl";
    VkShaderObj vs(this, NoPointSizeVertShader, VK_SHADER_STAGE_VERTEX_BIT);

    // Set Input Assembly to TOPOLOGY POINT LIST
    auto set_info = [&](CreatePipelineHelper &helper) {
        // Set Input Assembly to TOPOLOGY POINT LIST
        helper.ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
        helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "Pipeline topology is set to POINT_LIST");
}

// With geometryShader/tessellationShader disabled, restart-incapable and
// stage-dependent topologies must each produce the matching topology VUIDs.
TEST_F(VkLayerTest, InvalidTopology) {
    TEST_DESCRIPTION("InvalidTopology.");
    VkPhysicalDeviceFeatures deviceFeatures = {};
    deviceFeatures.geometryShader = VK_FALSE;
    deviceFeatures.tessellationShader = VK_FALSE;

    ASSERT_NO_FATAL_FAILURE(Init(&deviceFeatures));
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkShaderObj vs(this, bindStateVertPointSizeShaderText, VK_SHADER_STAGE_VERTEX_BIT);

    VkPrimitiveTopology topology;

    // topology is captured by reference; primitiveRestartEnable stays VK_TRUE
    // throughout, which is invalid for all of the list topologies below.
    auto set_info = [&](CreatePipelineHelper &helper) {
        helper.ia_ci_.topology = topology;
        helper.ia_ci_.primitiveRestartEnable = VK_TRUE;
        helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
    };

    topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428");

    topology = VK_PRIMITIVE_TOPOLOGY_LINE_LIST;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428");

    topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428");

    // Adjacency topologies additionally require the geometry shader feature (00429).
    topology = VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      std::vector<string>{"VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428",
                                                          "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429"});

    topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      std::vector<string>{"VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428",
                                                          "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429"});

    // Patch list requires tessellation (00430) and tessellation stages (00737).
    topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      std::vector<string>{"VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00428",
                                                          "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00430",
                                                          "VUID-VkGraphicsPipelineCreateInfo-topology-00737"});

    topology = VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429");

    topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00429");
}

// With primitiveTopologyListRestart/PatchListRestart explicitly disabled,
// primitiveRestartEnable on list topologies must trigger VUIDs 06252/06253.
TEST_F(VkLayerTest, PrimitiveTopologyListRestart) {
    TEST_DESCRIPTION("Test VK_EXT_primitive_topology_list_restart");

    uint32_t version = SetTargetApiVersion(VK_API_VERSION_1_1);
    if (version < VK_API_VERSION_1_1) {
        printf("%s At least Vulkan version 1.1 is required, skipping test.\n", kSkipPrefix);
        return;
    }

    auto ptl_restart_features = LvlInitStruct<VkPhysicalDevicePrimitiveTopologyListRestartFeaturesEXT>();
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&ptl_restart_features);
    m_device_extension_names.push_back(VK_EXT_PRIMITIVE_TOPOLOGY_LIST_RESTART_EXTENSION_NAME);
    bool retval = InitFrameworkAndRetrieveFeatures(features2);
    if (!retval) {
        printf("%s Error initializing extensions or retrieving features, skipping test\n", kSkipPrefix);
        return;
    }
    if (!ptl_restart_features.primitiveTopologyListRestart) {
        printf("%s primitive topology list restart feature is not available, skipping test\n", kSkipPrefix);
        return;
    }
    // Device must support the feature, but we create the device with it OFF
    // so that enabling restart on list topologies is invalid.
    ptl_restart_features.primitiveTopologyListRestart = false;
    ptl_restart_features.primitiveTopologyPatchListRestart = false;

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkShaderObj vs(this, bindStateVertPointSizeShaderText, VK_SHADER_STAGE_VERTEX_BIT);

    VkPrimitiveTopology topology;

    auto set_info = [&](CreatePipelineHelper &helper) {
        helper.ia_ci_.topology = topology;
        helper.ia_ci_.primitiveRestartEnable = VK_TRUE;
        helper.shader_stages_ = { vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo() };
    };

    topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-06252");

    topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      std::vector<string>{"VUID-VkPipelineInputAssemblyStateCreateInfo-topology-06253",
                                                          "VUID-VkGraphicsPipelineCreateInfo-topology-00737"});
}

TEST_F(VkLayerTest, PointSizeGeomShaderFailure) {
    TEST_DESCRIPTION(
        "Create a pipeline using TOPOLOGY_POINT_LIST, set PointSize vertex shader, but not in the final geometry stage.");

    ASSERT_NO_FATAL_FAILURE(Init());

    if ((!m_device->phy().features().geometryShader) || (!m_device->phy().features().shaderTessellationAndGeometryPointSize)) {
        printf("%s Device does not support the required geometry shader features; skipped.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    ASSERT_NO_FATAL_FAILURE(InitViewport());

    // Geometry shader that emits a point but never writes gl_PointSize
    static char const *gsSource = R"glsl(
        #version 450
        layout (points) in;
        layout (points) out;
        layout (max_vertices = 1) out;
        void main() {
            gl_Position = vec4(1.0, 0.5, 0.5, 0.0);
            EmitVertex();
        }
    )glsl";

    VkShaderObj vs(this, bindStateVertPointSizeShaderText, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj gs(this, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT);

    auto set_info = [&](CreatePipelineHelper &helper) {
        helper.ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
        helper.shader_stages_ = {vs.GetStageCreateInfo(), gs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "Pipeline topology is set to POINT_LIST");
}

TEST_F(VkLayerTest, BuiltinBlockOrderMismatchVsGs) {
    TEST_DESCRIPTION("Use different order of gl_Position and gl_PointSize in builtin block interface between VS and GS.");

    ASSERT_NO_FATAL_FAILURE(Init());

    if (!m_device->phy().features().geometryShader || !m_device->phy().features().shaderTessellationAndGeometryPointSize) {
        printf("%s Device does not support geometry shaders; Skipped.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    ASSERT_NO_FATAL_FAILURE(InitViewport());

    // Compiled using the GLSL code below. GlslangValidator rearranges the members, but here they are kept in the order provided.
    // #version 450
    // layout (points) in;
    // layout (points) out;
    // layout (max_vertices = 1) out;
    //
    // in gl_PerVertex {
    //     float gl_PointSize;
    //     vec4 gl_Position;
    // } gl_in[];
    //
    // void main() {
    //     gl_Position = gl_in[0].gl_Position;
    //     gl_PointSize = gl_in[0].gl_PointSize;
    //     EmitVertex();
    // }
    // Note %gl_PerVertex_0 (the GS input block) declares PointSize at member 0
    // and Position at member 1 — the reverse of the VS output block order.
    const std::string gsSource = R"(
               OpCapability Geometry
               OpCapability GeometryPointSize
          %1 = OpExtInstImport "GLSL.std.450"
               OpMemoryModel Logical GLSL450
               OpEntryPoint Geometry %main "main" %_ %gl_in
               OpExecutionMode %main InputPoints
               OpExecutionMode %main Invocations 1
               OpExecutionMode %main OutputPoints
               OpExecutionMode %main OutputVertices 1
               OpSource GLSL 450
               OpMemberDecorate %gl_PerVertex 0 BuiltIn Position
               OpMemberDecorate %gl_PerVertex 1 BuiltIn PointSize
               OpMemberDecorate %gl_PerVertex 2 BuiltIn ClipDistance
               OpMemberDecorate %gl_PerVertex 3 BuiltIn CullDistance
               OpDecorate %gl_PerVertex Block
               OpMemberDecorate %gl_PerVertex_0 0 BuiltIn PointSize
               OpMemberDecorate %gl_PerVertex_0 1 BuiltIn Position
               OpDecorate %gl_PerVertex_0 Block
       %void = OpTypeVoid
          %3 = OpTypeFunction %void
      %float = OpTypeFloat 32
    %v4float = OpTypeVector %float 4
       %uint = OpTypeInt 32 0
     %uint_1 = OpConstant %uint 1
%_arr_float_uint_1 = OpTypeArray %float %uint_1
%gl_PerVertex = OpTypeStruct %v4float %float %_arr_float_uint_1 %_arr_float_uint_1
%_ptr_Output_gl_PerVertex = OpTypePointer Output %gl_PerVertex
          %_ = OpVariable %_ptr_Output_gl_PerVertex Output
        %int = OpTypeInt 32 1
      %int_0 = OpConstant %int 0
%gl_PerVertex_0 = OpTypeStruct %float %v4float
%_arr_gl_PerVertex_0_uint_1 = OpTypeArray %gl_PerVertex_0 %uint_1
%_ptr_Input__arr_gl_PerVertex_0_uint_1 = OpTypePointer Input %_arr_gl_PerVertex_0_uint_1
      %gl_in = OpVariable %_ptr_Input__arr_gl_PerVertex_0_uint_1 Input
%_ptr_Input_v4float = OpTypePointer Input %v4float
%_ptr_Output_v4float = OpTypePointer Output %v4float
      %int_1 = OpConstant %int 1
%_ptr_Input_float = OpTypePointer Input %float
%_ptr_Output_float = OpTypePointer Output %float
       %main = OpFunction %void None %3
          %5 = OpLabel
         %21 = OpAccessChain %_ptr_Input_v4float %gl_in %int_0 %int_1
         %22 = OpLoad %v4float %21
         %24 = OpAccessChain %_ptr_Output_v4float %_ %int_0
               OpStore %24 %22
         %27 = OpAccessChain %_ptr_Input_float %gl_in %int_0 %int_0
         %28 = OpLoad %float %27
         %30 = OpAccessChain %_ptr_Output_float %_ %int_1
               OpStore %30 %28
               OpEmitVertex
               OpReturn
               OpFunctionEnd
        )";

    VkShaderObj vs(this, bindStateVertPointSizeShaderText, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj gs(this, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM);

    auto set_info = [&](CreatePipelineHelper &helper) {
        helper.ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
        helper.shader_stages_ = {vs.GetStageCreateInfo(), gs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "Builtin variable inside block doesn't match between");
}

TEST_F(VkLayerTest, BuiltinBlockSizeMismatchVsGs) {
    TEST_DESCRIPTION("Use different number of elements in builtin block interface between VS and GS.");

    ASSERT_NO_FATAL_FAILURE(Init());

    if (!m_device->phy().features().geometryShader || !m_device->phy().features().shaderTessellationAndGeometryPointSize) {
        printf("%s Device does not support geometry shaders; Skipped.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    ASSERT_NO_FATAL_FAILURE(InitViewport());

    // GS redeclares gl_PerVertex with three members (adds gl_ClipDistance),
    // mismatching the VS output block element count.
    static const char *gsSource = R"glsl(
        #version 450
        layout (points) in;
        layout (points) out;
        layout (max_vertices = 1) out;
        in gl_PerVertex
        {
            vec4 gl_Position;
            float gl_PointSize;
            float gl_ClipDistance[];
        } gl_in[];
        void main()
        {
            gl_Position = gl_in[0].gl_Position;
            gl_PointSize = gl_in[0].gl_PointSize;
            EmitVertex();
        }
    )glsl";

    VkShaderObj vs(this, bindStateVertPointSizeShaderText, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj gs(this, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT);

    auto set_info = [&](CreatePipelineHelper &helper) {
        helper.ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
        helper.shader_stages_ = {vs.GetStageCreateInfo(), gs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "Number of elements inside builtin block differ between stages");
}

TEST_F(VkLayerTest, CreatePipelineLayoutExceedsSetLimit) {
    TEST_DESCRIPTION("Attempt to create a pipeline layout using more than the physical limit of SetLayouts.");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkDescriptorSetLayoutBinding layout_binding = {};
    layout_binding.binding = 0;
    layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    layout_binding.descriptorCount = 1;
    layout_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    layout_binding.pImmutableSamplers = NULL;

    VkDescriptorSetLayoutCreateInfo ds_layout_ci = LvlInitStruct<VkDescriptorSetLayoutCreateInfo>();
    ds_layout_ci.bindingCount = 1;
    ds_layout_ci.pBindings = &layout_binding;

    VkDescriptorSetLayout ds_layout = {};
    VkResult err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    // Create an array of DSLs, one larger than the physical limit
    const auto excess_layouts = 1 + m_device->phy().properties().limits.maxBoundDescriptorSets;
    std::vector<VkDescriptorSetLayout> dsl_array(excess_layouts, ds_layout);

    VkPipelineLayoutCreateInfo pipeline_layout_ci = LvlInitStruct<VkPipelineLayoutCreateInfo>();
    pipeline_layout_ci.setLayoutCount = excess_layouts;
    pipeline_layout_ci.pSetLayouts = dsl_array.data();

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-setLayoutCount-00286");
    VkPipelineLayout pipeline_layout = VK_NULL_HANDLE;
    err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();

    // Clean up
    vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
}
TEST_F(VkLayerTest, CreatePipelineExcessSubsampledPerStageDescriptors) { TEST_DESCRIPTION("Attempt to create a pipeline layout where total subsampled descriptors exceed limits"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); // Check extension support if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_FRAGMENT_DENSITY_MAP_2_EXTENSION_NAME)) { printf("%s test requires %s extension. Skipping.\n", kSkipPrefix, VK_EXT_FRAGMENT_DENSITY_MAP_2_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_EXT_FRAGMENT_DENSITY_MAP_2_EXTENSION_NAME); PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr); VkPhysicalDeviceFragmentDensityMap2PropertiesEXT density_map2_properties = LvlInitStruct<VkPhysicalDeviceFragmentDensityMap2PropertiesEXT>(); VkPhysicalDeviceProperties2KHR properties2 = LvlInitStruct<VkPhysicalDeviceProperties2KHR>(&density_map2_properties); vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2); ASSERT_NO_FATAL_FAILURE(InitState()); uint32_t max_subsampled_samplers = density_map2_properties.maxDescriptorSetSubsampledSamplers; // Note: Adding this check in case mock ICDs don't initialize min-max values correctly if (max_subsampled_samplers == 0) { printf("%s maxDescriptorSetSubsampledSamplers limit (%d) must be greater than 0. 
Skipping.\n", kSkipPrefix, max_subsampled_samplers); return; } if (max_subsampled_samplers >= properties2.properties.limits.maxDescriptorSetSamplers) { printf("%s test assumes maxDescriptorSetSubsampledSamplers limit (%d) is less than overall sampler limit (%d). Skipping.\n", kSkipPrefix, max_subsampled_samplers, properties2.properties.limits.maxDescriptorSetSamplers); return; } VkDescriptorSetLayoutBinding dslb = {}; std::vector<VkDescriptorSetLayoutBinding> dslb_vec = {}; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; VkDescriptorSetLayoutCreateInfo ds_layout_ci = LvlInitStruct<VkDescriptorSetLayoutCreateInfo>(); VkPipelineLayoutCreateInfo pipeline_layout_ci = LvlInitStruct<VkPipelineLayoutCreateInfo>(); pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; VkSamplerCreateInfo sampler_info = SafeSaneSamplerCreateInfo(); sampler_info.flags |= VK_SAMPLER_CREATE_SUBSAMPLED_BIT_EXT; VkSampler sampler = VK_NULL_HANDLE; VkResult err = vk::CreateSampler(m_device->device(), &sampler_info, NULL, &sampler); ASSERT_VK_SUCCESS(err); // just make all the immutable samplers point to the same sampler std::vector<VkSampler> immutableSamplers; immutableSamplers.resize(max_subsampled_samplers); for (uint32_t sampler_idx = 0; sampler_idx < max_subsampled_samplers; sampler_idx++) { immutableSamplers[sampler_idx] = sampler; } // VU 03566 - too many subsampled sampler type descriptors across stages dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dslb.descriptorCount = max_subsampled_samplers; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; dslb.pImmutableSamplers = &immutableSamplers[0]; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dslb.descriptorCount = max_subsampled_samplers; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings 
= dslb_vec.data();
    err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);
    // Two bindings of max_subsampled_samplers immutable subsampled samplers (vertex + fragment)
    // exceed maxDescriptorSetSubsampledSamplers, so pipeline layout creation must fail with 03566.
    const char *max_sampler_vuid = "VUID-VkPipelineLayoutCreateInfo-pImmutableSamplers-03566";
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_sampler_vuid);
    err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
}

// Verifies that vkCreatePipelineLayout reports an error for every per-stage descriptor
// limit that a descriptor set layout exceeds (samplers, uniform/storage buffers,
// sampled/storage images, input attachments), and — when VK_EXT_descriptor_indexing
// is available — also the corresponding update-after-bind limits. Each section builds
// a layout sized just past one per-stage limit, arms the expected VUID message(s),
// attempts creation, and verifies the error fired.
TEST_F(VkLayerTest, CreatePipelineLayoutExcessPerStageDescriptors) {
    TEST_DESCRIPTION("Attempt to create a pipeline layout where total descriptors exceed per-stage limits");

    // Descriptor indexing needs GPDP2 at instance level plus maintenance3 + descriptor_indexing
    // device extensions; its presence switches which VUID strings the layers emit.
    bool descriptor_indexing = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    if (descriptor_indexing) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    descriptor_indexing = descriptor_indexing && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_3_EXTENSION_NAME);
    descriptor_indexing =
        descriptor_indexing && DeviceExtensionSupported(gpu(), nullptr, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
    if (descriptor_indexing) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_3_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    // Per-stage limits: each section below overshoots one of these.
    uint32_t max_uniform_buffers = m_device->phy().properties().limits.maxPerStageDescriptorUniformBuffers;
    uint32_t max_storage_buffers = m_device->phy().properties().limits.maxPerStageDescriptorStorageBuffers;
    uint32_t max_sampled_images = m_device->phy().properties().limits.maxPerStageDescriptorSampledImages;
    uint32_t max_storage_images = m_device->phy().properties().limits.maxPerStageDescriptorStorageImages;
    uint32_t max_samplers = m_device->phy().properties().limits.maxPerStageDescriptorSamplers;
    // Combined image samplers count against both the sampler and sampled-image limits,
    // so cap at the smaller of the two.
    uint32_t max_combined = std::min(max_samplers, max_sampled_images);
    uint32_t max_input_attachments = m_device->phy().properties().limits.maxPerStageDescriptorInputAttachments;

    // All-stages (descriptor set) sums: exceeding these may fire additional VUIDs,
    // so each section arms extra messages conditionally.
    uint32_t sum_dyn_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffersDynamic;
    uint32_t sum_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffers;
    uint32_t sum_dyn_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffersDynamic;
    uint32_t sum_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffers;
    uint32_t sum_sampled_images = m_device->phy().properties().limits.maxDescriptorSetSampledImages;
    uint32_t sum_storage_images = m_device->phy().properties().limits.maxDescriptorSetStorageImages;
    uint32_t sum_samplers = m_device->phy().properties().limits.maxDescriptorSetSamplers;
    uint32_t sum_input_attachments = m_device->phy().properties().limits.maxDescriptorSetInputAttachments;

    VkPhysicalDeviceDescriptorIndexingProperties descriptor_indexing_properties = {};
    if (descriptor_indexing) {
        descriptor_indexing_properties = GetDescriptorIndexingProperties(instance(), gpu());
    }

    // Devices that report UINT32_MAX for any of these limits can't run this test
    // (limit + 1 would wrap to 0).
    if (UINT32_MAX == std::max({max_uniform_buffers, max_storage_buffers, max_sampled_images, max_storage_images, max_samplers})) {
        printf("%s Physical device limits report as 2^32-1. Skipping test.\n", kSkipPrefix);
        return;
    }

    // dslb is reused across sections; NOTE: fields not reassigned in a later section
    // deliberately carry over from the previous push_back.
    VkDescriptorSetLayoutBinding dslb = {};
    std::vector<VkDescriptorSetLayoutBinding> dslb_vec = {};
    VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE;
    VkDescriptorSetLayoutCreateInfo ds_layout_ci = LvlInitStruct<VkDescriptorSetLayoutCreateInfo>();
    VkPipelineLayoutCreateInfo pipeline_layout_ci = LvlInitStruct<VkPipelineLayoutCreateInfo>();
    pipeline_layout_ci.setLayoutCount = 1;
    pipeline_layout_ci.pSetLayouts = &ds_layout;
    VkPipelineLayout pipeline_layout = VK_NULL_HANDLE;

    // VU 0fe0023e - too many sampler type descriptors in fragment stage
    // Fragment stage sees max_samplers (ALL_GRAPHICS) + max_combined > per-stage sampler limit.
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER;
    dslb.descriptorCount = max_samplers;
    dslb.stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS;
    dslb.pImmutableSamplers = NULL;
    dslb_vec.push_back(dslb);
    dslb.binding = 1;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    dslb.descriptorCount = max_combined;
    dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    VkResult err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    const char *max_sampler_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03016"
                                                         : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287";
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_sampler_vuid);
    if ((max_samplers + max_combined) > sum_samplers) {
        const char *max_all_sampler_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03028"
                                                                 : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677";
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_sampler_vuid);  // expect all-stages sum too
    }
    if (max_combined > sum_sampled_images) {
        const char *max_all_sampled_image_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03033"
                                                                       : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682";
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_sampled_image_vuid);  // expect all-stages sum too
    }
    if (descriptor_indexing) {
        // Update-after-bind limits may also be exceeded; arm those VUIDs as applicable.
        if ((max_samplers + max_combined) > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindSamplers) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036");
        }
        if ((max_samplers + max_combined) > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindSamplers) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022");
        }
        if (max_combined > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindSampledImages) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041");
        }
        if (max_combined > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindSampledImages) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025");
        }
    }
    err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00240 - too many uniform buffer type descriptors in vertex stage
    // Vertex stage sees 2 * (max_uniform_buffers + 1) uniform buffers (binding 1 inherits
    // descriptorCount from binding 0).
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    dslb.descriptorCount = max_uniform_buffers + 1;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb_vec.push_back(dslb);
    dslb.binding = 1;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    const char *max_uniform_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03017"
                                                         : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288";
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_uniform_vuid);
    if (dslb.descriptorCount > sum_uniform_buffers) {
        const char *max_all_uniform_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03029"
                                                                 : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678";
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_uniform_vuid);  // expect all-stages sum too
    }
    if (dslb.descriptorCount > sum_dyn_uniform_buffers) {
        const char *max_all_uniform_dynamic_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03030"
                                                                         : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679";
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_uniform_dynamic_vuid);  // expect all-stages sum too
    }
    if (descriptor_indexing) {
        if (dslb.descriptorCount > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038");
        }
        // * 2: both bindings (uniform + dynamic uniform) land in the vertex stage.
        if ((dslb.descriptorCount * 2) > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindUniformBuffers) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03023");
        }
        if (dslb.descriptorCount > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindUniformBuffers) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037");
        }
    }
    err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00242 - too many storage buffer type descriptors in compute stage
    // Compute stage sees 3 * (max_storage_buffers + 1): bindings 1 and 2 inherit
    // descriptorCount from binding 0.
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
    dslb.descriptorCount = max_storage_buffers + 1;
    dslb.stageFlags = VK_SHADER_STAGE_ALL;
    dslb_vec.push_back(dslb);
    dslb.binding = 1;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC;
    dslb_vec.push_back(dslb);
    dslb.binding = 2;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
    dslb.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    const char *max_storage_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03018"
                                                         : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289";
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_storage_vuid);
    if (dslb.descriptorCount > sum_dyn_storage_buffers) {
        const char *max_all_storage_dynamic_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03032"
                                                                         : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681";
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_storage_dynamic_vuid);  // expect all-stages sum too
    }
    // Dynamic storage buffers (binding 1) are counted under a separate limit.
    const uint32_t storage_buffer_count = dslb_vec[0].descriptorCount + dslb_vec[2].descriptorCount;
    if (storage_buffer_count > sum_storage_buffers) {
        const char *max_all_storage_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03031"
                                                                 : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680";
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_storage_vuid);  // expect all-stages sum too
    }
    if (descriptor_indexing) {
        if (storage_buffer_count > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindStorageBuffers) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039");
        }
        if (dslb.descriptorCount > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040");
        }
        // * 3: all three bindings are visible to the compute stage.
        if ((dslb.descriptorCount * 3) > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindStorageBuffers) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03024");
        }
    }
    err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00244 - too many sampled image type descriptors in multiple stages
    // Per-stage sampled images = max_sampled_images (binding 0) + max_sampled_images
    // (binding 1, uniform texel buffer inherits the count) + max_combined (binding 2).
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE;
    dslb.descriptorCount = max_sampled_images;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb_vec.push_back(dslb);
    dslb.binding = 1;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER;
    dslb.stageFlags = VK_SHADER_STAGE_ALL_GRAPHICS;
    dslb_vec.push_back(dslb);
    dslb.binding = 2;
    dslb.descriptorCount = max_combined;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    const char *max_sample_image_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03019"
                                                              : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290";
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_sample_image_vuid);
    const uint32_t sampled_image_count = max_combined + 2 * max_sampled_images;
    if (sampled_image_count > sum_sampled_images) {
        const char *max_all_sampled_image_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03033"
                                                                       : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682";
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_sampled_image_vuid);  // expect all-stages sum too
    }
    if (max_combined > sum_samplers) {
        const char *max_all_sampler_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03028"
                                                                 : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677";
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_sampler_vuid);  // expect all-stages sum too
    }
    if (descriptor_indexing) {
        if (sampled_image_count > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindSampledImages) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041");
        }
        if (sampled_image_count > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindSampledImages) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025");
        }
        if (max_combined > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindSamplers) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036");
        }
        if (max_combined > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindSamplers) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022");
        }
    }
    err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00246 - too many storage image type descriptors in fragment stage
    // Fragment stage sees 2 * (1 + max_storage_images / 2) > limit (storage texel
    // buffers count against the storage image limit).
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
    dslb.descriptorCount = 1 + (max_storage_images / 2);
    dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb_vec.push_back(dslb);
    dslb.binding = 1;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
    dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_COMPUTE_BIT;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    const char *max_storage_image_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03020"
                                                               : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291";
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_storage_image_vuid);
    const uint32_t storage_image_count = 2 * dslb.descriptorCount;
    if (storage_image_count > sum_storage_images) {
        const char *max_all_storage_image_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03034"
                                                                       : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683";
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_storage_image_vuid);  // expect all-stages sum too
    }
    if (descriptor_indexing) {
        if (storage_image_count > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindStorageImages) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042");
        }
        if (storage_image_count > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindStorageImages) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03026");
        }
    }
    err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);

    // VU 0fe00d18 - too many input attachments in fragment stage
    dslb_vec.clear();
    dslb.binding = 0;
    dslb.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT;
    dslb.descriptorCount = 1 + max_input_attachments;
    dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    dslb_vec.push_back(dslb);

    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);

    const char *max_input_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03021"
                                                       : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676";
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_input_vuid);
    if (dslb.descriptorCount > sum_input_attachments) {
        const char *max_all_input_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03035"
                                                               : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684";
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_input_vuid);  // expect all-stages sum too
    }
    if (descriptor_indexing) {
        if (dslb.descriptorCount > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindInputAttachments) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043");
        }
        if (dslb.descriptorCount > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03027");
        }
    }
    err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
}

TEST_F(VkLayerTest, CreatePipelineLayoutExcessDescriptorsOverall) {
    TEST_DESCRIPTION("Attempt to create a pipeline layout where total descriptors exceed limits");

    bool descriptor_indexing = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    if (descriptor_indexing) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    descriptor_indexing = descriptor_indexing && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_3_EXTENSION_NAME);
    descriptor_indexing =
        descriptor_indexing && DeviceExtensionSupported(gpu(), nullptr, VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
    if (descriptor_indexing) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_3_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    uint32_t max_uniform_buffers =
m_device->phy().properties().limits.maxPerStageDescriptorUniformBuffers; uint32_t max_storage_buffers = m_device->phy().properties().limits.maxPerStageDescriptorStorageBuffers; uint32_t max_sampled_images = m_device->phy().properties().limits.maxPerStageDescriptorSampledImages; uint32_t max_storage_images = m_device->phy().properties().limits.maxPerStageDescriptorStorageImages; uint32_t max_samplers = m_device->phy().properties().limits.maxPerStageDescriptorSamplers; uint32_t max_input_attachments = m_device->phy().properties().limits.maxPerStageDescriptorInputAttachments; uint32_t sum_dyn_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffersDynamic; uint32_t sum_uniform_buffers = m_device->phy().properties().limits.maxDescriptorSetUniformBuffers; uint32_t sum_dyn_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffersDynamic; uint32_t sum_storage_buffers = m_device->phy().properties().limits.maxDescriptorSetStorageBuffers; uint32_t sum_sampled_images = m_device->phy().properties().limits.maxDescriptorSetSampledImages; uint32_t sum_storage_images = m_device->phy().properties().limits.maxDescriptorSetStorageImages; uint32_t sum_samplers = m_device->phy().properties().limits.maxDescriptorSetSamplers; uint32_t sum_input_attachments = m_device->phy().properties().limits.maxDescriptorSetInputAttachments; VkPhysicalDeviceDescriptorIndexingProperties descriptor_indexing_properties = {}; if (descriptor_indexing) { descriptor_indexing_properties = GetDescriptorIndexingProperties(instance(), gpu()); } // Devices that report UINT32_MAX for any of these limits can't run this test if (UINT32_MAX == std::max({sum_dyn_uniform_buffers, sum_uniform_buffers, sum_dyn_storage_buffers, sum_storage_buffers, sum_sampled_images, sum_storage_images, sum_samplers, sum_input_attachments})) { printf("%s Physical device limits report as 2^32-1. 
Skipping test.\n", kSkipPrefix); return; } VkDescriptorSetLayoutBinding dslb = {}; std::vector<VkDescriptorSetLayoutBinding> dslb_vec = {}; VkDescriptorSetLayout ds_layout = VK_NULL_HANDLE; VkDescriptorSetLayoutCreateInfo ds_layout_ci = LvlInitStruct<VkDescriptorSetLayoutCreateInfo>(); VkPipelineLayoutCreateInfo pipeline_layout_ci = LvlInitStruct<VkPipelineLayoutCreateInfo>(); pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout = VK_NULL_HANDLE; // VU 0fe00d1a - too many sampler type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; dslb.descriptorCount = sum_samplers / 2; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dslb.descriptorCount = sum_samplers - dslb.descriptorCount + 1; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); VkResult err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); const char *max_all_sampler_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03028" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01677"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_sampler_vuid); if (dslb.descriptorCount > max_samplers) { const char *max_sampler_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03016" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00287"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_sampler_vuid); // Expect max-per-stage samplers exceeds limits } if (dslb.descriptorCount > sum_sampled_images) { const char *max_all_sampled_image_vuid = (descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03033" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_sampled_image_vuid); // Expect max overall sampled image count exceeds limits } if (dslb.descriptorCount > max_sampled_images) { const char *max_sample_image_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03019" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_sample_image_vuid); // Expect max per-stage sampled image count exceeds limits } if (descriptor_indexing) { if ((sum_samplers + 1) > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindSamplers) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036"); } if (std::max(dslb_vec[0].descriptorCount, dslb_vec[1].descriptorCount) > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindSamplers) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022"); } if (dslb_vec[1].descriptorCount > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindSampledImages) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041"); } if (dslb_vec[1].descriptorCount > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindSampledImages) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025"); } } err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d1c - too many uniform buffer type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = 
VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dslb.descriptorCount = sum_uniform_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); const char *max_all_uniform_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03029" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01678"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_uniform_vuid); if (dslb.descriptorCount > max_uniform_buffers) { const char *max_uniform_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03017" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_uniform_vuid); // expect max-per-stage too } if (descriptor_indexing) { if (dslb.descriptorCount > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindUniformBuffers) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03037"); } if (dslb.descriptorCount > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindUniformBuffers) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03023"); } } err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d1e - too many dynamic uniform buffer type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC; dslb.descriptorCount = sum_dyn_uniform_buffers + 
1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); const char *max_all_uniform_dynamic_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03030" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01679"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_uniform_dynamic_vuid); if (dslb.descriptorCount > max_uniform_buffers) { const char *max_uniform_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03017" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00288"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_uniform_vuid); // expect max-per-stage too } if (descriptor_indexing) { if (dslb.descriptorCount > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindUniformBuffersDynamic) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03038"); } if (dslb.descriptorCount > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindUniformBuffers) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03023"); } } err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d20 - too many storage buffer type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER; dslb.descriptorCount = sum_storage_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | 
VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); const char *max_all_storage_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03031" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01680"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_storage_vuid); if (dslb.descriptorCount > max_storage_buffers) { const char *max_storage_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03018" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_storage_vuid); // expect max-per-stage too } if (descriptor_indexing) { if (dslb.descriptorCount > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindStorageBuffers) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03039"); } if (dslb.descriptorCount > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindStorageBuffers) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03024"); } } err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d22 - too many dynamic storage buffer type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC; dslb.descriptorCount = sum_dyn_storage_buffers + 1; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; 
dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); const char *max_all_storage_dynamic_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03032" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01681"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_storage_dynamic_vuid); if (dslb.descriptorCount > max_storage_buffers) { const char *max_storage_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03018" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00289"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_storage_vuid); // expect max-per-stage too } if (descriptor_indexing) { if (dslb.descriptorCount > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindStorageBuffersDynamic) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03040"); } if (dslb.descriptorCount > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindStorageBuffers) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03024"); } } err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d24 - too many sampled image type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; dslb.descriptorCount = max_samplers; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; // revisit: 
not robust to odd limits. uint32_t remaining = (max_samplers > sum_sampled_images ? 0 : (sum_sampled_images - max_samplers) / 2); dslb.descriptorCount = 1 + remaining; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); dslb.binding = 2; dslb.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER; dslb.stageFlags = VK_SHADER_STAGE_COMPUTE_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); const char *max_all_sampled_image_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03033" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01682"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_sampled_image_vuid); // Takes max since VUID only checks per shader stage if (std::max(dslb_vec[0].descriptorCount, dslb_vec[1].descriptorCount) > max_sampled_images) { const char *max_sample_image_vuid = (descriptor_indexing) ? 
"VUID-VkPipelineLayoutCreateInfo-descriptorType-03019" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00290"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_sample_image_vuid); // Expect max-per-stage sampled images to exceed limits } if (descriptor_indexing) { if (max_samplers > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindSamplers) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03036"); } if (max_samplers > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindSamplers) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03022"); } if ((dslb_vec[0].descriptorCount + dslb_vec[1].descriptorCount) > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindSampledImages) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03041"); } if (std::max(dslb_vec[0].descriptorCount, dslb_vec[1].descriptorCount) > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindSampledImages) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03025"); } } err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d26 - too many storage image type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; dslb.descriptorCount = sum_storage_images / 2; dslb.stageFlags = VK_SHADER_STAGE_VERTEX_BIT; dslb.pImmutableSamplers = NULL; dslb_vec.push_back(dslb); dslb.binding = 1; dslb.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER; dslb.descriptorCount = sum_storage_images - dslb.descriptorCount + 1; 
dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb_vec.push_back(dslb); ds_layout_ci.bindingCount = dslb_vec.size(); ds_layout_ci.pBindings = dslb_vec.data(); err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); const char *max_all_storage_image_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03034" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01683"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_storage_image_vuid); if (dslb.descriptorCount > max_storage_images) { const char *max_storage_image_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03020" : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00291"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_storage_image_vuid); // expect max-per-stage too } if (descriptor_indexing) { if ((sum_storage_images + 1) > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindStorageImages) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03042"); } if (std::max(dslb_vec[0].descriptorCount, dslb_vec[1].descriptorCount) > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindStorageImages) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03026"); } } err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); // Unnecessary but harmless if test passed pipeline_layout = VK_NULL_HANDLE; vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); // VU 0fe00d28 - too many input attachment type descriptors overall dslb_vec.clear(); dslb.binding = 0; dslb.descriptorType = VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT; dslb.descriptorCount = sum_input_attachments + 1; dslb.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dslb.pImmutableSamplers = NULL; 
    // Final sub-case of the preceding pipeline-layout-limits test:
    // request more INPUT_ATTACHMENT descriptors than the device supports in total.
    dslb_vec.push_back(dslb);
    ds_layout_ci.bindingCount = dslb_vec.size();
    ds_layout_ci.pBindings = dslb_vec.data();
    err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout);
    ASSERT_VK_SUCCESS(err);
    // With VK_EXT_descriptor_indexing enabled a different VUID covers the same limit.
    const char *max_all_input_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03035"
                                                           : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01684";
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_all_input_vuid);
    if (dslb.descriptorCount > max_input_attachments) {
        const char *max_input_vuid = (descriptor_indexing) ? "VUID-VkPipelineLayoutCreateInfo-descriptorType-03021"
                                                           : "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-01676";
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, max_input_vuid);  // expect max-per-stage too
    }
    if (descriptor_indexing) {
        // The update-after-bind variants of the same limits may also trip.
        if (dslb.descriptorCount > descriptor_indexing_properties.maxDescriptorSetUpdateAfterBindInputAttachments) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-03043");
        }
        if (dslb.descriptorCount > descriptor_indexing_properties.maxPerStageDescriptorUpdateAfterBindInputAttachments) {
            m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-descriptorType-03027");
        }
    }
    err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
    vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL);  // Unnecessary but harmless if test passed
    pipeline_layout = VK_NULL_HANDLE;
    vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL);
}

// Records a pipeline bind into a command buffer, destroys the pipeline, then submits:
// the layer must flag the command buffer as invalid because of the destroyed dependency.
TEST_F(VkLayerTest, InvalidCmdBufferPipelineDestroyed) {
    TEST_DESCRIPTION("Attempt to draw with a command buffer that is invalid due to a pipeline dependency being destroyed.");
    ASSERT_NO_FATAL_FAILURE(Init());
    if (IsPlatform(kNexusPlayer)) {
        printf("%s This test should not run on Nexus Player\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    {
        // Use helper to create graphics pipeline
        CreatePipelineHelper helper(*this);
        helper.InitInfo();
        helper.InitState();
        helper.CreateGraphicsPipeline();
        // Bind helper pipeline to command buffer
        m_commandBuffer->begin();
        vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, helper.pipeline_);
        m_commandBuffer->end();
        // pipeline will be destroyed when helper goes out of scope
    }
    // Cause error by submitting command buffer that references destroyed pipeline
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-DrawState-InvalidCommandBuffer-VkPipeline");
    m_commandBuffer->QueueCommandBuffer(false);
    m_errorMonitor->VerifyFound();
}

// Binds a fabricated (never-created) VkPipeline handle and exercises every draw and
// dispatch entry point, expecting parameter-validation / no-bound-pipeline errors.
TEST_F(VkLayerTest, InvalidPipeline) {
    SetTargetApiVersion(VK_API_VERSION_1_2);
    // Arbitrary non-null garbage handle; reinterpret keeps it a dispatchable-looking value.
    uint64_t fake_pipeline_handle = 0xbaad6001;
    VkPipeline bad_pipeline = reinterpret_cast<VkPipeline &>(fake_pipeline_handle);
    // Enable VK_KHR_draw_indirect_count for KHR variants
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    VkPhysicalDeviceVulkan12Features features12 = LvlInitStruct<VkPhysicalDeviceVulkan12Features>();
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME);
        if (DeviceValidationVersion() >= VK_API_VERSION_1_2) {
            features12.drawIndirectCount = VK_TRUE;
        }
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features12));
    bool has_khr_indirect = DeviceExtensionEnabled(VK_KHR_DRAW_INDIRECT_COUNT_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    // Attempt to bind an invalid Pipeline to a valid Command Buffer
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindPipeline-pipeline-parameter");
    m_commandBuffer->begin();
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, bad_pipeline);
    m_errorMonitor->VerifyFound();
    // Try each of the 6 flavors of Draw()
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);  // Draw*() calls must be submitted within a renderpass
    // Each draw below runs with no valid pipeline bound, so -None-02700 fires.
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-None-02700");
    m_commandBuffer->Draw(1, 0, 0, 0);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndexed-None-02700");
    m_commandBuffer->DrawIndexed(1, 1, 0, 0, 0);
    m_errorMonitor->VerifyFound();
    VkBufferObj buffer;
    VkBufferCreateInfo ci = LvlInitStruct<VkBufferCreateInfo>();
    ci.usage = VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT;
    ci.size = 1024;
    buffer.init(*m_device, ci);
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndirect-None-02700");
    vk::CmdDrawIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 1, 0);
    m_errorMonitor->VerifyFound();
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndexedIndirect-None-02700");
    vk::CmdDrawIndexedIndirect(m_commandBuffer->handle(), buffer.handle(), 0, 1, 0);
    m_errorMonitor->VerifyFound();
    if (has_khr_indirect) {
        auto fpCmdDrawIndirectCountKHR =
            (PFN_vkCmdDrawIndirectCountKHR)vk::GetDeviceProcAddr(m_device->device(), "vkCmdDrawIndirectCountKHR");
        ASSERT_NE(fpCmdDrawIndirectCountKHR, nullptr);
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndirectCount-None-02700");
        // stride must be a multiple of 4 and must be greater than or equal to sizeof(VkDrawIndirectCommand)
        fpCmdDrawIndirectCountKHR(m_commandBuffer->handle(), buffer.handle(), 0, buffer.handle(), 512, 1, 512);
        m_errorMonitor->VerifyFound();
        if (DeviceValidationVersion() >= VK_API_VERSION_1_2) {
            // The 1.2 core alias should also be exposed when the device is 1.2 capable.
            auto fpCmdDrawIndirectCount =
                (PFN_vkCmdDrawIndirectCount)vk::GetDeviceProcAddr(m_device->device(), "vkCmdDrawIndirectCount");
            if (nullptr == fpCmdDrawIndirectCount) {
                m_errorMonitor->ExpectSuccess();
                m_errorMonitor->SetError("No ProcAddr for 1.2 core vkCmdDrawIndirectCount");
                m_errorMonitor->VerifyNotFound();
            } else {
                m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndirectCount-None-02700");
                // stride must be a multiple of 4 and must be greater than or equal to sizeof(VkDrawIndirectCommand)
fpCmdDrawIndirectCount(m_commandBuffer->handle(), buffer.handle(), 0, buffer.handle(), 512, 1, 512); m_errorMonitor->VerifyFound(); } } auto fpCmdDrawIndexedIndirectCountKHR = (PFN_vkCmdDrawIndexedIndirectCountKHR)vk::GetDeviceProcAddr(m_device->device(), "vkCmdDrawIndexedIndirectCountKHR"); ASSERT_NE(fpCmdDrawIndexedIndirectCountKHR, nullptr); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndexedIndirectCount-None-02700"); // stride must be a multiple of 4 and must be greater than or equal to sizeof(VkDrawIndexedIndirectCommand) fpCmdDrawIndexedIndirectCountKHR(m_commandBuffer->handle(), buffer.handle(), 0, buffer.handle(), 512, 1, 512); m_errorMonitor->VerifyFound(); if (DeviceValidationVersion() >= VK_API_VERSION_1_2) { auto fpCmdDrawIndexedIndirectCount = (PFN_vkCmdDrawIndexedIndirectCount)vk::GetDeviceProcAddr(m_device->device(), "vkCmdDrawIndexedIndirectCount"); if (nullptr == fpCmdDrawIndexedIndirectCount) { m_errorMonitor->ExpectSuccess(); m_errorMonitor->SetError("No ProcAddr for 1.2 core vkCmdDrawIndirectCount"); m_errorMonitor->VerifyNotFound(); } else { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDrawIndexedIndirectCount-None-02700"); // stride must be a multiple of 4 and must be greater than or equal to sizeof(VkDrawIndexedIndirectCommand) fpCmdDrawIndexedIndirectCount(m_commandBuffer->handle(), buffer.handle(), 0, buffer.handle(), 512, 1, 512); m_errorMonitor->VerifyFound(); } } } // Also try the Dispatch variants vk::CmdEndRenderPass(m_commandBuffer->handle()); // Compute submissions must be outside a renderpass m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatch-None-02700"); vk::CmdDispatch(m_commandBuffer->handle(), 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatchIndirect-None-02700"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatchIndirect-offset-00407"); vk::CmdDispatchIndirect(m_commandBuffer->handle(), 
buffer.handle(), ci.size); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CmdDispatchExceedLimits) { TEST_DESCRIPTION("Compute dispatch with dimensions that exceed device limits"); // Enable KHX device group extensions, if available if (InstanceExtensionSupported(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_DEVICE_GROUP_CREATION_EXTENSION_NAME); } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); bool khx_dg_ext_available = false; if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_DEVICE_GROUP_EXTENSION_NAME); khx_dg_ext_available = true; } ASSERT_NO_FATAL_FAILURE(InitState()); uint32_t x_count_limit = m_device->props.limits.maxComputeWorkGroupCount[0]; uint32_t y_count_limit = m_device->props.limits.maxComputeWorkGroupCount[1]; uint32_t z_count_limit = m_device->props.limits.maxComputeWorkGroupCount[2]; if (std::max({x_count_limit, y_count_limit, z_count_limit}) == UINT32_MAX) { printf("%s device maxComputeWorkGroupCount limit reports UINT32_MAX, test not possible, skipping.\n", kSkipPrefix); return; } uint32_t x_size_limit = m_device->props.limits.maxComputeWorkGroupSize[0]; uint32_t y_size_limit = m_device->props.limits.maxComputeWorkGroupSize[1]; uint32_t z_size_limit = m_device->props.limits.maxComputeWorkGroupSize[2]; std::string spv_source = R"( OpCapability Shader OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main LocalSize )"; spv_source.append(std::to_string(x_size_limit + 1) + " " + std::to_string(y_size_limit + 1) + " " + std::to_string(z_size_limit + 1)); spv_source.append(R"( %void = OpTypeVoid %3 = OpTypeFunction %void %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd)"); CreateComputePipelineHelper pipe(*this); pipe.InitInfo(); pipe.cs_.reset(new VkShaderObj(this, spv_source, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM)); pipe.InitState(); 
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-x-06429"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-y-06430"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-z-06431"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-x-06432"); pipe.CreateComputePipeline(); m_errorMonitor->VerifyFound(); // Create a minimal compute pipeline x_size_limit = (x_size_limit > 1024) ? 1024 : x_size_limit; y_size_limit = (y_size_limit > 1024) ? 1024 : y_size_limit; z_size_limit = (z_size_limit > 64) ? 64 : z_size_limit; uint32_t invocations_limit = m_device->props.limits.maxComputeWorkGroupInvocations; x_size_limit = (x_size_limit > invocations_limit) ? invocations_limit : x_size_limit; invocations_limit /= x_size_limit; y_size_limit = (y_size_limit > invocations_limit) ? invocations_limit : y_size_limit; invocations_limit /= y_size_limit; z_size_limit = (z_size_limit > invocations_limit) ? invocations_limit : z_size_limit; char cs_text[128] = ""; sprintf(cs_text, "#version 450\nlayout(local_size_x = %d, local_size_y = %d, local_size_z = %d) in;\nvoid main() {}\n", x_size_limit, y_size_limit, z_size_limit); VkShaderObj cs_obj(this, cs_text, VK_SHADER_STAGE_COMPUTE_BIT); pipe.cs_.reset(new VkShaderObj(this, cs_text, VK_SHADER_STAGE_COMPUTE_BIT)); pipe.CreateComputePipeline(); // Bind pipeline to command buffer m_commandBuffer->begin(); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_); // Dispatch counts that exceed device limits m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatch-groupCountX-00386"); vk::CmdDispatch(m_commandBuffer->handle(), x_count_limit + 1, y_count_limit, z_count_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatch-groupCountY-00387"); vk::CmdDispatch(m_commandBuffer->handle(), x_count_limit, y_count_limit + 1, z_count_limit); m_errorMonitor->VerifyFound(); 
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatch-groupCountZ-00388"); vk::CmdDispatch(m_commandBuffer->handle(), x_count_limit, y_count_limit, z_count_limit + 1); m_errorMonitor->VerifyFound(); if (khx_dg_ext_available) { PFN_vkCmdDispatchBaseKHR fp_vkCmdDispatchBaseKHR = (PFN_vkCmdDispatchBaseKHR)vk::GetInstanceProcAddr(instance(), "vkCmdDispatchBaseKHR"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatchBase-baseGroupX-00427"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), 1, 1, 1, 0, 0, 0); m_errorMonitor->VerifyFound(); // Base equals or exceeds limit m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatchBase-baseGroupX-00421"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_count_limit, y_count_limit - 1, z_count_limit - 1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatchBase-baseGroupX-00422"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_count_limit - 1, y_count_limit, z_count_limit - 1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatchBase-baseGroupZ-00423"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_count_limit - 1, y_count_limit - 1, z_count_limit, 0, 0, 0); m_errorMonitor->VerifyFound(); // (Base + count) exceeds limit uint32_t x_base = x_count_limit / 2; uint32_t y_base = y_count_limit / 2; uint32_t z_base = z_count_limit / 2; x_count_limit -= x_base; y_count_limit -= y_base; z_count_limit -= z_base; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatchBase-groupCountX-00424"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_count_limit + 1, y_count_limit, z_count_limit); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatchBase-groupCountY-00425"); fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_count_limit, y_count_limit + 1, z_count_limit); 
        m_errorMonitor->VerifyFound();
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatchBase-groupCountZ-00426");
        fp_vkCmdDispatchBaseKHR(m_commandBuffer->handle(), x_base, y_base, z_base, x_count_limit, y_count_limit,
                                z_count_limit + 1);
        m_errorMonitor->VerifyFound();
    } else {
        printf("%s KHX_DEVICE_GROUP_* extensions not supported, skipping CmdDispatchBaseKHR() tests.\n", kSkipPrefix);
    }
}

// Graphics pipeline created with only a fragment stage (no vertex shader), then with a
// pName that is not valid UTF-8 — both must be rejected at pipeline-creation time.
TEST_F(VkLayerTest, InvalidPipelineCreateState) {
    TEST_DESCRIPTION("Create Pipelines with invalid state set");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT);
    // Attempt to Create Gfx Pipeline w/o a VS
    VkPipelineShaderStageCreateInfo shaderStage = fs.GetStageCreateInfo();  // should be: vs.GetStageCreateInfo();
    auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {shaderStage}; };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-stage-00727");
    // Finally, check the string validation for the shader stage pName variable.  Correct the shader stage data, and bork the
    // string before calling again
    shaderStage = vs.GetStageCreateInfo();
    // 0xf8 is a UTF-8 continuation-range byte; four of them form a malformed string.
    const uint8_t cont_char = 0xf8;
    char bad_string[] = {static_cast<char>(cont_char), static_cast<char>(cont_char), static_cast<char>(cont_char),
                         static_cast<char>(cont_char)};
    shaderStage.pName = bad_string;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "contains invalid characters or is badly formed");
}

// Compute pipeline whose single stage is declared as VERTEX instead of COMPUTE.
TEST_F(VkLayerTest, InvalidPipelineCreateStateBadStageBit) {
    TEST_DESCRIPTION("Create Pipelines with invalid state set");
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    // Make sure compute pipeline has a compute shader stage set
    char const *csSource = R"glsl(
        #version 450
        layout(local_size_x=1, local_size_y=1, local_size_z=1) in;
        void main(){
            if (gl_GlobalInvocationID.x >= 0) { return; }
        }
    )glsl";
    CreateComputePipelineHelper cs_pipeline(*this);
    cs_pipeline.InitInfo();
    cs_pipeline.cs_.reset(new VkShaderObj(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT));
    cs_pipeline.InitState();
    cs_pipeline.pipeline_layout_ = VkPipelineLayoutObj(m_device, {});
    cs_pipeline.LateBindPipelineInfo();
    cs_pipeline.cp_ci_.stage.stage = VK_SHADER_STAGE_VERTEX_BIT;  // override with wrong value
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkComputePipelineCreateInfo-stage-00701");
    cs_pipeline.CreateComputePipeline(true, false);  // need false to prevent late binding
    m_errorMonitor->VerifyFound();
}

// Shader performs OpImageRead from an image with Unknown format while the
// shaderStorageImageReadWithoutFormat feature is absent — must be rejected.
TEST_F(VkLayerTest, MissingStorageImageFormatRead) {
    TEST_DESCRIPTION("Create a shader reading a storage image without an image format");
    ASSERT_NO_FATAL_FAILURE(Init());
    VkPhysicalDeviceFeatures feat;
    vk::GetPhysicalDeviceFeatures(gpu(), &feat);
    if (feat.shaderStorageImageReadWithoutFormat) {
        printf("%s format less storage image read supported.\n", kSkipPrefix);
        return;
    }
    // Checks based off shaderStorageImage(Read|Write)WithoutFormat are
    // disabled if VK_KHR_format_feature_flags2 is supported.
    //
    // https://github.com/KhronosGroup/Vulkan-Docs/blob/6177645341afc/appendices/spirvenv.txt#L553
    //
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME)) {
        printf("%s %s supported, skipping.\n", kSkipPrefix, VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME);
        return;
    }
    // Make sure compute pipeline has a compute shader stage set
    // %12 is the storage image with format Unknown; %18 = OpImageRead triggers the check.
    const std::string csSource = R"(
                  OpCapability Shader
             %1 = OpExtInstImport "GLSL.std.450"
                  OpMemoryModel Logical GLSL450
                  OpEntryPoint GLCompute %4 "main"
                  OpExecutionMode %4 LocalSize 1 1 1
                  OpSource GLSL 450
                  OpName %4 "main"
                  OpName %9 "value"
                  OpName %12 "img"
                  OpDecorate %12 DescriptorSet 0
                  OpDecorate %12 Binding 0
                  OpDecorate %22 BuiltIn WorkgroupSize
                  OpDecorate %12 NonReadable
             %2 = OpTypeVoid
             %3 = OpTypeFunction %2
             %6 = OpTypeFloat 32
             %7 = OpTypeVector %6 4
             %8 = OpTypePointer Function %7
            %10 = OpTypeImage %6 2D 0 0 0 2 Unknown
            %11 = OpTypePointer UniformConstant %10
            %12 = OpVariable %11 UniformConstant
            %14 = OpTypeInt 32 1
            %15 = OpTypeVector %14 2
            %16 = OpConstant %14 0
            %17 = OpConstantComposite %15 %16 %16
            %19 = OpTypeInt 32 0
            %20 = OpTypeVector %19 3
            %21 = OpConstant %19 1
            %22 = OpConstantComposite %20 %21 %21 %21
             %4 = OpFunction %2 None %3
             %5 = OpLabel
             %9 = OpVariable %8 Function
            %13 = OpLoad %10 %12
            %18 = OpImageRead %7 %13 %17
                  OpStore %9 %18
                  OpReturn
                  OpFunctionEnd
                  )";
    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
                                     });
    CreateComputePipelineHelper cs_pipeline(*this);
    cs_pipeline.InitInfo();
    cs_pipeline.cs_.reset(new VkShaderObj(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM));
    cs_pipeline.InitState();
    cs_pipeline.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&ds.layout_});
    cs_pipeline.LateBindPipelineInfo();
    cs_pipeline.cp_ci_.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;  // restate the (correct) compute stage after late bind
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit,
                                         "UNASSIGNED-features-shaderStorageImageReadWithoutFormat");
    cs_pipeline.CreateComputePipeline(true, false);  // need false to prevent late binding
    m_errorMonitor->VerifyFound();
}

// Mirror of the previous test: OpImageWrite to an Unknown-format image while the
// shaderStorageImageWriteWithoutFormat feature is absent — must be rejected.
TEST_F(VkLayerTest, MissingStorageImageFormatWrite) {
    TEST_DESCRIPTION("Create a shader writing a storage image without an image format");
    ASSERT_NO_FATAL_FAILURE(Init());
    VkPhysicalDeviceFeatures feat;
    vk::GetPhysicalDeviceFeatures(gpu(), &feat);
    if (feat.shaderStorageImageWriteWithoutFormat) {
        printf("%s format less storage image write supported.\n", kSkipPrefix);
        return;
    }
    // Checks based off shaderStorageImage(Read|Write)WithoutFormat are
    // disabled if VK_KHR_format_feature_flags2 is supported.
    //
    // https://github.com/KhronosGroup/Vulkan-Docs/blob/6177645341afc/appendices/spirvenv.txt#L553
    //
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME)) {
        printf("%s %s supported, skipping.\n", kSkipPrefix, VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME);
        return;
    }
    // Make sure compute pipeline has a compute shader stage set
    // NOTE(review): %gl_WorkGroupSize is decorated but no defining OpConstantComposite for
    // it appears in this listing — confirm the SPIR-V assembles as intended.
    const std::string csSource = R"(
                  OpCapability Shader
             %1 = OpExtInstImport "GLSL.std.450"
                  OpMemoryModel Logical GLSL450
                  OpEntryPoint GLCompute %main "main"
                  OpExecutionMode %main LocalSize 1 1 1
                  OpSource GLSL 450
                  OpName %main "main"
                  OpName %img "img"
                  OpDecorate %img DescriptorSet 0
                  OpDecorate %img Binding 0
                  OpDecorate %gl_WorkGroupSize BuiltIn WorkgroupSize
                  OpDecorate %img NonWritable
          %void = OpTypeVoid
             %3 = OpTypeFunction %void
         %float = OpTypeFloat 32
             %7 = OpTypeImage %float 2D 0 0 0 2 Unknown
%_ptr_UniformConstant_7 = OpTypePointer UniformConstant %7
           %img = OpVariable %_ptr_UniformConstant_7 UniformConstant
           %int = OpTypeInt 32 1
         %v2int = OpTypeVector %int 2
         %int_0 = OpConstant %int 0
            %14 = OpConstantComposite %v2int %int_0 %int_0
       %v4float = OpTypeVector %float 4
       %float_0 = OpConstant %float 0
            %17 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_0
          %uint = OpTypeInt 32 0
        %v3uint = OpTypeVector %uint 3
        %uint_1 = OpConstant %uint 1
          %main = OpFunction %void None %3
             %5 = OpLabel
            %10 = OpLoad %7 %img
                  OpImageWrite %10 %14 %17
                  OpReturn
                  OpFunctionEnd
                  )";
    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
                                     });
    CreateComputePipelineHelper cs_pipeline(*this);
    cs_pipeline.InitInfo();
    cs_pipeline.cs_.reset(new VkShaderObj(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM));
    cs_pipeline.InitState();
    cs_pipeline.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&ds.layout_});
    cs_pipeline.LateBindPipelineInfo();
    cs_pipeline.cp_ci_.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;  // restate the (correct) compute stage after late bind
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-features-shaderStorageImageWriteWithoutFormat");
    cs_pipeline.CreateComputePipeline(true, false);  // need false to prevent late binding
    m_errorMonitor->VerifyFound();
}

// Per-format variant using VK_KHR_format_feature_flags2: a format-less OpImageRead is
// legal only for formats advertising STORAGE_READ_WITHOUT_FORMAT; checked at dispatch.
TEST_F(VkLayerTest, MissingStorageImageFormatReadForFormat) {
    TEST_DESCRIPTION("Create a shader reading a storage image without an image format");
    AddRequiredExtensions(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    AddRequiredExtensions(VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
    if (!AreRequestedExtensionsEnabled()) {
        printf("%s Required extensions not supported, skipping.\n", kSkipPrefix);
        return;
    }
    PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR =
        (PFN_vkGetPhysicalDeviceFormatProperties2KHR)vk::GetInstanceProcAddr(instance(),
                                                                             "vkGetPhysicalDeviceFormatProperties2KHR");
    // One positive (supports read-without-format) and one negative test case.
    struct {
        VkFormat format;
        VkFormatProperties3KHR props;
    } tests[2] = {};
    int n_tests = 0;
    bool has_without_format_test = false, has_with_format_test = false;
    // Find storage formats with & without read without format support
    for (uint32_t fmt = VK_FORMAT_R4G4_UNORM_PACK8; fmt < VK_FORMAT_D16_UNORM; fmt++) {
        if
(has_without_format_test && has_with_format_test) break; auto fmt_props_3 = LvlInitStruct<VkFormatProperties3KHR>(); auto fmt_props = LvlInitStruct<VkFormatProperties2>(&fmt_props_3); vkGetPhysicalDeviceFormatProperties2KHR(gpu(), (VkFormat)fmt, &fmt_props); const bool has_storage = (fmt_props_3.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT_KHR) != 0; const bool has_read_without_format = (fmt_props_3.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR) != 0; if (!has_storage) continue; if (has_read_without_format) { if (has_without_format_test) continue; tests[n_tests].format = (VkFormat)fmt; tests[n_tests].props = fmt_props_3; has_without_format_test = true; n_tests++; } else { if (has_with_format_test) continue; tests[n_tests].format = (VkFormat)fmt; tests[n_tests].props = fmt_props_3; has_with_format_test = true; n_tests++; } } if (n_tests == 0) { printf("%s Could not build a test case.\n", kSkipPrefix); return; } // Make sure compute pipeline has a compute shader stage set const std::string csSource = R"( OpCapability Shader OpCapability StorageImageReadWithoutFormat %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %4 "main" OpExecutionMode %4 LocalSize 1 1 1 OpSource GLSL 450 OpName %4 "main" OpName %9 "value" OpName %12 "img" OpDecorate %12 DescriptorSet 0 OpDecorate %12 Binding 0 OpDecorate %22 BuiltIn WorkgroupSize OpDecorate %12 NonReadable %2 = OpTypeVoid %3 = OpTypeFunction %2 %6 = OpTypeFloat 32 %7 = OpTypeVector %6 4 %8 = OpTypePointer Function %7 %10 = OpTypeImage %6 2D 0 0 0 2 Unknown %11 = OpTypePointer UniformConstant %10 %12 = OpVariable %11 UniformConstant %14 = OpTypeInt 32 1 %15 = OpTypeVector %14 2 %16 = OpConstant %14 0 %17 = OpConstantComposite %15 %16 %16 %19 = OpTypeInt 32 0 %20 = OpTypeVector %19 3 %21 = OpConstant %19 1 %22 = OpConstantComposite %20 %21 %21 %21 %4 = OpFunction %2 None %3 %5 = OpLabel %9 = OpVariable %8 Function %13 = OpLoad %10 %12 %18 = 
OpImageRead %7 %13 %17 OpStore %9 %18 OpReturn OpFunctionEnd )"; OneOffDescriptorSet ds(m_device, { {0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, }); CreateComputePipelineHelper cs_pipeline(*this); cs_pipeline.InitInfo(); cs_pipeline.cs_.reset(new VkShaderObj(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM)); cs_pipeline.InitState(); cs_pipeline.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&ds.layout_}); cs_pipeline.LateBindPipelineInfo(); cs_pipeline.cp_ci_.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT; // override with wrong value cs_pipeline.CreateComputePipeline(true, false); // need false to prevent late binding for (int t = 0; t < n_tests; t++) { VkFormat format = tests[t].format; VkImageObj image(m_device); image.Init(32, 32, 1, format, VK_IMAGE_USAGE_STORAGE_BIT, VK_IMAGE_TILING_OPTIMAL); VkDescriptorImageInfo image_info = {}; image_info.imageView = image.targetView(format); image_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL; VkWriteDescriptorSet descriptor_write = LvlInitStruct<VkWriteDescriptorSet>(); descriptor_write.dstSet = ds.set_; descriptor_write.dstBinding = 0; descriptor_write.descriptorCount = 1; descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; descriptor_write.pImageInfo = &image_info; vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL); m_commandBuffer->reset(); m_commandBuffer->begin(); { VkImageMemoryBarrier img_barrier = LvlInitStruct<VkImageMemoryBarrier>(); img_barrier.srcAccessMask = VK_ACCESS_HOST_READ_BIT; img_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT; img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED; img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL; img_barrier.image = image.handle(); // Image mis-matches with FB image img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED; img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT; 
// NOTE(review): first statements below finish the barrier/dispatch of the preceding
// MissingStorageImageFormatReadForFormat test (started above this chunk).
img_barrier.subresourceRange.baseArrayLayer = 0;
            img_barrier.subresourceRange.baseMipLevel = 0;
            img_barrier.subresourceRange.layerCount = 1;
            img_barrier.subresourceRange.levelCount = 1;
            vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0,
                                   nullptr, 0, nullptr, 1, &img_barrier);
        }
        vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, cs_pipeline.pipeline_);
        vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, cs_pipeline.pipeline_layout_.handle(), 0,
                                  1, &ds.set_, 0, nullptr);

        // The VU only fires when the bound format lacks STORAGE_READ_WITHOUT_FORMAT.
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatch-OpTypeImage-06424");
        vk::CmdDispatch(m_commandBuffer->handle(), 1, 1, 1);
        m_commandBuffer->end();

        if (tests[t].props.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_READ_WITHOUT_FORMAT_BIT_KHR)
            m_errorMonitor->VerifyNotFound();
        else
            m_errorMonitor->VerifyFound();
    }
}

TEST_F(VkLayerTest, MissingStorageImageFormatWriteForFormat) {
    TEST_DESCRIPTION("Create a shader writing a storage image without an image format");

    AddRequiredExtensions(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    AddRequiredExtensions(VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
    if (!AreRequestedExtensionsEnabled()) {
        printf("%s Required extensions not supported, skipping.\n", kSkipPrefix);
        return;
    }

    PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR =
        (PFN_vkGetPhysicalDeviceFormatProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFormatProperties2KHR");

    // Up to two cases: one format WITH and one WITHOUT WRITE_WITHOUT_FORMAT support.
    struct {
        VkFormat format;
        VkFormatProperties3KHR props;
    } tests[2] = {};
    int n_tests = 0;
    bool has_without_format_test = false, has_with_format_test = false;

    // Find storage formats with & without write without format support
    // (scan stops before the depth/stencil formats).
    for (uint32_t fmt = VK_FORMAT_R4G4_UNORM_PACK8; fmt < VK_FORMAT_D16_UNORM; fmt++) {
        if (has_without_format_test && has_with_format_test) break;
        auto fmt_props_3 = LvlInitStruct<VkFormatProperties3KHR>();
        auto fmt_props = LvlInitStruct<VkFormatProperties2>(&fmt_props_3);
        vkGetPhysicalDeviceFormatProperties2KHR(gpu(), (VkFormat)fmt, &fmt_props);
        const bool has_storage = (fmt_props_3.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_IMAGE_BIT_KHR) != 0;
        const bool has_write_without_format =
            (fmt_props_3.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR) != 0;
        if (!has_storage) continue;
        if (has_write_without_format) {
            if (has_without_format_test) continue;
            tests[n_tests].format = (VkFormat)fmt;
            tests[n_tests].props = fmt_props_3;
            has_without_format_test = true;
            n_tests++;
        } else {
            if (has_with_format_test) continue;
            tests[n_tests].format = (VkFormat)fmt;
            tests[n_tests].props = fmt_props_3;
            has_with_format_test = true;
            n_tests++;
        }
    }
    if (n_tests == 0) {
        printf("%s Could not build a test case.\n", kSkipPrefix);
        return;
    }

    // Make sure compute pipeline has a compute shader stage set
    // Compute shader performing OpImageWrite on an Unknown-format storage image.
    const std::string csSource = R"(
        OpCapability Shader
        OpCapability StorageImageWriteWithoutFormat
        %1 = OpExtInstImport "GLSL.std.450"
        OpMemoryModel Logical GLSL450
        OpEntryPoint GLCompute %main "main"
        OpExecutionMode %main LocalSize 1 1 1
        OpSource GLSL 450
        OpName %main "main"
        OpName %img "img"
        OpDecorate %img DescriptorSet 0
        OpDecorate %img Binding 0
        OpDecorate %img NonWritable
        %void = OpTypeVoid
        %3 = OpTypeFunction %void
        %float = OpTypeFloat 32
        %7 = OpTypeImage %float 2D 0 0 0 2 Unknown
        %_ptr_UniformConstant_7 = OpTypePointer UniformConstant %7
        %img = OpVariable %_ptr_UniformConstant_7 UniformConstant
        %int = OpTypeInt 32 1
        %v2int = OpTypeVector %int 2
        %int_0 = OpConstant %int 0
        %14 = OpConstantComposite %v2int %int_0 %int_0
        %v4float = OpTypeVector %float 4
        %float_0 = OpConstant %float 0
        %17 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_0
        %uint = OpTypeInt 32 0
        %v3uint = OpTypeVector %uint 3
        %uint_1 = OpConstant %uint 1
        %main = OpFunction %void None %3
        %5 = OpLabel
        %10 = OpLoad %7 %img
        OpImageWrite %10 %14 %17
        OpReturn
        OpFunctionEnd
        )";

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
                                     });
    CreateComputePipelineHelper cs_pipeline(*this);
    cs_pipeline.InitInfo();
    cs_pipeline.cs_.reset(new VkShaderObj(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM));
    cs_pipeline.InitState();
    cs_pipeline.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&ds.layout_});
    cs_pipeline.LateBindPipelineInfo();
    cs_pipeline.cp_ci_.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;  // override with wrong value
    cs_pipeline.CreateComputePipeline(true, false);                // need false to prevent late binding

    // Dispatch once per selected format; only the no-WRITE_WITHOUT_FORMAT case should error.
    for (int t = 0; t < n_tests; t++) {
        VkFormat format = tests[t].format;
        VkImageObj image(m_device);
        image.Init(32, 32, 1, format, VK_IMAGE_USAGE_STORAGE_BIT, VK_IMAGE_TILING_OPTIMAL);

        VkDescriptorImageInfo image_info = {};
        image_info.imageView = image.targetView(format);
        image_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL;
        VkWriteDescriptorSet descriptor_write = LvlInitStruct<VkWriteDescriptorSet>();
        descriptor_write.dstSet = ds.set_;
        descriptor_write.dstBinding = 0;
        descriptor_write.descriptorCount = 1;
        descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
        descriptor_write.pImageInfo = &image_info;
        vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

        m_commandBuffer->reset();
        m_commandBuffer->begin();
        {
            // Transition the storage image to GENERAL before the dispatch writes it.
            VkImageMemoryBarrier img_barrier = LvlInitStruct<VkImageMemoryBarrier>();
            img_barrier.srcAccessMask = VK_ACCESS_HOST_READ_BIT;
            img_barrier.dstAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
            img_barrier.oldLayout = VK_IMAGE_LAYOUT_UNDEFINED;
            img_barrier.newLayout = VK_IMAGE_LAYOUT_GENERAL;
            img_barrier.image = image.handle();  // Image mis-matches with FB image
            img_barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
            img_barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
            img_barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
            img_barrier.subresourceRange.baseArrayLayer = 0;
            img_barrier.subresourceRange.baseMipLevel = 0;
            img_barrier.subresourceRange.layerCount = 1;
            img_barrier.subresourceRange.levelCount = 1;
            vk::CmdPipelineBarrier(m_commandBuffer->handle(), VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, 0, 0,
                                   nullptr, 0, nullptr, 1, &img_barrier);
        }
        vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, cs_pipeline.pipeline_);
        vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, cs_pipeline.pipeline_layout_.handle(), 0,
                                  1, &ds.set_, 0, nullptr);

        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatch-OpTypeImage-06423");
        vk::CmdDispatch(m_commandBuffer->handle(), 1, 1, 1);
        m_commandBuffer->end();

        if (tests[t].props.optimalTilingFeatures & VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT_KHR)
            m_errorMonitor->VerifyNotFound();
        else
            m_errorMonitor->VerifyFound();
    }
}

TEST_F(VkLayerTest, MissingStorageTexelBufferFormatWriteForFormat) {
    TEST_DESCRIPTION("Create a shader writing a storage texel buffer without an image format");

    // Device-profile layer lets the test fake the format feature flags below.
    if (!EnableDeviceProfileLayer()) {
        printf("%s Failed to enable device profile layer.\n", kSkipPrefix);
        return;
    }
    AddRequiredExtensions(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    AddRequiredExtensions(VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, nullptr, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
    if (!AreRequestedExtensionsEnabled()) {
        printf("%s Required extensions not supported, skipping.\n", kSkipPrefix);
        return;
    }

    PFN_vkSetPhysicalDeviceFormatProperties2EXT fpvkSetPhysicalDeviceFormatProperties2EXT = nullptr;
    PFN_vkGetOriginalPhysicalDeviceFormatProperties2EXT fpvkGetOriginalPhysicalDeviceFormatProperties2EXT = nullptr;
    if
// NOTE(review): continues the `if` begun at the end of the previous chunk.
(!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatProperties2EXT, fpvkGetOriginalPhysicalDeviceFormatProperties2EXT)) {
        printf("%s Failed to device profile layer.\n", kSkipPrefix);
        return;
    }

    const VkFormat format = VK_FORMAT_R8G8B8A8_UNORM;
    auto fmt_props_3 = LvlInitStruct<VkFormatProperties3>();
    auto fmt_props = LvlInitStruct<VkFormatProperties2>(&fmt_props_3);

    // set so format can be used as a storage texel buffer, but no WITH_FORMAT support
    fpvkGetOriginalPhysicalDeviceFormatProperties2EXT(gpu(), format, &fmt_props);
    fmt_props.formatProperties.bufferFeatures |= VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT;
    fmt_props_3.bufferFeatures |= VK_FORMAT_FEATURE_2_STORAGE_TEXEL_BUFFER_BIT;
    fmt_props_3.bufferFeatures = (fmt_props_3.bufferFeatures & ~VK_FORMAT_FEATURE_2_STORAGE_WRITE_WITHOUT_FORMAT_BIT);
    fpvkSetPhysicalDeviceFormatProperties2EXT(gpu(), format, fmt_props);

    // Compute shader performing OpImageWrite on an Unknown-format storage texel buffer
    // (Buffer-dim OpTypeImage, hence the ImageBuffer capability).
    const std::string csSource = R"(
        OpCapability Shader
        OpCapability ImageBuffer
        OpCapability StorageImageWriteWithoutFormat
        %1 = OpExtInstImport "GLSL.std.450"
        OpMemoryModel Logical GLSL450
        OpEntryPoint GLCompute %main "main"
        OpExecutionMode %main LocalSize 1 1 1
        OpSource GLSL 450
        OpDecorate %img DescriptorSet 0
        OpDecorate %img Binding 0
        OpDecorate %img NonWritable
        %void = OpTypeVoid
        %3 = OpTypeFunction %void
        %float = OpTypeFloat 32
        %7 = OpTypeImage %float Buffer 0 0 0 2 Unknown
        %_ptr_UniformConstant_7 = OpTypePointer UniformConstant %7
        %img = OpVariable %_ptr_UniformConstant_7 UniformConstant
        %int = OpTypeInt 32 1
        %v2int = OpTypeVector %int 2
        %int_0 = OpConstant %int 0
        %14 = OpConstantComposite %v2int %int_0 %int_0
        %v4float = OpTypeVector %float 4
        %float_0 = OpConstant %float 0
        %17 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_0
        %uint = OpTypeInt 32 0
        %v3uint = OpTypeVector %uint 3
        %uint_1 = OpConstant %uint 1
        %main = OpFunction %void None %3
        %5 = OpLabel
        %10 = OpLoad %7 %img
        OpImageWrite %10 %14 %17
        OpReturn
        OpFunctionEnd
        )";

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
                                     });
    CreateComputePipelineHelper cs_pipeline(*this);
    cs_pipeline.InitInfo();
    cs_pipeline.cs_.reset(new VkShaderObj(this, csSource.c_str(), VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM));
    cs_pipeline.InitState();
    cs_pipeline.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&ds.layout_});
    cs_pipeline.LateBindPipelineInfo();
    cs_pipeline.cp_ci_.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;  // override with wrong value
    cs_pipeline.CreateComputePipeline(true, false);                // need false to prevent late binding

    VkBufferCreateInfo buffer_create_info = LvlInitStruct<VkBufferCreateInfo>();
    buffer_create_info.size = 1024;
    buffer_create_info.usage = VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT;
    VkBufferObj buffer;
    buffer.init(*m_device, buffer_create_info);

    VkBufferViewCreateInfo buff_view_ci = LvlInitStruct<VkBufferViewCreateInfo>();
    buff_view_ci.buffer = buffer.handle();
    buff_view_ci.format = format;
    buff_view_ci.range = VK_WHOLE_SIZE;
    VkBufferView buffer_view;
    VkResult err = vk::CreateBufferView(m_device->device(), &buff_view_ci, NULL, &buffer_view);
    if (err != VK_SUCCESS) {
        // device profile layer might hide fact this is not a supported buffer view format
        printf("%s Device will not be able to initialize buffer view skipped.\n", kSkipPrefix);
        return;
    }

    VkWriteDescriptorSet descriptor_write = LvlInitStruct<VkWriteDescriptorSet>();
    descriptor_write.dstSet = ds.set_;
    descriptor_write.dstBinding = 0;
    descriptor_write.descriptorCount = 1;
    descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER;
    descriptor_write.pTexelBufferView = &buffer_view;
    vk::UpdateDescriptorSets(m_device->device(), 1, &descriptor_write, 0, NULL);

    m_commandBuffer->reset();
    m_commandBuffer->begin();
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, cs_pipeline.pipeline_);
    vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, cs_pipeline.pipeline_layout_.handle(), 0, 1,
                              &ds.set_, 0, nullptr);

    // Format was stripped of WRITE_WITHOUT_FORMAT above, so the dispatch must error.
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatch-OpTypeImage-06423");
    vk::CmdDispatch(m_commandBuffer->handle(), 1, 1, 1);
    m_errorMonitor->VerifyFound();
    m_commandBuffer->end();

    vk::DestroyBufferView(m_device->handle(), buffer_view, nullptr);
}

TEST_F(VkLayerTest, MissingNonReadableDecorationStorageImageFormatRead) {
    TEST_DESCRIPTION("Create a shader with a storage image without an image format not marked as non readable");

    ASSERT_NO_FATAL_FAILURE(Init());
    VkPhysicalDeviceFeatures feat;
    vk::GetPhysicalDeviceFeatures(gpu(), &feat);
    if (feat.shaderStorageImageReadWithoutFormat) {
        printf("%s format less storage image read supported.\n", kSkipPrefix);
        return;
    }

    // We need to skip this test with VK_KHR_format_feature_flags2 supported,
    // because checks for read/write without format has to be done per format
    // rather than as a device feature. The code we test here only looks at
    // the shader.
    if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME)) {
        printf("%s %s supported, skipping.\n", kSkipPrefix, VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME);
        return;
    }

    // Make sure compute pipeline has a compute shader stage set
    // Unknown-format storage image that is never read/written and NOT decorated
    // NonReadable — pipeline creation itself must be rejected.
    const std::string csSource = R"(
        OpCapability Shader
        %1 = OpExtInstImport "GLSL.std.450"
        OpMemoryModel Logical GLSL450
        OpEntryPoint GLCompute %4 "main"
        OpExecutionMode %4 LocalSize 1 1 1
        OpSource GLSL 450
        OpName %4 "main"
        OpName %9 "value"
        OpName %12 "img"
        OpDecorate %12 DescriptorSet 0
        OpDecorate %12 Binding 0
        OpDecorate %22 BuiltIn WorkgroupSize
        %2 = OpTypeVoid
        %3 = OpTypeFunction %2
        %6 = OpTypeFloat 32
        %7 = OpTypeVector %6 4
        %8 = OpTypePointer Function %7
        %10 = OpTypeImage %6 2D 0 0 0 2 Unknown
        %11 = OpTypePointer UniformConstant %10
        %12 = OpVariable %11 UniformConstant
        %14 = OpTypeInt 32 1
        %15 = OpTypeVector %14 2
        %16 = OpConstant %14 0
        %17 = OpConstantComposite %15 %16 %16
        %19 = OpTypeInt 32 0
        %20 = OpTypeVector %19 3
        %21 = OpConstant %19 1
        %22 = OpConstantComposite %20 %21 %21 %21
        %4 = OpFunction %2 None %3
        %9 = OpVariable %8 Function
        OpReturn
        OpFunctionEnd
        )";

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
                                     });
    CreateComputePipelineHelper cs_pipeline(*this);
    cs_pipeline.InitInfo();
    cs_pipeline.cs_.reset(new VkShaderObj(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM));
    cs_pipeline.InitState();
    cs_pipeline.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&ds.layout_});
    cs_pipeline.LateBindPipelineInfo();
    cs_pipeline.cp_ci_.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;  // override with wrong value

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-OpTypeImage-06270");
    cs_pipeline.CreateComputePipeline(true, false);  // need false to prevent late binding
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, MissingNonWritableDecorationStorageImageFormatWrite) {
    TEST_DESCRIPTION("Create a shader with a storage image without an image format but not marked a non writable");

    ASSERT_NO_FATAL_FAILURE(Init());
    VkPhysicalDeviceFeatures feat;
    vk::GetPhysicalDeviceFeatures(gpu(), &feat);
    if (feat.shaderStorageImageWriteWithoutFormat) {
        printf("%s format less storage image write supported.\n", kSkipPrefix);
        return;
    }

    // We need to skip this test with VK_KHR_format_feature_flags2 supported,
    // because checks for read/write without format has to be done per format
    // rather than as a device feature. The code we test here only looks at
    // the shader.
// NOTE(review): continues TEST_F(VkLayerTest, MissingNonWritableDecorationStorageImageFormatWrite).
if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME)) {
        printf("%s %s supported, skipping.\n", kSkipPrefix, VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME);
        return;
    }

    // Make sure compute pipeline has a compute shader stage set
    // Unknown-format storage image never written and NOT decorated NonWritable —
    // pipeline creation must be rejected when the device feature is absent.
    const std::string csSource = R"(
        OpCapability Shader
        %1 = OpExtInstImport "GLSL.std.450"
        OpMemoryModel Logical GLSL450
        OpEntryPoint GLCompute %main "main"
        OpExecutionMode %main LocalSize 1 1 1
        OpSource GLSL 450
        OpName %main "main"
        OpName %img "img"
        OpDecorate %img DescriptorSet 0
        OpDecorate %img Binding 0
        OpDecorate %gl_WorkGroupSize BuiltIn WorkgroupSize
        %void = OpTypeVoid
        %3 = OpTypeFunction %void
        %float = OpTypeFloat 32
        %7 = OpTypeImage %float 2D 0 0 0 2 Unknown
        %_ptr_UniformConstant_7 = OpTypePointer UniformConstant %7
        %img = OpVariable %_ptr_UniformConstant_7 UniformConstant
        %int = OpTypeInt 32 1
        %v2int = OpTypeVector %int 2
        %int_0 = OpConstant %int 0
        %14 = OpConstantComposite %v2int %int_0 %int_0
        %v4float = OpTypeVector %float 4
        %float_0 = OpConstant %float 0
        %17 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_0
        %uint = OpTypeInt 32 0
        %v3uint = OpTypeVector %uint 3
        %uint_1 = OpConstant %uint 1
        %main = OpFunction %void None %3
        OpReturn
        OpFunctionEnd
        )";

    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
                                     });
    CreateComputePipelineHelper cs_pipeline(*this);
    cs_pipeline.InitInfo();
    cs_pipeline.cs_.reset(new VkShaderObj(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM));
    cs_pipeline.InitState();
    cs_pipeline.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&ds.layout_});
    cs_pipeline.LateBindPipelineInfo();
    cs_pipeline.cp_ci_.stage.stage = VK_SHADER_STAGE_COMPUTE_BIT;  // override with wrong value

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-OpTypeImage-06269");
    cs_pipeline.CreateComputePipeline(true, false);  // need false to prevent late binding
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, MissingSampledImageDepthComparisonForFormat) {
    TEST_DESCRIPTION("Verify that OpImage*Dref* operations are supported for given format ");

    AddRequiredExtensions(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    AddRequiredExtensions(VK_KHR_FORMAT_FEATURE_FLAGS_2_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    m_errorMonitor->ExpectSuccess();
    if (!AreRequestedExtensionsEnabled()) {
        printf("%s Required extensions not supported, skipping.\n", kSkipPrefix);
        return;
    }

    PFN_vkGetPhysicalDeviceFormatProperties2KHR vkGetPhysicalDeviceFormatProperties2KHR =
        (PFN_vkGetPhysicalDeviceFormatProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFormatProperties2KHR");

    // Find a format that is sampleable but lacks SAMPLED_IMAGE_DEPTH_COMPARISON,
    // so a Dref sample through it must trigger the VU.
    VkFormat format = VK_FORMAT_UNDEFINED;
    for (uint32_t fmt = VK_FORMAT_R4G4_UNORM_PACK8; fmt < VK_FORMAT_D16_UNORM; fmt++) {
        auto fmt_props_3 = LvlInitStruct<VkFormatProperties3KHR>();
        auto fmt_props = LvlInitStruct<VkFormatProperties2>(&fmt_props_3);
        vkGetPhysicalDeviceFormatProperties2KHR(gpu(), (VkFormat)fmt, &fmt_props);
        const bool has_sampling = (fmt_props_3.optimalTilingFeatures & VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_BIT_KHR) != 0;
        const bool has_sampling_img_depth_compare =
            (fmt_props_3.optimalTilingFeatures & VK_FORMAT_FEATURE_2_SAMPLED_IMAGE_DEPTH_COMPARISON_BIT_KHR) != 0;
        if (has_sampling && !has_sampling_img_depth_compare) {
            format = (VkFormat)fmt;
            break;
        }
    }
    if (format == VK_FORMAT_UNDEFINED) {
        printf("%s Cannot find suitable format, skipping.\n", kSkipPrefix);
        return;
    }

    const char vsSource[] = R"glsl(
        #version 450
        void main() {
        }
    )glsl";
    VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);

    // sampler2DShadow + texture() produces an OpImageSampleDrefImplicitLod.
    const char fsSource[] = R"glsl(
        #version 450
        layout (set = 0, binding = 1) uniform sampler2DShadow tex;
        void main() {
            float f = texture(tex, vec3(0));
        }
    )glsl";
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    CreatePipelineHelper g_pipe(*this);
    g_pipe.InitInfo();
    g_pipe.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
    g_pipe.dsl_bindings_ = {{1, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}};
    g_pipe.InitState();
    ASSERT_VK_SUCCESS(g_pipe.CreateGraphicsPipeline());

    VkImageObj image(m_device);
    image.Init(32, 32, 1, format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL);
    ASSERT_TRUE(image.initialized());

    VkSampler sampler;
    VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo();
    ASSERT_VK_SUCCESS(vk::CreateSampler(m_device->device(), &sampler_ci, nullptr, &sampler));

    g_pipe.descriptor_set_->WriteDescriptorImageInfo(1, image.targetView(format), sampler, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
                                                     VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, 0, 1);
    g_pipe.descriptor_set_->UpdateDescriptorSets();

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, g_pipe.pipeline_);
    vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, g_pipe.pipeline_layout_.handle(), 0, 1,
                              &g_pipe.descriptor_set_->set_, 0, nullptr);
    m_errorMonitor->VerifyNotFound();

    // The error is only reported at draw time, against the bound image's format.
    m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdDraw-None-06479");
    vk::CmdDraw(m_commandBuffer->handle(), 1, 0, 0, 0);
    m_errorMonitor->VerifyFound();

    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
}

TEST_F(VkLayerTest, InvalidPipelineSampleRateFeatureDisable) {
    // Enable sample shading in pipeline when the feature is disabled.
    // Disable sampleRateShading here
    VkPhysicalDeviceFeatures device_features = {};
    device_features.sampleRateShading = VK_FALSE;
    ASSERT_NO_FATAL_FAILURE(Init(&device_features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Cause the error by enabling sample shading...
// NOTE(review): continues TEST_F(VkLayerTest, InvalidPipelineSampleRateFeatureDisable).
auto set_shading_enable = [](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.sampleShadingEnable = VK_TRUE; };
    CreatePipelineHelper::OneshotTest(*this, set_shading_enable, kErrorBit,
                                      "VUID-VkPipelineMultisampleStateCreateInfo-sampleShadingEnable-00784");
}

TEST_F(VkLayerTest, InvalidPipelineSampleRateFeatureEnable) {
    // Enable sample shading in pipeline when the feature is disabled.
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // Require sampleRateShading here
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    if (device_features.sampleRateShading == VK_FALSE) {
        printf("%s SampleRateShading feature is disabled -- skipping related checks.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(&device_features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // minSampleShading must be in [0.0, 1.0]; probe just outside and exactly at both bounds.
    auto range_test = [this](float value, bool positive_test) {
        auto info_override = [value](CreatePipelineHelper &helper) {
            helper.pipe_ms_state_ci_.sampleShadingEnable = VK_TRUE;
            helper.pipe_ms_state_ci_.minSampleShading = value;
        };
        CreatePipelineHelper::OneshotTest(*this, info_override, kErrorBit,
                                          "VUID-VkPipelineMultisampleStateCreateInfo-minSampleShading-00786", positive_test);
    };

    range_test(NearestSmaller(0.0F), false);
    range_test(NearestGreater(1.0F), false);
    range_test(0.0F, /* positive_test= */ true);
    range_test(1.0F, /* positive_test= */ true);
}

TEST_F(VkLayerTest, InvalidPipelineDepthClipControlFeatureDisable) {
    // Enable negativeOneToOne (VK_EXT_depth_clip_control) in pipeline when the feature is disabled.
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkPipelineViewportDepthClipControlCreateInfoEXT clip_control = LvlInitStruct<VkPipelineViewportDepthClipControlCreateInfoEXT>();
    clip_control.negativeOneToOne = VK_TRUE;
    auto set_shading_enable = [clip_control](CreatePipelineHelper &helper) { helper.vp_state_ci_.pNext = &clip_control; };
    CreatePipelineHelper::OneshotTest(*this, set_shading_enable, kErrorBit,
                                      "VUID-VkPipelineViewportDepthClipControlCreateInfoEXT-negativeOneToOne-06470");
}

TEST_F(VkLayerTest, InvalidPipelineSamplePNext) {
    // Enable sample shading in pipeline when the feature is disabled.
    // Check for VK_KHR_get_physical_device_properties2
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // Set up the extension structs
    auto sampleLocations = chain_util::Init<VkPipelineSampleLocationsStateCreateInfoEXT>();
    sampleLocations.sampleLocationsInfo.sType = VK_STRUCTURE_TYPE_SAMPLE_LOCATIONS_INFO_EXT;
    auto coverageToColor = chain_util::Init<VkPipelineCoverageToColorStateCreateInfoNV>();
    auto coverageModulation = chain_util::Init<VkPipelineCoverageModulationStateCreateInfoNV>();

    // Only chain in the structs whose extensions the device actually supports.
    auto discriminatrix = [this](const char *name) { return DeviceExtensionSupported(gpu(), nullptr, name); };
    chain_util::ExtensionChain chain(discriminatrix, &m_device_extension_names);
    chain.Add(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME, sampleLocations);
    chain.Add(VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME, coverageToColor);
    chain.Add(VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME, coverageModulation);
    const void *extension_head = chain.Head();

    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    if (extension_head) {
        // A chain of valid, supported structs must be accepted.
        auto good_chain = [extension_head](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.pNext = extension_head; };
// NOTE(review): continues TEST_F(VkLayerTest, InvalidPipelineSamplePNext).
CreatePipelineHelper::OneshotTest(*this, good_chain, (kErrorBit | kWarningBit), "No error", true);
    } else {
        printf("%s Required extension not present -- skipping positive checks.\n", kSkipPrefix);
    }

    // An unrelated struct type in the pNext chain must be rejected.
    auto instance_ci = chain_util::Init<VkInstanceCreateInfo>();
    auto bad_chain = [&instance_ci](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.pNext = &instance_ci; };
    CreatePipelineHelper::OneshotTest(*this, bad_chain, (kErrorBit | kWarningBit),
                                      "VUID-VkPipelineMultisampleStateCreateInfo-pNext-pNext");
}

TEST_F(VkLayerTest, InvalidPipelineRenderPassShaderResolveQCOM) {
    TEST_DESCRIPTION("Test pipeline creation VUIDs added with VK_QCOM_render_pass_shader_resolve extension.");

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // Require sampleRateShading for these tests
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    if (device_features.sampleRateShading == VK_FALSE) {
        printf("%s SampleRateShading feature is disabled -- skipping related checks.\n", kSkipPrefix);
        return;
    }

    if (DeviceExtensionSupported(gpu(), nullptr, VK_QCOM_RENDER_PASS_SHADER_RESOLVE_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_QCOM_RENDER_PASS_SHADER_RESOLVE_EXTENSION_NAME);
    } else {
        printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_QCOM_RENDER_PASS_SHADER_RESOLVE_EXTENSION_NAME);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(&device_features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkDescriptorSetObj descriptorSet(m_device);
    descriptorSet.AppendDummy();
    descriptorSet.CreateVKDescriptorSet(m_commandBuffer);

    VkPipelineObj pipeline(m_device);

    // Create a renderPass with two attachments (0=Color, 1=Input)
    VkAttachmentReference attachmentRefs[2] = {};
    attachmentRefs[0].layout = VK_IMAGE_LAYOUT_GENERAL;
    attachmentRefs[0].attachment = 0;
    attachmentRefs[1].layout = VK_IMAGE_LAYOUT_GENERAL;
    attachmentRefs[1].attachment = 1;

    VkSubpassDescription subpass = {};
    subpass.flags = VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM;
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &attachmentRefs[0];
    subpass.inputAttachmentCount = 1;
    subpass.pInputAttachments = &attachmentRefs[1];

    VkRenderPassCreateInfo rpci = LvlInitStruct<VkRenderPassCreateInfo>();
    rpci.subpassCount = 1;
    rpci.pSubpasses = &subpass;
    rpci.attachmentCount = 2;

    VkAttachmentDescription attach_desc[2] = {};
    attach_desc[0].format = VK_FORMAT_B8G8R8A8_UNORM;
    attach_desc[0].samples = VK_SAMPLE_COUNT_1_BIT;
    attach_desc[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    attach_desc[0].finalLayout = VK_IMAGE_LAYOUT_GENERAL;
    attach_desc[1].format = VK_FORMAT_B8G8R8A8_UNORM;
    attach_desc[1].samples = VK_SAMPLE_COUNT_4_BIT;
    attach_desc[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    attach_desc[1].finalLayout = VK_IMAGE_LAYOUT_GENERAL;

    rpci.pAttachments = attach_desc;

    // renderpass has 1xMSAA colorAttachent and 4xMSAA inputAttachment
    VkRenderPass renderpass;
    vk::CreateRenderPass(m_device->device(), &rpci, NULL, &renderpass);

    // renderpass2 has 1xMSAA colorAttachent and 1xMSAA inputAttachment
    VkRenderPass renderpass2;
    attach_desc[1].samples = VK_SAMPLE_COUNT_1_BIT;
    vk::CreateRenderPass(m_device->device(), &rpci, NULL, &renderpass2);

    // shader uses gl_SamplePosition which causes the SPIR-V to include SampleRateShading capability
    static const char *sampleRateFragShaderText = R"glsl(
        #version 450
        layout(location = 0) out vec4 uFragColor;
        void main() {
            uFragColor = vec4(gl_SamplePosition.x,1,0,1);
        }
    )glsl";

    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT);
    VkShaderObj fs_sampleRate(this, sampleRateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT);

    pipeline.AddShader(&vs);
    pipeline.AddShader(&fs);

    VkPipelineColorBlendAttachmentState att_state1 = {};
    att_state1.dstAlphaBlendFactor = VK_BLEND_FACTOR_CONSTANT_COLOR;
    att_state1.blendEnable = VK_TRUE;
    pipeline.AddColorAttachment(0, att_state1);

    VkPipelineMultisampleStateCreateInfo ms_state = LvlInitStruct<VkPipelineMultisampleStateCreateInfo>();
    ms_state.flags = 0;
    ms_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
    ms_state.sampleShadingEnable = VK_FALSE;
    ms_state.minSampleShading = 0.0f;
    ms_state.pSampleMask = nullptr;
    ms_state.alphaToCoverageEnable = VK_FALSE;
    ms_state.alphaToOneEnable = VK_FALSE;
    pipeline.SetMSAA(&ms_state);

    // Create a pipeline with a subpass using VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM,
    // but where sample count of input attachment doesnt match rasterizationSamples
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-rasterizationSamples-04899");
    pipeline.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderpass);
    m_errorMonitor->VerifyFound();

    ms_state.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
    ms_state.sampleShadingEnable = VK_TRUE;
    pipeline.SetMSAA(&ms_state);

    // Create a pipeline with a subpass using VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM,
    // and with sampleShadingEnable enabled in the pipeline
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-sampleShadingEnable-04900");
    pipeline.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderpass2);
    m_errorMonitor->VerifyFound();

    ms_state.sampleShadingEnable = VK_FALSE;
    VkPipelineObj pipeline2(m_device);
    pipeline2.SetMSAA(&ms_state);
    pipeline2.AddColorAttachment(0, att_state1);
    pipeline2.AddShader(&vs);
    pipeline2.AddShader(&fs_sampleRate);

    // Create a pipeline with a subpass using VK_SUBPASS_DESCRIPTION_FRAGMENT_REGION_BIT_QCOM,
    // and with SampleRateShading capability enabled in the SPIR-V fragment shader
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-SampleRateShading-06378");
    pipeline2.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderpass2);
    m_errorMonitor->VerifyFound();

    // cleanup
    vk::DestroyRenderPass(m_device->device(), renderpass, NULL);
    vk::DestroyRenderPass(m_device->device(), renderpass2, NULL);
}
TEST_F(VkLayerTest, CreateGraphicsPipelineWithBadBasePointer) {
    TEST_DESCRIPTION("Create Graphics Pipeline with pointers that must be ignored by layers");

    ASSERT_NO_FATAL_FAILURE(Init());

    m_depth_stencil_fmt = FindSupportedDepthStencilFormat(gpu());
    ASSERT_TRUE(m_depth_stencil_fmt != 0);

    m_depthStencil->Init(m_device, static_cast<int32_t>(m_width), static_cast<int32_t>(m_height), m_depth_stencil_fmt);

    ASSERT_NO_FATAL_FAILURE(InitRenderTarget(m_depthStencil->BindInfo()));

    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT);

    const VkPipelineVertexInputStateCreateInfo pipeline_vertex_input_state_create_info{
        VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, nullptr, 0, 0, nullptr, 0, nullptr};

    const VkPipelineInputAssemblyStateCreateInfo pipeline_input_assembly_state_create_info{
        VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, VK_FALSE};

    const VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info_template{
        VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
        nullptr,
        0,
        VK_FALSE,
        VK_FALSE,
        VK_POLYGON_MODE_FILL,
        VK_CULL_MODE_NONE,
        VK_FRONT_FACE_COUNTER_CLOCKWISE,
        VK_FALSE,
        0.0f,
        0.0f,
        0.0f,
        1.0f};

    VkPipelineLayout pipeline_layout;
    auto pipeline_layout_create_info = LvlInitStruct<VkPipelineLayoutCreateInfo>();
    VkResult err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_create_info, nullptr, &pipeline_layout);
    ASSERT_VK_SUCCESS(err);

    VkPipelineRasterizationStateCreateInfo pipeline_rasterization_state_create_info =
        pipeline_rasterization_state_create_info_template;
    pipeline_rasterization_state_create_info.rasterizerDiscardEnable = VK_TRUE;

    // Bogus (non-null, never-created) pipeline handle used as basePipelineHandle.
    uint64_t fake_pipeline_id = 0xCADECADE;
    VkPipeline fake_pipeline_handle = reinterpret_cast<VkPipeline &>(fake_pipeline_id);

    VkGraphicsPipelineCreateInfo graphics_pipeline_create_info{VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
                                                              nullptr,
                                                              VK_PIPELINE_CREATE_DERIVATIVE_BIT,
                                                              1,
                                                              &vs.GetStageCreateInfo(),
                                                              &pipeline_vertex_input_state_create_info,
                                                              &pipeline_input_assembly_state_create_info,
                                                              nullptr,
                                                              nullptr,
                                                              &pipeline_rasterization_state_create_info,
                                                              nullptr,
                                                              nullptr,
                                                              nullptr,
                                                              nullptr,
                                                              pipeline_layout,
                                                              m_renderPass,
                                                              0,
                                                              fake_pipeline_handle,
                                                              -1};

    VkPipeline pipeline;
    // DERIVATIVE_BIT + invalid basePipelineHandle.
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-00722");
    vk::CreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline);
    m_errorMonitor->VerifyFound();

    // DERIVATIVE_BIT + out-of-range basePipelineIndex.
    graphics_pipeline_create_info.basePipelineHandle = VK_NULL_HANDLE;
    graphics_pipeline_create_info.basePipelineIndex = 6;
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-00723");
    vk::CreateGraphicsPipelines(m_device->handle(), VK_NULL_HANDLE, 1, &graphics_pipeline_create_info, nullptr, &pipeline);
    m_errorMonitor->VerifyFound();

    vk::DestroyPipelineLayout(m_device->handle(), pipeline_layout, nullptr);
}

TEST_F(VkLayerTest, SetDepthRangeUnrestricted) {
    TEST_DESCRIPTION("Test setting minDepthBounds and maxDepthBounds without VK_EXT_depth_range_unrestricted");

    // Extension doesn't have feature bit, so not enabling extension invokes restrictions
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    VkPhysicalDeviceFeatures device_features = {};
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features));
    if (VK_TRUE != device_features.depthBounds) {
        printf("%s Test requires unsupported depthBounds feature.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState());

    // Need to set format framework uses for InitRenderTarget
    m_depth_stencil_fmt = FindSupportedDepthStencilFormat(gpu());
    if (m_depth_stencil_fmt == VK_FORMAT_UNDEFINED) {
        printf("%s No Depth + Stencil format found. Skipped.\n", kSkipPrefix);
        return;
    }

    m_depthStencil->Init(m_device, static_cast<int32_t>(m_width), static_cast<int32_t>(m_height), m_depth_stencil_fmt,
                         VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT);
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget(m_depthStencil->BindInfo()));

    VkPipelineDepthStencilStateCreateInfo ds_ci = LvlInitStruct<VkPipelineDepthStencilStateCreateInfo>();
    ds_ci.depthTestEnable = VK_TRUE;
    ds_ci.depthBoundsTestEnable = VK_TRUE;

    CreatePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.ds_ci_ = ds_ci;
    pipe.InitState();

    // Static (non-dynamic) bounds outside [0,1] must be rejected at pipeline creation.
    pipe.ds_ci_.minDepthBounds = 1.5f;
    pipe.ds_ci_.maxDepthBounds = 1.0f;
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510");
    pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyFound();

    pipe.ds_ci_.minDepthBounds = 1.0f;
    pipe.ds_ci_.maxDepthBounds = 1.5f;
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-02510");
    pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyFound();

    // Add dynamic depth stencil state instead
    pipe.ds_ci_.minDepthBounds = 0.0f;
    pipe.ds_ci_.maxDepthBounds = 0.0f;
    const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_DEPTH_BOUNDS};
    VkPipelineDynamicStateCreateInfo dyn_state_ci = LvlInitStruct<VkPipelineDynamicStateCreateInfo>();
    dyn_state_ci.dynamicStateCount = 1;
    dyn_state_ci.pDynamicStates = dyn_states;
    pipe.dyn_state_ci_ = dyn_state_ci;
    m_errorMonitor->ExpectSuccess();
    pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyNotFound();

    m_commandBuffer->begin();
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);

    // With dynamic state, out-of-range values are caught at vkCmdSetDepthBounds time.
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDepthBounds-minDepthBounds-02508");
    vk::CmdSetDepthBounds(m_commandBuffer->handle(), 1.5f, 0.0f);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetDepthBounds-maxDepthBounds-02509");
    // NOTE(review): this TEST_F continues past the end of this chunk.
    vk::CmdSetDepthBounds(m_commandBuffer->handle(),
0.0f, 1.5f); m_errorMonitor->VerifyFound(); m_errorMonitor->ExpectSuccess(); vk::CmdSetDepthBounds(m_commandBuffer->handle(), 1.0f, 1.0f); m_errorMonitor->VerifyNotFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, VertexAttributeDivisorExtension) { TEST_DESCRIPTION("Test VUIDs added with VK_EXT_vertex_attribute_divisor extension."); bool inst_ext = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); if (inst_ext) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); } if (inst_ext && DeviceExtensionSupported(gpu(), nullptr, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); return; } VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT vadf = LvlInitStruct<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT>(); vadf.vertexAttributeInstanceRateDivisor = VK_TRUE; vadf.vertexAttributeInstanceRateZeroDivisor = VK_TRUE; VkPhysicalDeviceFeatures2 pd_features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&vadf); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const VkPhysicalDeviceLimits &dev_limits = m_device->props.limits; VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT pdvad_props = LvlInitStruct<VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT>(); VkPhysicalDeviceProperties2 pd_props2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&pdvad_props); vk::GetPhysicalDeviceProperties2(gpu(), &pd_props2); VkVertexInputBindingDivisorDescriptionEXT vibdd = {}; VkPipelineVertexInputDivisorStateCreateInfoEXT pvids_ci = LvlInitStruct<VkPipelineVertexInputDivisorStateCreateInfoEXT>(); pvids_ci.vertexBindingDivisorCount = 1; pvids_ci.pVertexBindingDivisors = &vibdd; 
VkVertexInputBindingDescription vibd = {}; vibd.stride = 12; vibd.inputRate = VK_VERTEX_INPUT_RATE_VERTEX; if (pdvad_props.maxVertexAttribDivisor < pvids_ci.vertexBindingDivisorCount) { printf("%sThis device does not support %d vertexBindingDivisors, skipping tests\n", kSkipPrefix, pvids_ci.vertexBindingDivisorCount); return; } using std::vector; struct TestCase { uint32_t div_binding; uint32_t div_divisor; uint32_t desc_binding; VkVertexInputRate desc_rate; vector<std::string> vuids; }; // clang-format off vector<TestCase> test_cases = { { 0, 1, 0, VK_VERTEX_INPUT_RATE_VERTEX, {"VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871"} }, { dev_limits.maxVertexInputBindings + 1, 1, 0, VK_VERTEX_INPUT_RATE_INSTANCE, {"VUID-VkVertexInputBindingDivisorDescriptionEXT-binding-01869", "VUID-VkVertexInputBindingDivisorDescriptionEXT-inputRate-01871"} } }; if (UINT32_MAX != pdvad_props.maxVertexAttribDivisor) { // Can't test overflow if maxVAD is UINT32_MAX test_cases.push_back( { 0, pdvad_props.maxVertexAttribDivisor + 1, 0, VK_VERTEX_INPUT_RATE_INSTANCE, {"VUID-VkVertexInputBindingDivisorDescriptionEXT-divisor-01870"} } ); } // clang-format on for (const auto &test_case : test_cases) { const auto bad_divisor_state = [&test_case, &vibdd, &pvids_ci, &vibd](CreatePipelineHelper &helper) { vibdd.binding = test_case.div_binding; vibdd.divisor = test_case.div_divisor; vibd.binding = test_case.desc_binding; vibd.inputRate = test_case.desc_rate; helper.vi_ci_.pNext = &pvids_ci; helper.vi_ci_.vertexBindingDescriptionCount = 1; helper.vi_ci_.pVertexBindingDescriptions = &vibd; }; CreatePipelineHelper::OneshotTest(*this, bad_divisor_state, kErrorBit, test_case.vuids); } } TEST_F(VkLayerTest, VertexAttributeDivisorDisabled) { TEST_DESCRIPTION("Test instance divisor feature disabled for VK_EXT_vertex_attribute_divisor extension."); bool inst_ext = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); if (inst_ext) { 
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); } if (inst_ext && DeviceExtensionSupported(gpu(), nullptr, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); return; } VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT vadf = LvlInitStruct<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT>(); vadf.vertexAttributeInstanceRateDivisor = VK_FALSE; vadf.vertexAttributeInstanceRateZeroDivisor = VK_FALSE; VkPhysicalDeviceFeatures2 pd_features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&vadf); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT pdvad_props = LvlInitStruct<VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT>(); VkPhysicalDeviceProperties2 pd_props2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&pdvad_props); vk::GetPhysicalDeviceProperties2(gpu(), &pd_props2); VkVertexInputBindingDivisorDescriptionEXT vibdd = {}; vibdd.binding = 0; vibdd.divisor = 2; VkPipelineVertexInputDivisorStateCreateInfoEXT pvids_ci = LvlInitStruct<VkPipelineVertexInputDivisorStateCreateInfoEXT>(); pvids_ci.vertexBindingDivisorCount = 1; pvids_ci.pVertexBindingDivisors = &vibdd; VkVertexInputBindingDescription vibd = {}; vibd.binding = vibdd.binding; vibd.stride = 12; vibd.inputRate = VK_VERTEX_INPUT_RATE_INSTANCE; if (pdvad_props.maxVertexAttribDivisor < pvids_ci.vertexBindingDivisorCount) { printf("%sThis device does not support %d vertexBindingDivisors, skipping tests\n", kSkipPrefix, pvids_ci.vertexBindingDivisorCount); return; } const auto instance_rate = [&pvids_ci, &vibd](CreatePipelineHelper &helper) { helper.vi_ci_.pNext = &pvids_ci; 
helper.vi_ci_.vertexBindingDescriptionCount = 1; helper.vi_ci_.pVertexBindingDescriptions = &vibd; }; CreatePipelineHelper::OneshotTest(*this, instance_rate, kErrorBit, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateDivisor-02229"); } TEST_F(VkLayerTest, VertexAttributeDivisorInstanceRateZero) { TEST_DESCRIPTION("Test instanceRateZero feature of VK_EXT_vertex_attribute_divisor extension."); bool inst_ext = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); if (inst_ext) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); } if (inst_ext && DeviceExtensionSupported(gpu(), nullptr, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_EXT_VERTEX_ATTRIBUTE_DIVISOR_EXTENSION_NAME); return; } VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT vadf = LvlInitStruct<VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT>(); vadf.vertexAttributeInstanceRateDivisor = VK_TRUE; vadf.vertexAttributeInstanceRateZeroDivisor = VK_FALSE; VkPhysicalDeviceFeatures2 pd_features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&vadf); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkVertexInputBindingDivisorDescriptionEXT vibdd = {}; vibdd.binding = 0; vibdd.divisor = 0; VkPipelineVertexInputDivisorStateCreateInfoEXT pvids_ci = LvlInitStruct<VkPipelineVertexInputDivisorStateCreateInfoEXT>(); pvids_ci.vertexBindingDivisorCount = 1; pvids_ci.pVertexBindingDivisors = &vibdd; VkVertexInputBindingDescription vibd = {}; vibd.binding = vibdd.binding; vibd.stride = 12; vibd.inputRate = VK_VERTEX_INPUT_RATE_INSTANCE; const auto instance_rate = [&pvids_ci, &vibd](CreatePipelineHelper &helper) { helper.vi_ci_.pNext = &pvids_ci; 
helper.vi_ci_.vertexBindingDescriptionCount = 1; helper.vi_ci_.pVertexBindingDescriptions = &vibd; }; CreatePipelineHelper::OneshotTest( *this, instance_rate, kErrorBit, "VUID-VkVertexInputBindingDivisorDescriptionEXT-vertexAttributeInstanceRateZeroDivisor-02228"); } /*// TODO : This test should be good, but needs Tess support in compiler to run TEST_F(VkLayerTest, InvalidPatchControlPoints) { // Attempt to Create Gfx Pipeline w/o a VS VkResult err; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "Invalid Pipeline CreateInfo State: VK_PRIMITIVE_TOPOLOGY_PATCH primitive "); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorPoolSize ds_type_count = {}; ds_type_count.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; ds_type_count.descriptorCount = 1; VkDescriptorPoolCreateInfo ds_pool_ci = LvlInitStruct<VkDescriptorPoolCreateInfo>(); ds_pool_ci.poolSizeCount = 1; ds_pool_ci.pPoolSizes = &ds_type_count; VkDescriptorPool ds_pool; err = vk::CreateDescriptorPool(m_device->device(), VK_DESCRIPTOR_POOL_USAGE_NON_FREE, 1, &ds_pool_ci, NULL, &ds_pool); ASSERT_VK_SUCCESS(err); VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_ALL; dsl_binding.pImmutableSamplers = NULL; VkDescriptorSetLayoutCreateInfo ds_layout_ci = LvlInitStruct<VkDescriptorSetLayoutCreateInfo>(); ds_layout_ci.bindingCount = 1; ds_layout_ci.pBindings = &dsl_binding; VkDescriptorSetLayout ds_layout; err = vk::CreateDescriptorSetLayout(m_device->device(), &ds_layout_ci, NULL, &ds_layout); ASSERT_VK_SUCCESS(err); VkDescriptorSet descriptorSet; err = vk::AllocateDescriptorSets(m_device->device(), ds_pool, VK_DESCRIPTOR_SET_USAGE_NON_FREE, 1, &ds_layout, &descriptorSet); ASSERT_VK_SUCCESS(err); VkPipelineLayoutCreateInfo pipeline_layout_ci = LvlInitStruct<VkPipelineLayoutCreateInfo>(); pipeline_layout_ci.pNext = NULL; 
pipeline_layout_ci.setLayoutCount = 1; pipeline_layout_ci.pSetLayouts = &ds_layout; VkPipelineLayout pipeline_layout; err = vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); ASSERT_VK_SUCCESS(err); VkPipelineShaderStageCreateInfo shaderStages[3]; memset(&shaderStages, 0, 3 * sizeof(VkPipelineShaderStageCreateInfo)); VkShaderObj vs(this,bindStateVertShaderText,VK_SHADER_STAGE_VERTEX_BIT); // Just using VS txt for Tess shaders as we don't care about functionality VkShaderObj tc(this,bindStateVertShaderText,VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT); VkShaderObj te(this,bindStateVertShaderText,VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT); shaderStages[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); shaderStages[0].stage = VK_SHADER_STAGE_VERTEX_BIT; shaderStages[0].shader = vs.handle(); shaderStages[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); shaderStages[1].stage = VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT; shaderStages[1].shader = tc.handle(); shaderStages[2] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); shaderStages[2].stage = VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT; shaderStages[2].shader = te.handle(); VkPipelineInputAssemblyStateCreateInfo iaCI = LvlInitStruct<VkPipelineInputAssemblyStateCreateInfo>(); iaCI.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; VkPipelineTessellationStateCreateInfo tsCI = LvlInitStruct<VkPipelineTessellationStateCreateInfo>(); tsCI.patchControlPoints = 0; // This will cause an error VkGraphicsPipelineCreateInfo gp_ci = LvlInitStruct<VkGraphicsPipelineCreateInfo>(); gp_ci.stageCount = 3; gp_ci.pStages = shaderStages; gp_ci.pVertexInputState = NULL; gp_ci.pInputAssemblyState = &iaCI; gp_ci.pTessellationState = &tsCI; gp_ci.pViewportState = NULL; gp_ci.pRasterizationState = NULL; gp_ci.pMultisampleState = NULL; gp_ci.pDepthStencilState = NULL; gp_ci.pColorBlendState = NULL; gp_ci.flags = VK_PIPELINE_CREATE_DISABLE_OPTIMIZATION_BIT; gp_ci.layout = pipeline_layout; 
gp_ci.renderPass = renderPass(); VkPipelineCacheCreateInfo pc_ci = LvlInitStruct<VkPipelineCacheCreateInfo>(); pc_ci.initialSize = 0; pc_ci.initialData = 0; pc_ci.maxSize = 0; VkPipeline pipeline; VkPipelineCache pipelineCache; err = vk::CreatePipelineCache(m_device->device(), &pc_ci, NULL, &pipelineCache); ASSERT_VK_SUCCESS(err); err = vk::CreateGraphicsPipelines(m_device->device(), pipelineCache, 1, &gp_ci, NULL, &pipeline); m_errorMonitor->VerifyFound(); vk::DestroyPipelineCache(m_device->device(), pipelineCache, NULL); vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, NULL); vk::DestroyDescriptorSetLayout(m_device->device(), ds_layout, NULL); vk::DestroyDescriptorPool(m_device->device(), ds_pool, NULL); } */ TEST_F(VkLayerTest, PSOViewportStateTests) { TEST_DESCRIPTION("Test VkPipelineViewportStateCreateInfo viewport and scissor count validation for non-multiViewport"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const auto break_vp_state = [](CreatePipelineHelper &helper) { helper.rs_state_ci_.rasterizerDiscardEnable = VK_FALSE; helper.gp_ci_.pViewportState = nullptr; }; CreatePipelineHelper::OneshotTest(*this, break_vp_state, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-rasterizerDiscardEnable-00750"); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkViewport viewports[] = {viewport, viewport}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkRect2D scissors[] = {scissor, scissor}; // test viewport and scissor arrays using std::vector; struct TestCase { uint32_t viewport_count; VkViewport *viewports; uint32_t scissor_count; VkRect2D *scissors; vector<std::string> vuids; }; vector<TestCase> test_cases = { {0, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", 
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {2, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, nullptr, 1, scissors, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}}, {1, viewports, 1, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {1, nullptr, 1, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {2, nullptr, 3, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, }; for (const auto &test_case : test_cases) { const auto break_vp = 
[&test_case](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, kErrorBit, test_case.vuids); } vector<TestCase> dyn_test_cases = { {0, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 1, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {1, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {2, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, nullptr, 3, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, nullptr, 0, nullptr, 
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01216", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01217"}}, }; const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR}; for (const auto &test_case : dyn_test_cases) { const auto break_vp = [&](CreatePipelineHelper &helper) { VkPipelineDynamicStateCreateInfo dyn_state_ci = LvlInitStruct<VkPipelineDynamicStateCreateInfo>(); dyn_state_ci.dynamicStateCount = size(dyn_states); dyn_state_ci.pDynamicStates = dyn_states; helper.dyn_state_ci_ = dyn_state_ci; helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, kErrorBit, test_case.vuids); } } // Set Extension dynamic states without enabling the required Extensions. TEST_F(VkLayerTest, ExtensionDynamicStatesSetWOExtensionEnabled) { TEST_DESCRIPTION("Create a graphics pipeline with Extension dynamic states without enabling the required Extensions."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); using std::vector; struct TestCase { uint32_t dynamic_state_count; VkDynamicState dynamic_state; char const *errmsg; }; vector<TestCase> dyn_test_cases = { {1, VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, "contains VK_DYNAMIC_STATE_VIEWPORT_W_SCALING_NV, but VK_NV_clip_space_w_scaling"}, {1, VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, "contains VK_DYNAMIC_STATE_DISCARD_RECTANGLE_EXT, but VK_EXT_discard_rectangles"}, {1, VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, "contains VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT, but VK_EXT_sample_locations"}, }; for (const auto &test_case : dyn_test_cases) { VkDynamicState state[1]; state[0] = test_case.dynamic_state; const auto break_vp = [&](CreatePipelineHelper &helper) { VkPipelineDynamicStateCreateInfo dyn_state_ci = 
LvlInitStruct<VkPipelineDynamicStateCreateInfo>(); dyn_state_ci.dynamicStateCount = test_case.dynamic_state_count; dyn_state_ci.pDynamicStates = state; helper.dyn_state_ci_ = dyn_state_ci; }; CreatePipelineHelper::OneshotTest(*this, break_vp, kErrorBit, test_case.errmsg); } } TEST_F(VkLayerTest, PSOViewportStateMultiViewportTests) { TEST_DESCRIPTION("Test VkPipelineViewportStateCreateInfo viewport and scissor count validation for multiViewport feature"); ASSERT_NO_FATAL_FAILURE(Init()); // enables all supported features if (!m_device->phy().features().multiViewport) { printf("%s VkPhysicalDeviceFeatures::multiViewport is not supported -- skipping test.\n", kSkipPrefix); return; } // at least 16 viewports supported from here on ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; VkViewport viewports[] = {viewport, viewport}; VkRect2D scissor = {{0, 0}, {64, 64}}; VkRect2D scissors[] = {scissor, scissor}; using std::vector; struct TestCase { uint32_t viewport_count; VkViewport *viewports; uint32_t scissor_count; VkRect2D *scissors; vector<std::string> vuids; }; vector<TestCase> test_cases = { {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, {2, nullptr, 2, scissors, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}}, {2, viewports, 2, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {2, nullptr, 2, nullptr, {"VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}, {0, nullptr, 0, nullptr, 
{"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, }; const auto max_viewports = m_device->phy().properties().limits.maxViewports; const bool max_viewports_maxxed = max_viewports == std::numeric_limits<decltype(max_viewports)>::max(); if (max_viewports_maxxed) { printf("%s VkPhysicalDeviceLimits::maxViewports is UINT32_MAX -- skipping part of test requiring to exceed maxViewports.\n", kSkipPrefix); } else { const auto too_much_viewports = max_viewports + 1; // avoid potentially big allocations by using only nullptr test_cases.push_back({too_much_viewports, nullptr, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747"}}); test_cases.push_back({2, viewports, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}); test_cases.push_back( {too_much_viewports, nullptr, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00747", "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00748"}}); } for (const auto &test_case : test_cases) { const auto break_vp = [&test_case](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, break_vp, kErrorBit, test_case.vuids); } vector<TestCase> dyn_test_cases = { {0, viewports, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", 
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {2, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}, {0, viewports, 0, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, {0, nullptr, 0, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-arraylength", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-arraylength"}}, }; if (!max_viewports_maxxed) { const auto too_much_viewports = max_viewports + 1; // avoid potentially big allocations by using only nullptr dyn_test_cases.push_back({too_much_viewports, nullptr, 2, scissors, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}); dyn_test_cases.push_back({2, viewports, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01220"}}); dyn_test_cases.push_back({too_much_viewports, nullptr, too_much_viewports, nullptr, {"VUID-VkPipelineViewportStateCreateInfo-viewportCount-01218", "VUID-VkPipelineViewportStateCreateInfo-scissorCount-01219"}}); } const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT, VK_DYNAMIC_STATE_SCISSOR}; for (const auto &test_case : dyn_test_cases) { const auto break_vp = [&](CreatePipelineHelper &helper) { VkPipelineDynamicStateCreateInfo dyn_state_ci = LvlInitStruct<VkPipelineDynamicStateCreateInfo>(); dyn_state_ci.dynamicStateCount = size(dyn_states); dyn_state_ci.pDynamicStates = dyn_states; helper.dyn_state_ci_ = dyn_state_ci; helper.vp_state_ci_.viewportCount = test_case.viewport_count; helper.vp_state_ci_.pViewports = test_case.viewports; helper.vp_state_ci_.scissorCount = test_case.scissor_count; helper.vp_state_ci_.pScissors = test_case.scissors; }; CreatePipelineHelper::OneshotTest(*this, 
break_vp, kErrorBit, test_case.vuids);  // (cont.) tail of the OneshotTest call started on the previous source line
    }  // for each dynamic-state test case
}  // end PSOViewportStateMultiViewportTests

TEST_F(VkLayerTest, DynViewportAndScissorUndefinedDrawState) {
    TEST_DESCRIPTION("Test viewport and scissor dynamic state that is not set before draw");

    ASSERT_NO_FATAL_FAILURE(Init());

    // TODO: should also test on !multiViewport
    if (!m_device->phy().features().multiViewport) {
        printf("%s Device does not support multiple viewports/scissors; skipped.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT);

    const VkPipelineLayoutObj pipeline_layout(m_device);

    // Pipeline #1: viewport is dynamic state (scissor is baked in), so a draw is only
    // valid after vkCmdSetViewport has set viewport 0.
    VkPipelineObj pipeline_dyn_vp(m_device);
    pipeline_dyn_vp.AddShader(&vs);
    pipeline_dyn_vp.AddShader(&fs);
    pipeline_dyn_vp.AddDefaultColorAttachment();
    pipeline_dyn_vp.MakeDynamic(VK_DYNAMIC_STATE_VIEWPORT);
    pipeline_dyn_vp.SetScissor(m_scissors);
    ASSERT_VK_SUCCESS(pipeline_dyn_vp.CreateVKPipeline(pipeline_layout.handle(), m_renderPass));

    // Pipeline #2: mirror image — scissor dynamic, viewport baked in.
    VkPipelineObj pipeline_dyn_sc(m_device);
    pipeline_dyn_sc.AddShader(&vs);
    pipeline_dyn_sc.AddShader(&fs);
    pipeline_dyn_sc.AddDefaultColorAttachment();
    pipeline_dyn_sc.SetViewport(m_viewports);
    pipeline_dyn_sc.MakeDynamic(VK_DYNAMIC_STATE_SCISSOR);
    ASSERT_VK_SUCCESS(pipeline_dyn_sc.CreateVKPipeline(pipeline_layout.handle(), m_renderPass));

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);

    // Only viewport index 1 is set below, but the bound pipeline consumes viewport 0,
    // so the draw must trigger the "dynamic state not set" VUID.
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-commandBuffer-02701");
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_dyn_vp.handle());
    vk::CmdSetViewport(m_commandBuffer->handle(), 1, 1, &m_viewports[0]);  // Forgetting to set needed 0th viewport (PSO viewportCount == 1)
    m_commandBuffer->Draw(1, 0, 0, 0);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-commandBuffer-02701");
vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_dyn_sc.handle()); vk::CmdSetScissor(m_commandBuffer->handle(), 1, 1, &m_scissors[0]); // Forgetting to set needed 0th scissor (PSO scissorCount == 1) m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, PSOLineWidthInvalid) { TEST_DESCRIPTION("Test non-1.0 lineWidth errors when pipeline is created and in vkCmdSetLineWidth"); VkPhysicalDeviceFeatures features{}; ASSERT_NO_FATAL_FAILURE(Init(&features)); if (IsPlatform(kNexusPlayer)) { printf("%s This test should not run on Nexus Player\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const std::vector<float> test_cases = {-1.0f, 0.0f, NearestSmaller(1.0f), NearestGreater(1.0f), NAN}; // test VkPipelineRasterizationStateCreateInfo::lineWidth for (const auto test_case : test_cases) { const auto set_lineWidth = [&](CreatePipelineHelper &helper) { helper.rs_state_ci_.lineWidth = test_case; }; CreatePipelineHelper::OneshotTest(*this, set_lineWidth, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-00749"); } // test vk::CmdSetLineWidth m_commandBuffer->begin(); for (const auto test_case : test_cases) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetLineWidth-lineWidth-00788"); vk::CmdSetLineWidth(m_commandBuffer->handle(), test_case); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, PipelineCreationCacheControl) { TEST_DESCRIPTION("Test VK_EXT_pipeline_creation_cache_control"); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_PIPELINE_CREATION_CACHE_CONTROL_EXTENSION_NAME); } else { printf("%s VK_EXT_pipeline_creation_cache_control not supported, skipping tests\n", kSkipPrefix); return; } 
VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT cache_control_features =
        LvlInitStruct<VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT>();
    cache_control_features.pipelineCreationCacheControl = VK_FALSE;  // Tests all assume feature is off
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &cache_control_features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Graphics pipeline: FAIL_ON_PIPELINE_COMPILE_REQUIRED requires the feature.
    const auto set_graphics_flags = [&](CreatePipelineHelper &helper) {
        helper.gp_ci_.flags = VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT;
    };
    CreatePipelineHelper::OneshotTest(*this, set_graphics_flags, kErrorBit,
                                      "VUID-VkGraphicsPipelineCreateInfo-pipelineCreationCacheControl-02878");

    // Compute pipeline: EARLY_RETURN_ON_FAILURE likewise requires the feature.
    const auto set_compute_flags = [&](CreateComputePipelineHelper &helper) {
        helper.cp_ci_.flags = VK_PIPELINE_CREATE_EARLY_RETURN_ON_FAILURE_BIT_EXT;
    };
    CreateComputePipelineHelper::OneshotTest(*this, set_compute_flags, kErrorBit,
                                             "VUID-VkComputePipelineCreateInfo-pipelineCreationCacheControl-02875");

    // Pipeline cache: EXTERNALLY_SYNCHRONIZED flag also requires the feature.
    VkPipelineCache pipeline_cache;
    VkPipelineCacheCreateInfo cache_create_info = LvlInitStruct<VkPipelineCacheCreateInfo>();
    cache_create_info.initialDataSize = 0;
    cache_create_info.flags = VK_PIPELINE_CACHE_CREATE_EXTERNALLY_SYNCHRONIZED_BIT_EXT;
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineCacheCreateInfo-pipelineCreationCacheControl-02892");
    vk::CreatePipelineCache(m_device->device(), &cache_create_info, nullptr, &pipeline_cache);
    m_errorMonitor->VerifyFound();
}

// Negative test: a vertex input binding number >= maxVertexInputBindings is invalid.
TEST_F(VkLayerTest, VUID_VkVertexInputBindingDescription_binding_00618) {
    TEST_DESCRIPTION(
        "Test VUID-VkVertexInputBindingDescription-binding-00618: binding must be less than "
        "VkPhysicalDeviceLimits::maxVertexInputBindings");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Test when binding is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings.
VkVertexInputBindingDescription vertex_input_binding_description{};
    // binding == limit is already out of range (valid range is [0, limit)).
    vertex_input_binding_description.binding = m_device->props.limits.maxVertexInputBindings;

    const auto set_binding = [&](CreatePipelineHelper &helper) {
        helper.vi_ci_.pVertexBindingDescriptions = &vertex_input_binding_description;
        helper.vi_ci_.vertexBindingDescriptionCount = 1;
    };
    CreatePipelineHelper::OneshotTest(*this, set_binding, kErrorBit, "VUID-VkVertexInputBindingDescription-binding-00618");
}

// Negative test: a vertex input binding stride > maxVertexInputBindingStride is invalid.
TEST_F(VkLayerTest, VUID_VkVertexInputBindingDescription_stride_00619) {
    TEST_DESCRIPTION(
        "Test VUID-VkVertexInputBindingDescription-stride-00619: stride must be less than or equal to "
        "VkPhysicalDeviceLimits::maxVertexInputBindingStride");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Test when stride is greater than VkPhysicalDeviceLimits::maxVertexInputBindingStride.
    VkVertexInputBindingDescription vertex_input_binding_description{};
    vertex_input_binding_description.stride = m_device->props.limits.maxVertexInputBindingStride + 1;

    const auto set_binding = [&](CreatePipelineHelper &helper) {
        helper.vi_ci_.pVertexBindingDescriptions = &vertex_input_binding_description;
        helper.vi_ci_.vertexBindingDescriptionCount = 1;
    };
    CreatePipelineHelper::OneshotTest(*this, set_binding, kErrorBit, "VUID-VkVertexInputBindingDescription-stride-00619");
}

// Negative test: a vertex attribute location >= maxVertexInputAttributes is invalid.
TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_location_00620) {
    TEST_DESCRIPTION(
        "Test VUID-VkVertexInputAttributeDescription-location-00620: location must be less than "
        "VkPhysicalDeviceLimits::maxVertexInputAttributes");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Test when location is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputAttributes.
VkVertexInputAttributeDescription vertex_input_attribute_description{};
    // location == limit is already out of range (valid range is [0, limit)).
    vertex_input_attribute_description.location = m_device->props.limits.maxVertexInputAttributes;

    const auto set_attribute = [&](CreatePipelineHelper &helper) {
        helper.vi_ci_.pVertexAttributeDescriptions = &vertex_input_attribute_description;
        helper.vi_ci_.vertexAttributeDescriptionCount = 1;
    };
    // binding-00615 also fires because the attribute references binding 0, which has no
    // matching binding description in this pipeline.
    CreatePipelineHelper::OneshotTest(*this, set_attribute, kErrorBit,
                                      vector<string>{"VUID-VkVertexInputAttributeDescription-location-00620",
                                                     "VUID-VkPipelineVertexInputStateCreateInfo-binding-00615"});
}

// Negative test: a vertex attribute's binding number >= maxVertexInputBindings is invalid.
TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_binding_00621) {
    TEST_DESCRIPTION(
        "Test VUID-VkVertexInputAttributeDescription-binding-00621: binding must be less than "
        "VkPhysicalDeviceLimits::maxVertexInputBindings");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Test when binding is greater than or equal to VkPhysicalDeviceLimits::maxVertexInputBindings.
    VkVertexInputAttributeDescription vertex_input_attribute_description{};
    vertex_input_attribute_description.binding = m_device->props.limits.maxVertexInputBindings;

    const auto set_attribute = [&](CreatePipelineHelper &helper) {
        helper.vi_ci_.pVertexAttributeDescriptions = &vertex_input_attribute_description;
        helper.vi_ci_.vertexAttributeDescriptionCount = 1;
    };
    // binding-00615 also fires: the out-of-range binding has no binding description either.
    CreatePipelineHelper::OneshotTest(*this, set_attribute, kErrorBit,
                                      vector<string>{"VUID-VkVertexInputAttributeDescription-binding-00621",
                                                     "VUID-VkPipelineVertexInputStateCreateInfo-binding-00615"});
}

// Negative test: a vertex attribute offset > maxVertexInputAttributeOffset is invalid.
// Uses the device_profile_api layer to artificially lower the limit when the device
// reports the maximum possible value (0xFFFFFFFF), which would make the limit untestable.
TEST_F(VkLayerTest, VUID_VkVertexInputAttributeDescription_offset_00622) {
    TEST_DESCRIPTION(
        "Test VUID-VkVertexInputAttributeDescription-offset-00622: offset must be less than or equal to "
        "VkPhysicalDeviceLimits::maxVertexInputAttributeOffset");

    EnableDeviceProfileLayer();

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    uint32_t maxVertexInputAttributeOffset = 0;
    {
        VkPhysicalDeviceProperties device_props = {};
vk::GetPhysicalDeviceProperties(gpu(), &device_props);
        maxVertexInputAttributeOffset = device_props.limits.maxVertexInputAttributeOffset;
        if (maxVertexInputAttributeOffset == 0xFFFFFFFF) {
            // Attempt to artificially lower maximum offset
            PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT =
                (PFN_vkSetPhysicalDeviceLimitsEXT)vk::GetInstanceProcAddr(instance(), "vkSetPhysicalDeviceLimitsEXT");
            if (!fpvkSetPhysicalDeviceLimitsEXT) {
                printf("%s All offsets are valid & device_profile_api not found; skipped.\n", kSkipPrefix);
                return;
            }

            // Keep the lowered offset limit below the binding stride used later so that
            // stride-relative VUIDs do not fire instead of offset-00622.
            device_props.limits.maxVertexInputAttributeOffset = device_props.limits.maxVertexInputBindingStride - 2;
            fpvkSetPhysicalDeviceLimitsEXT(gpu(), &device_props.limits);
            maxVertexInputAttributeOffset = device_props.limits.maxVertexInputAttributeOffset;
        }
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkVertexInputBindingDescription vertex_input_binding_description{};
    vertex_input_binding_description.binding = 0;
    vertex_input_binding_description.stride = m_device->props.limits.maxVertexInputBindingStride;
    vertex_input_binding_description.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
    // Test when offset is greater than maximum.
VkVertexInputAttributeDescription vertex_input_attribute_description{};
    vertex_input_attribute_description.format = VK_FORMAT_R8_UNORM;
    vertex_input_attribute_description.offset = maxVertexInputAttributeOffset + 1;

    const auto set_attribute = [&](CreatePipelineHelper &helper) {
        helper.vi_ci_.pVertexBindingDescriptions = &vertex_input_binding_description;
        helper.vi_ci_.vertexBindingDescriptionCount = 1;
        helper.vi_ci_.pVertexAttributeDescriptions = &vertex_input_attribute_description;
        helper.vi_ci_.vertexAttributeDescriptionCount = 1;
    };
    CreatePipelineHelper::OneshotTest(*this, set_attribute, kErrorBit, "VUID-VkVertexInputAttributeDescription-offset-00622");
}

// Negative test: drawing with a pipeline whose rasterizationSamples (4x) does not match the
// render pass attachment sample count must trigger VUID-vkCmdDraw-rasterizationSamples-04740.
TEST_F(VkLayerTest, NumSamplesMismatch) {
    // Create CommandBuffer where MSAA samples doesn't match RenderPass
    // sampleCount
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-rasterizationSamples-04740");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    OneOffDescriptorSet descriptor_set(m_device, {
                                                     {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                 });

    VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = LvlInitStruct<VkPipelineMultisampleStateCreateInfo>();
    pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT;
    pipe_ms_state_ci.sampleShadingEnable = 0;
    pipe_ms_state_ci.minSampleShading = 1.0;
    pipe_ms_state_ci.pSampleMask = NULL;

    const VkPipelineLayoutObj pipeline_layout(m_device, {&descriptor_set.layout_});

    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT);  // We shouldn't need a fragment shader
    // but add it to be able to run on more devices
    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();
    pipe.SetMSAA(&pipe_ms_state_ci);

    // Pipeline creation itself may warn about the subpass mismatch; the draw-time error is
    // the one under test, so suppress the creation-time VUID.
    m_errorMonitor->SetUnexpectedError("VUID-VkGraphicsPipelineCreateInfo-subpass-00757");
    pipe.CreateVKPipeline(pipeline_layout.handle(), renderPass());
m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.handle());

    VkViewport viewport = {0, 0, 16, 16, 0, 1};
    vk::CmdSetViewport(m_commandBuffer->handle(), 0, 1, &viewport);
    VkRect2D scissor = {{0, 0}, {16, 16}};
    vk::CmdSetScissor(m_commandBuffer->handle(), 0, 1, &scissor);

    // Render triangle (the error should trigger on the attempt to draw).
    m_commandBuffer->Draw(3, 1, 0, 0);

    // Finalize recording of the command buffer
    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();

    m_errorMonitor->VerifyFound();
}

// Negative test: a color-blend state with zero attachments while the subpass has one color
// attachment must trigger VUID-VkGraphicsPipelineCreateInfo-renderPass-06042.
TEST_F(VkLayerTest, NumBlendAttachMismatch) {
    // Create Pipeline where the number of blend attachments doesn't match the
    // number of color attachments.  In this case, we don't add any color
    // blend attachments even though we have a color attachment.

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkPipelineMultisampleStateCreateInfo pipe_ms_state_ci = LvlInitStruct<VkPipelineMultisampleStateCreateInfo>();
    pipe_ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
    pipe_ms_state_ci.sampleShadingEnable = 0;
    pipe_ms_state_ci.minSampleShading = 1.0;
    pipe_ms_state_ci.pSampleMask = NULL;

    const auto set_MSAA = [&](CreatePipelineHelper &helper) {
        helper.pipe_ms_state_ci_ = pipe_ms_state_ci;
        helper.cb_ci_.attachmentCount = 0;  // mismatch: subpass has 1 color attachment
    };
    CreatePipelineHelper::OneshotTest(*this, set_MSAA, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06042");
}

// Negative test: vkCmdClearAttachments rects must stay inside the render area and within
// the framebuffer's layer range (VUIDs pRects-00016 / pRects-00017).
TEST_F(VkLayerTest, CmdClearAttachmentTests) {
    TEST_DESCRIPTION("Various tests for validating usage of vkCmdClearAttachments");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    m_commandBuffer->begin();
    m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo);

    // Main thing we care about for this test is that the VkImage obj we're
    // clearing matches Color Attachment of FB
    //  Also pass down other dummy params to keep driver and paramchecker happy
VkClearAttachment color_attachment;
    color_attachment.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    color_attachment.clearValue.color.float32[0] = 1.0;
    color_attachment.clearValue.color.float32[1] = 1.0;
    color_attachment.clearValue.color.float32[2] = 1.0;
    color_attachment.clearValue.color.float32[3] = 1.0;
    color_attachment.colorAttachment = 0;
    VkClearRect clear_rect = {{{0, 0}, {(uint32_t)m_width, (uint32_t)m_height}}, 0, 1};

    // Rect extends 4 pixels beyond the render area width -> pRects-00016.
    clear_rect.rect.extent.width = renderPassBeginInfo().renderArea.extent.width + 4;
    clear_rect.rect.extent.height = clear_rect.rect.extent.height / 2;
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdClearAttachments-pRects-00016");
    vk::CmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect);
    m_errorMonitor->VerifyFound();

    // baseLayer >= view layers
    clear_rect.rect.extent.width = (uint32_t)m_width;
    clear_rect.baseArrayLayer = 1;
    clear_rect.layerCount = 1;
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdClearAttachments-pRects-00017");
    vk::CmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect);
    m_errorMonitor->VerifyFound();

    // baseLayer + layerCount > view layers
    clear_rect.rect.extent.width = (uint32_t)m_width;
    clear_rect.baseArrayLayer = 0;
    clear_rect.layerCount = 2;
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdClearAttachments-pRects-00017");
    vk::CmdClearAttachments(m_commandBuffer->handle(), 1, &color_attachment, 1, &clear_rect);
    m_errorMonitor->VerifyFound();

    m_commandBuffer->EndRenderPass();
    m_commandBuffer->end();
}

// Negative test: too many vertex binding descriptions, plus a duplicated binding number.
TEST_F(VkLayerTest, InvalidVertexBindingDescriptions) {
    TEST_DESCRIPTION(
        "Attempt to create a graphics pipeline where:"
        "1) count of vertex bindings exceeds device's maxVertexInputBindings limit"
        "2) requested bindings include a duplicate binding value");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // One more than the limit guarantees the count VUID fires.
    const uint32_t binding_count = m_device->props.limits.maxVertexInputBindings + 1;
std::vector<VkVertexInputBindingDescription> input_bindings(binding_count);
    for (uint32_t i = 0; i < binding_count; ++i) {
        input_bindings[i].binding = i;
        input_bindings[i].stride = 4;
        input_bindings[i].inputRate = VK_VERTEX_INPUT_RATE_VERTEX;
    }
    // Let the last binding description use same binding as the first one
    input_bindings[binding_count - 1].binding = 0;

    VkVertexInputAttributeDescription input_attrib;
    input_attrib.binding = 0;
    input_attrib.location = 0;
    input_attrib.format = VK_FORMAT_R32G32B32_SFLOAT;
    input_attrib.offset = 0;

    const auto set_Info = [&](CreatePipelineHelper &helper) {
        helper.vi_ci_.pVertexBindingDescriptions = input_bindings.data();
        helper.vi_ci_.vertexBindingDescriptionCount = binding_count;
        helper.vi_ci_.pVertexAttributeDescriptions = &input_attrib;
        helper.vi_ci_.vertexAttributeDescriptionCount = 1;
    };
    // Expect both the over-limit count VUID and the duplicate-binding VUID.
    CreatePipelineHelper::OneshotTest(
        *this, set_Info, kErrorBit,
        vector<string>{"VUID-VkPipelineVertexInputStateCreateInfo-vertexBindingDescriptionCount-00613",
                       "VUID-VkPipelineVertexInputStateCreateInfo-pVertexBindingDescriptions-00616"});
}

// Negative test: too many vertex attribute descriptions, a duplicated location, and an
// attribute referencing an undeclared binding.
TEST_F(VkLayerTest, InvalidVertexAttributeDescriptions) {
    TEST_DESCRIPTION(
        "Attempt to create a graphics pipeline where:"
        "1) count of vertex attributes exceeds device's maxVertexInputAttributes limit"
        "2) requested location include a duplicate location value"
        "3) binding used by one attribute is not defined by a binding description");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkVertexInputBindingDescription input_binding;
    input_binding.binding = 0;
    input_binding.stride = 4;
    input_binding.inputRate = VK_VERTEX_INPUT_RATE_VERTEX;

    // One more than the limit guarantees the count VUID fires.
    const uint32_t attribute_count = m_device->props.limits.maxVertexInputAttributes + 1;
    std::vector<VkVertexInputAttributeDescription> input_attribs(attribute_count);
    for (uint32_t i = 0; i < attribute_count; ++i) {
        input_attribs[i].binding = 0;
        input_attribs[i].location = i;
        input_attribs[i].format = VK_FORMAT_R32G32B32_SFLOAT;
input_attribs[i].offset = 0;
    }
    // Let the last input_attribs description use same location as the first one
    input_attribs[attribute_count - 1].location = 0;
    // Let the last input_attribs description use binding which is not defined
    input_attribs[attribute_count - 1].binding = 1;

    const auto set_Info = [&](CreatePipelineHelper &helper) {
        helper.vi_ci_.pVertexBindingDescriptions = &input_binding;
        helper.vi_ci_.vertexBindingDescriptionCount = 1;
        helper.vi_ci_.pVertexAttributeDescriptions = input_attribs.data();
        helper.vi_ci_.vertexAttributeDescriptionCount = attribute_count;
    };
    // Expect all three VUIDs: over-limit count, undefined binding, duplicate location.
    CreatePipelineHelper::OneshotTest(
        *this, set_Info, kErrorBit,
        vector<string>{"VUID-VkPipelineVertexInputStateCreateInfo-vertexAttributeDescriptionCount-00614",
                       "VUID-VkPipelineVertexInputStateCreateInfo-binding-00615",
                       "VUID-VkPipelineVertexInputStateCreateInfo-pVertexAttributeDescriptions-00617"});
}

// Negative test: logicOp value outside the valid VkLogicOp range while logicOpEnable is on.
TEST_F(VkLayerTest, ColorBlendInvalidLogicOp) {
    TEST_DESCRIPTION("Attempt to use invalid VkPipelineColorBlendStateCreateInfo::logicOp value.");

    ASSERT_NO_FATAL_FAILURE(Init());  // enables all supported features
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    if (!m_device->phy().features().logicOp) {
        printf("%s Device does not support logicOp feature; skipped.\n", kSkipPrefix);
        return;
    }

    const auto set_shading_enable = [](CreatePipelineHelper &helper) {
        helper.cb_ci_.logicOpEnable = VK_TRUE;
        helper.cb_ci_.logicOp = static_cast<VkLogicOp>(VK_LOGIC_OP_SET + 1);  // invalid logicOp to be tested
    };
    CreatePipelineHelper::OneshotTest(*this, set_shading_enable, kErrorBit,
                                      "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00607");
}

// Negative test: logicOpEnable requires the logicOp device feature (disabled here via features{}).
TEST_F(VkLayerTest, ColorBlendUnsupportedLogicOp) {
    TEST_DESCRIPTION("Attempt enabling VkPipelineColorBlendStateCreateInfo::logicOpEnable when logicOp feature is disabled.");

    VkPhysicalDeviceFeatures features{};
    ASSERT_NO_FATAL_FAILURE(Init(&features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    const auto set_shading_enable = [](CreatePipelineHelper &helper) { helper.cb_ci_.logicOpEnable = VK_TRUE; };
    CreatePipelineHelper::OneshotTest(*this, set_shading_enable, kErrorBit,
                                      "VUID-VkPipelineColorBlendStateCreateInfo-logicOpEnable-00606");
}

// Negative test: each SRC1_* dual-source blend factor requires the dualSrcBlend feature
// (disabled here); covers all four factor slots (src/dst color, src/dst alpha).
TEST_F(VkLayerTest, ColorBlendUnsupportedDualSourceBlend) {
    TEST_DESCRIPTION("Attempt to use dual-source blending when dualSrcBlend feature is disabled.");

    VkPhysicalDeviceFeatures features{};
    ASSERT_NO_FATAL_FAILURE(Init(&features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkPipelineColorBlendAttachmentState cb_attachments = {};

    // Lambda captures cb_attachments by reference, so each reconfiguration below is picked
    // up by the next OneshotTest invocation.
    const auto set_dsb_src_color_enable = [&](CreatePipelineHelper &helper) { helper.cb_attachments_[0] = cb_attachments; };

    cb_attachments.blendEnable = VK_TRUE;
    cb_attachments.srcColorBlendFactor = VK_BLEND_FACTOR_SRC1_COLOR;  // bad!
    cb_attachments.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
    cb_attachments.colorBlendOp = VK_BLEND_OP_ADD;
    cb_attachments.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
    cb_attachments.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
    cb_attachments.alphaBlendOp = VK_BLEND_OP_ADD;
    CreatePipelineHelper::OneshotTest(*this, set_dsb_src_color_enable, kErrorBit,
                                      "VUID-VkPipelineColorBlendAttachmentState-srcColorBlendFactor-00608");

    cb_attachments.blendEnable = VK_TRUE;
    cb_attachments.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR;
    cb_attachments.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR;  // bad
    cb_attachments.colorBlendOp = VK_BLEND_OP_ADD;
    cb_attachments.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
    cb_attachments.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
    cb_attachments.alphaBlendOp = VK_BLEND_OP_ADD;
    CreatePipelineHelper::OneshotTest(*this, set_dsb_src_color_enable, kErrorBit,
                                      "VUID-VkPipelineColorBlendAttachmentState-dstColorBlendFactor-00609");

    cb_attachments.blendEnable = VK_TRUE;
    cb_attachments.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR;
    cb_attachments.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
    cb_attachments.colorBlendOp = VK_BLEND_OP_ADD;
cb_attachments.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC1_ALPHA;  // bad
    cb_attachments.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA;
    cb_attachments.alphaBlendOp = VK_BLEND_OP_ADD;
    CreatePipelineHelper::OneshotTest(*this, set_dsb_src_color_enable, kErrorBit,
                                      "VUID-VkPipelineColorBlendAttachmentState-srcAlphaBlendFactor-00610");

    cb_attachments.blendEnable = VK_TRUE;
    cb_attachments.srcColorBlendFactor = VK_BLEND_FACTOR_SRC_COLOR;
    cb_attachments.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR;
    cb_attachments.colorBlendOp = VK_BLEND_OP_ADD;
    cb_attachments.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA;
    cb_attachments.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;  // bad!
    cb_attachments.alphaBlendOp = VK_BLEND_OP_ADD;
    CreatePipelineHelper::OneshotTest(*this, set_dsb_src_color_enable, kErrorBit,
                                      "VUID-VkPipelineColorBlendAttachmentState-dstAlphaBlendFactor-00611");
}

// Negative test: shader module codeSize that is too small for a SPIR-V header, then a
// codeSize that is not a multiple of 4 (VUID-VkShaderModuleCreateInfo-pCode-01376).
TEST_F(VkLayerTest, InvalidSPIRVCodeSize) {
    TEST_DESCRIPTION("Test that errors are produced for a spirv modules with invalid code sizes");

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "Invalid SPIR-V header");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkShaderModule module;
    VkShaderModuleCreateInfo moduleCreateInfo = LvlInitStruct<VkShaderModuleCreateInfo>();
    struct icd_spv_header spv;

    spv.magic = ICD_SPV_MAGIC;
    spv.version = ICD_SPV_VERSION;
    spv.gen_magic = 0;

    moduleCreateInfo.pCode = (const uint32_t *)&spv;
    // codeSize of 4 is smaller than a full SPIR-V header -> "Invalid SPIR-V header".
    moduleCreateInfo.codeSize = 4;
    moduleCreateInfo.flags = 0;
    vk::CreateShaderModule(m_device->device(), &moduleCreateInfo, NULL, &module);

    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-01376");
    std::vector<uint32_t> shader;
    VkShaderModuleCreateInfo module_create_info = LvlInitStruct<VkShaderModuleCreateInfo>();
    VkShaderModule shader_module;
    this->GLSLtoSPV(&m_device->props.limits, VK_SHADER_STAGE_VERTEX_BIT, bindStateVertShaderText, shader);
module_create_info.pCode = shader.data();
    // Introduce failure by making codeSize a non-multiple of 4
    module_create_info.codeSize = shader.size() * sizeof(uint32_t) - 1;
    module_create_info.flags = 0;

    vk::CreateShaderModule(m_device->handle(), &module_create_info, NULL, &shader_module);

    m_errorMonitor->VerifyFound();
}

// Negative test: SPIR-V header with a bitwise-inverted magic number is rejected.
TEST_F(VkLayerTest, InvalidSPIRVMagic) {
    TEST_DESCRIPTION("Test that an error is produced for a spirv module with a bad magic number");

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "Invalid SPIR-V magic number");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkShaderModule module;
    VkShaderModuleCreateInfo moduleCreateInfo = LvlInitStruct<VkShaderModuleCreateInfo>();
    struct icd_spv_header spv;

    spv.magic = (uint32_t)~ICD_SPV_MAGIC;  // corrupt the magic number
    spv.version = ICD_SPV_VERSION;
    spv.gen_magic = 0;

    moduleCreateInfo.pCode = (const uint32_t *)&spv;
    moduleCreateInfo.codeSize = sizeof(spv);
    moduleCreateInfo.flags = 0;

    vk::CreateShaderModule(m_device->device(), &moduleCreateInfo, NULL, &module);

    m_errorMonitor->VerifyFound();
}

// Warning test: a vertex shader output with no matching fragment input produces a
// performance warning ("not consumed by fragment shader"), not an error.
TEST_F(VkLayerTest, CreatePipelineVertexOutputNotConsumed) {
    TEST_DESCRIPTION("Test that a warning is produced for a vertex output that is not consumed by the fragment stage");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *vsSource = R"glsl(
        #version 450
        layout(location=0) out float x;
        void main(){
           gl_Position = vec4(1);
           x = 0;
        }
    )glsl";
    VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kPerformanceWarningBit, "not consumed by fragment shader");
}

// Negative test: a spec constant override (array size forced to 0) must be applied during
// shader validation, making the module invalid even though the default value compiles.
TEST_F(VkLayerTest, CreatePipelineCheckShaderSpecializationApplied) {
    TEST_DESCRIPTION(
        "Make sure specialization constants get applied during shader validation by using a value that breaks compilation.");
ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Size an array using a specialization constant of default value equal to 1.
    // (SPIR-V assembly: %size is SpecId 0; %array is float[%size]. With %size overridden
    // to 0 the array type becomes invalid.)
    std::string const fs_src = R"(
               OpCapability Shader
          %1 = OpExtInstImport "GLSL.std.450"
               OpMemoryModel Logical GLSL450
               OpEntryPoint Fragment %main "main"
               OpExecutionMode %main OriginUpperLeft
               OpSource GLSL 450
               OpName %main "main"
               OpName %size "size"
               OpName %array "array"
               OpDecorate %size SpecId 0
       %void = OpTypeVoid
          %3 = OpTypeFunction %void
      %float = OpTypeFloat 32
        %int = OpTypeInt 32 1
       %size = OpSpecConstant %int 1
%_arr_float_size = OpTypeArray %float %size
%_ptr_Function__arr_float_size = OpTypePointer Function %_arr_float_size
      %int_0 = OpConstant %int 0
    %float_0 = OpConstant %float 0
%_ptr_Function_float = OpTypePointer Function %float
       %main = OpFunction %void None %3
          %5 = OpLabel
      %array = OpVariable %_ptr_Function__arr_float_size Function
         %15 = OpAccessChain %_ptr_Function_float %array %int_0
               OpStore %15 %float_0
               OpReturn
               OpFunctionEnd)";
    VkShaderObj fs(this, fs_src, VK_SHADER_STAGE_FRAGMENT_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM);

    // Set the specialization constant to 0.
const VkSpecializationMapEntry entry = {
        0,                // id
        0,                // offset
        sizeof(uint32_t)  // size
    };
    uint32_t data = 0;
    const VkSpecializationInfo specialization_info = {
        1,
        &entry,
        1 * sizeof(uint32_t),
        &data,
    };

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
        helper.shader_stages_[1].pSpecializationInfo = &specialization_info;
    };
    // The zero-sized array produced by the override must be flagged at pipeline creation.
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      "VUID-VkPipelineShaderStageCreateInfo-pSpecializationInfo-06719");
}

// Negative test: a specialization map entry whose offset lies past the end of the
// specialization data buffer (VUID-VkSpecializationInfo-offset-00773).
TEST_F(VkLayerTest, CreatePipelineCheckShaderBadSpecializationOffsetOutOfBounds) {
    TEST_DESCRIPTION("Challenge core_validation with shader validation issues related to vkCreateGraphicsPipelines.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *fsSource = R"glsl(
        #version 450
        layout (constant_id = 0) const float r = 0.0f;
        layout(location = 0) out vec4 uFragColor;
        void main(){
           uFragColor = vec4(r,1,0,1);
        }
    )glsl";
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    // Entry offset is greater than dataSize.
const VkSpecializationMapEntry entry = {0, 5, sizeof(uint32_t)};  // offset 5 > dataSize 4

    uint32_t data = 1;
    const VkSpecializationInfo specialization_info = {
        1,
        &entry,
        1 * sizeof(float),
        &data,
    };

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
        helper.shader_stages_[1].pSpecializationInfo = &specialization_info;
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationInfo-offset-00773");
}

// Negative test: a specialization map entry whose offset+size overruns the data buffer
// (VUID-VkSpecializationInfo-pMapEntries-00774).
TEST_F(VkLayerTest, CreatePipelineCheckShaderBadSpecializationSizeOutOfBounds) {
    TEST_DESCRIPTION("Challenge core_validation with shader validation issues related to vkCreateGraphicsPipelines.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *fsSource = R"glsl(
        #version 450
        layout (constant_id = 0) const float r = 0.0f;
        layout(location = 0) out vec4 uFragColor;
        void main(){
           uFragColor = vec4(r,1,0,1);
        }
    )glsl";
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    // Entry size is greater than dataSize minus offset.
const VkSpecializationMapEntry entry = {0, 3, sizeof(uint32_t)};  // offset 3 + size 4 overruns the 4-byte blob

uint32_t data = 1;
const VkSpecializationInfo specialization_info = {
    1,
    &entry,
    1 * sizeof(float),
    &data,
};

const auto set_info = [&](CreatePipelineHelper &helper) {
    helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
    helper.shader_stages_[1].pSpecializationInfo = &specialization_info;
};
CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationInfo-pMapEntries-00774");
}

TEST_F(VkLayerTest, CreatePipelineCheckShaderDescriptorTypeMismatch) {
    TEST_DESCRIPTION("Challenge core_validation with shader validation issues related to vkCreateGraphicsPipelines.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Layout declares binding 0 as a STORAGE buffer, but the shader below consumes it as a uniform block.
    OneOffDescriptorSet descriptor_set(m_device, {
                                                     {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                                 });

    char const *vsSource = R"glsl(
        #version 450
        layout (std140, set = 0, binding = 0) uniform buf {
            mat4 mvp;
        } ubuf;
        void main(){
           gl_Position = ubuf.mvp * vec4(1);
        }
    )glsl";
    VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);

    CreatePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.shader_stages_ = {vs.GetStageCreateInfo(), pipe.fs_->GetStageCreateInfo()};
    pipe.InitState();
    pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&descriptor_set.layout_});

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "Type mismatch on descriptor slot 0.0 ");
    pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineCheckShaderDescriptorNotAccessible) {
    TEST_DESCRIPTION(
        "Create a pipeline in which a descriptor used by a shader stage does not include that stage in its stageFlags.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Binding 0 is visible only to the fragment stage, yet the vertex shader below reads it.
    OneOffDescriptorSet ds(m_device, {
                                         {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT /*!*/, nullptr},
                                     });

    char const *vsSource = R"glsl(
        #version 450
        layout (std140, set = 0, binding = 0) uniform buf {
            mat4 mvp;
        } ubuf;
        void main(){
           gl_Position = ubuf.mvp * vec4(1);
        }
    )glsl";
    VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);

    CreatePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.shader_stages_ = {vs.GetStageCreateInfo(), pipe.fs_->GetStageCreateInfo()};
    pipe.InitState();
    pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&ds.layout_});

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "Shader uses descriptor slot 0.0 ");
    pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineCheckShaderPushConstantNotDeclared) {
    TEST_DESCRIPTION(
        "Create a graphics pipeline in which a push constant range containing a push constant block member is not declared in the "
        "layout.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Vertex shader declares a push-constant block; the layout below exposes the
    // range only to the fragment stage, so the VS use is not covered.
    char const *vsSource = R"glsl(
        #version 450
        layout(push_constant, std430) uniform foo {
            float x;
        } consts;
        void main(){
           gl_Position = vec4(consts.x);
        }
    )glsl";
    VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);

    // Set up a push constant range
    VkPushConstantRange push_constant_range = {};
    // Set to the wrong stage to challenge core_validation
    push_constant_range.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT;
    push_constant_range.size = 4;

    const VkPipelineLayoutObj pipeline_layout(m_device, {}, {push_constant_range});

    CreatePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.shader_stages_ = {vs.GetStageCreateInfo(), pipe.fs_->GetStageCreateInfo()};
    pipe.InitState();
    pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {}, {push_constant_range});

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-layout-00756");
    pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InvalidPushConstantRange) {
    TEST_DESCRIPTION("Invalid use of VkPushConstantRange structs.");

    ASSERT_NO_FATAL_FAILURE(Init());
    VkPhysicalDeviceProperties device_props = {};
    vk::GetPhysicalDeviceProperties(gpu(), &device_props);
    // will be at least 256 as required from the spec
    const uint32_t maxPushConstantsSize = device_props.limits.maxPushConstantsSize;

    VkPipelineLayout pipeline_layout = VK_NULL_HANDLE;
    VkPushConstantRange push_constant_range = {0, 0, 4};
    // pipeline_layout_info keeps pointing at push_constant_range, which is mutated before each attempt.
    VkPipelineLayoutCreateInfo pipeline_layout_info{
        VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, nullptr, 0, 0, nullptr, 1, &push_constant_range};

    // stageFlags of 0
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPushConstantRange-stageFlags-requiredbitmask");
    vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_info, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();

    // offset over limit
    push_constant_range = {VK_SHADER_STAGE_VERTEX_BIT, maxPushConstantsSize, 8};
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPushConstantRange-offset-00294");
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPushConstantRange-size-00298");
    vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_info, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();

    // offset not multiple of 4
    push_constant_range = {VK_SHADER_STAGE_VERTEX_BIT, 1, 8};
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPushConstantRange-offset-00295");
    vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_info, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();

    // size of 0
    push_constant_range = {VK_SHADER_STAGE_VERTEX_BIT, 0, 0};
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPushConstantRange-size-00296");
    vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_info, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();

    // size not multiple of 4
    push_constant_range = {VK_SHADER_STAGE_VERTEX_BIT, 0, 7};
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPushConstantRange-size-00297");
    vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_info, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();

    // size over limit
    push_constant_range = {VK_SHADER_STAGE_VERTEX_BIT, 0, maxPushConstantsSize + 4};
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPushConstantRange-size-00298");
vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_info, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();

// size over limit of non-zero offset
push_constant_range = {VK_SHADER_STAGE_VERTEX_BIT, 4, maxPushConstantsSize};
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPushConstantRange-size-00298");
vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_info, NULL, &pipeline_layout);
m_errorMonitor->VerifyFound();

// Sanity check its a valid range before making duplicate
push_constant_range = {VK_SHADER_STAGE_VERTEX_BIT, 0, maxPushConstantsSize};
m_errorMonitor->ExpectSuccess();
ASSERT_VK_SUCCESS(vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_info, NULL, &pipeline_layout));
vk::DestroyPipelineLayout(m_device->device(), pipeline_layout, nullptr);
m_errorMonitor->VerifyNotFound();

// Duplicate ranges
VkPushConstantRange push_constant_range_duplicate[2] = {push_constant_range, push_constant_range};
pipeline_layout_info.pushConstantRangeCount = 2;
pipeline_layout_info.pPushConstantRanges = push_constant_range_duplicate;
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pPushConstantRanges-00292");
vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_info, nullptr, &pipeline_layout);
m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, InvalidCmdPushConstantRange) {
    TEST_DESCRIPTION("Invalid use of VkPushConstantRange values in vkCmdPushConstants.");

    // The device-profile layer is used below to clamp maxPushConstantsSize.
    if (!EnableDeviceProfileLayer()) {
        printf("%s Failed to enable device profile layer.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT = nullptr;
    PFN_vkGetOriginalPhysicalDeviceLimitsEXT fpvkGetOriginalPhysicalDeviceLimitsEXT = nullptr;

    // Load required functions
    if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceLimitsEXT, fpvkGetOriginalPhysicalDeviceLimitsEXT)) {
        printf("%s Failed to device profile layer.\n", kSkipPrefix);
        return;
    }

    // Set limit to be same max as the shader usages
    const uint32_t maxPushConstantsSize = 16;
    VkPhysicalDeviceProperties props;
    fpvkGetOriginalPhysicalDeviceLimitsEXT(gpu(), &props.limits);
    props.limits.maxPushConstantsSize = maxPushConstantsSize;
    fpvkSetPhysicalDeviceLimitsEXT(gpu(), &props.limits);

    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitViewport());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    m_errorMonitor->ExpectSuccess();
    // Vertex shader consuming exactly maxPushConstantsSize (16) bytes of push constants.
    char const *const vsSource = R"glsl(
        #version 450
        layout(push_constant, std430) uniform foo {
            float x[4];
        } constants;
        void main(){
           gl_Position = vec4(constants.x[0]);
        }
    )glsl";

    VkShaderObj const vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj const fs(this, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT);

    // Set up a push constant range
    VkPushConstantRange push_constant_range = {VK_SHADER_STAGE_VERTEX_BIT, 0, maxPushConstantsSize};
    const VkPipelineLayoutObj pipeline_layout(m_device, {}, {push_constant_range});

    CreatePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
    pipe.InitState();
    pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {}, {push_constant_range});
    pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyNotFound();

    const float data[16] = {};  // dummy data to match shader size

    m_commandBuffer->begin();

    // size of 0
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdPushConstants-size-arraylength");
    vk::CmdPushConstants(m_commandBuffer->handle(), pipe.pipeline_layout_.handle(), VK_SHADER_STAGE_VERTEX_BIT, 0, 0, data);
    m_errorMonitor->VerifyFound();

    // offset not multiple of 4
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdPushConstants-offset-00368");
    vk::CmdPushConstants(m_commandBuffer->handle(), pipe.pipeline_layout_.handle(), VK_SHADER_STAGE_VERTEX_BIT, 1, 4, data);
    m_errorMonitor->VerifyFound();

    // size not multiple of 4
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdPushConstants-size-00369");
    vk::CmdPushConstants(m_commandBuffer->handle(), pipe.pipeline_layout_.handle(), VK_SHADER_STAGE_VERTEX_BIT, 0, 5, data);
    m_errorMonitor->VerifyFound();

    // offset at limit
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdPushConstants-offset-00370");
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdPushConstants-size-00371");
    vk::CmdPushConstants(m_commandBuffer->handle(), pipe.pipeline_layout_.handle(), VK_SHADER_STAGE_VERTEX_BIT,
                         maxPushConstantsSize, 4, data);
    m_errorMonitor->VerifyFound();

    // size at limit
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdPushConstants-size-00371");
    vk::CmdPushConstants(m_commandBuffer->handle(), pipe.pipeline_layout_.handle(), VK_SHADER_STAGE_VERTEX_BIT, 0,
                         maxPushConstantsSize + 4, data);
    m_errorMonitor->VerifyFound();

    // Size at limit, should be valid
    m_errorMonitor->ExpectSuccess();
    vk::CmdPushConstants(m_commandBuffer->handle(), pipe.pipeline_layout_.handle(), VK_SHADER_STAGE_VERTEX_BIT, 0,
                         maxPushConstantsSize, data);
    m_errorMonitor->VerifyNotFound();

    m_commandBuffer->end();
}

TEST_F(VkLayerTest, CreatePipelineCheckShaderNotEnabled) {
    TEST_DESCRIPTION(
        "Create a graphics pipeline in which a capability declared by the shader requires a feature not enabled on the device.");

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // Some awkward steps are required to test with custom device features.
VkPhysicalDeviceFeatures device_features = {};
// Disable support for 64 bit floats
device_features.shaderFloat64 = false;
// The sacrificial device object
ASSERT_NO_FATAL_FAILURE(InitState(&device_features));

ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

// Fragment shader uses dvec4, which needs shaderFloat64 — disabled above.
char const *fsSource = R"glsl(
    #version 450
    layout(location=0) out vec4 color;
    void main(){
       dvec4 green = vec4(0.0, 1.0, 0.0, 1.0);
       color = vec4(green);
    }
)glsl";

VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

CreatePipelineHelper pipe(*this);
pipe.InitInfo();
pipe.shader_stages_ = {pipe.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
pipe.InitState();
pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device);

m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-01091");
pipe.CreateGraphicsPipeline();
m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreateShaderModuleCheckBadCapability) {
    TEST_DESCRIPTION("Create a shader in which a capability declared by the shader is not supported.");
    // Note that this failure message comes from spirv-tools, specifically the validator.
ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

// Minimal SPIR-V module declaring the ImageRect capability, which Vulkan disallows.
const std::string spv_source = R"(
    OpCapability ImageRect
    OpEntryPoint Vertex %main "main"
    %main = OpFunction %void None %3
    OpReturn
    OpFunctionEnd
)";

m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "Capability ImageRect is not allowed by Vulkan");
VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_VERTEX_BIT, spv_source, "main", nullptr, SPV_ENV_VULKAN_1_0);
m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineFragmentInputNotProvided) {
    TEST_DESCRIPTION(
        "Test that an error is produced for a fragment shader input which is not present in the outputs of the previous stage");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // FS reads location 0, which the default vertex shader never writes.
    char const *fsSource = R"glsl(
        #version 450
        layout(location=0) in float x;
        layout(location=0) out vec4 color;
        void main(){
           color = vec4(x);
        }
    )glsl";
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "not written by vertex shader");
}

TEST_F(VkLayerTest, CreatePipelineFragmentInputNotProvidedInBlock) {
    TEST_DESCRIPTION(
        "Test that an error is produced for a fragment shader input within an interace block, which is not present in the outputs "
        "of the previous stage.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Same as above, but the unprovided input lives inside an interface block.
    char const *fsSource = R"glsl(
        #version 450
        in block { layout(location=0) float x; } ins;
        layout(location=0) out vec4 color;
        void main(){
           color = vec4(ins.x);
        }
    )glsl";
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "not written by vertex shader");
}
TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatch) {
    TEST_DESCRIPTION("Test that an error is produced for mismatched types across the vertex->fragment shader interface");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // VS writes an int at location 0; FS reads a float there.
    char const *vsSource = R"glsl(
        #version 450
        layout(location=0) out int x;
        void main(){
           x = 0;
           gl_Position = vec4(1);
        }
    )glsl";
    char const *fsSource = R"glsl(
        #version 450
        layout(location=0) in float x; /* VS writes int */
        layout(location=0) out vec4 color;
        void main(){
           color = vec4(x);
        }
    )glsl";

    VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "Type mismatch on location 0");
}

TEST_F(VkLayerTest, CreatePipelineVsFsTypeMismatchInBlock) {
    TEST_DESCRIPTION(
        "Test that an error is produced for mismatched types across the vertex->fragment shader interface, when the variable is "
        "contained within an interface block");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Same int/float mismatch as above, but inside interface blocks.
    char const *vsSource = R"glsl(
        #version 450
        out block { layout(location=0) int x; } outs;
        void main(){
           outs.x = 0;
           gl_Position = vec4(1);
        }
    )glsl";
    char const *fsSource = R"glsl(
        #version 450
        in block { layout(location=0) float x; } ins; /* VS writes int */
        layout(location=0) out vec4 color;
        void main(){
           color = vec4(ins.x);
        }
    )glsl";

    VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "Type mismatch on location 0");
}

TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByLocation) {
    TEST_DESCRIPTION(
        "Test that an error is produced for location mismatches across the vertex->fragment shader interface; This should manifest "
        "as a not-written/not-consumed pair, but flushes out broken walking of the interfaces");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // VS writes location 1; FS consumes location 0.
    char const *vsSource = R"glsl(
        #version 450
        out block { layout(location=1) float x; } outs;
        void main(){
           outs.x = 0;
           gl_Position = vec4(1);
        }
    )glsl";
    char const *fsSource = R"glsl(
        #version 450
        in block { layout(location=0) float x; } ins;
        layout(location=0) out vec4 color;
        void main(){
           color = vec4(ins.x);
        }
    )glsl";

    VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "location 0.0 which is not written by vertex shader");
}

TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByComponent) {
    TEST_DESCRIPTION(
        "Test that an error is produced for component mismatches across the vertex->fragment shader interface. It's not enough to "
        "have the same set of locations in use; matching is defined in terms of spirv variables.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Same location (0), different components: VS writes component 0, FS reads component 1.
    char const *vsSource = R"glsl(
        #version 450
        out block { layout(location=0, component=0) float x; } outs;
        void main(){
           outs.x = 0;
           gl_Position = vec4(1);
        }
    )glsl";
    char const *fsSource = R"glsl(
        #version 450
        in block { layout(location=0, component=1) float x; } ins;
        layout(location=0) out vec4 color;
        void main(){
           color = vec4(ins.x);
        }
    )glsl";

    VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "location 0.1 which is not written by vertex shader");
}

TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByPrecision) {
    TEST_DESCRIPTION("Test that the RelaxedPrecision decoration is validated to match");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // mediump output vs highp input at the same location.
    char const *vsSource = R"glsl(
        #version 450
        layout(location=0) out mediump float x;
        void main() {
           gl_Position = vec4(0);
           x = 1.0;
        }
    )glsl";
    char const *fsSource = R"glsl(
        #version 450
        layout(location=0) in highp float x;
        layout(location=0) out vec4 color;
        void main() {
           color = vec4(x);
        }
    )glsl";

    VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "differ in precision");
}

TEST_F(VkLayerTest, CreatePipelineVsFsMismatchByPrecisionBlock) {
    TEST_DESCRIPTION("Test that the RelaxedPrecision decoration is validated to match");

    ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

// Same mediump/highp mismatch as the previous test, but inside interface blocks.
char const *vsSource = R"glsl(
    #version 450
    out block { layout(location=0) mediump float x; };
    void main() {
       gl_Position = vec4(0);
       x = 1.0;
    }
)glsl";
char const *fsSource = R"glsl(
    #version 450
    in block { layout(location=0) highp float x; };
    layout(location=0) out vec4 color;
    void main() {
       color = vec4(x);
    }
)glsl";

VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);
VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

const auto set_info = [&](CreatePipelineHelper &helper) {
    helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
};
CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "differ in precision");
}

TEST_F(VkLayerTest, CreatePipelineAttribNotConsumed) {
    TEST_DESCRIPTION("Test that a warning is produced for a vertex attribute which is not consumed by the vertex shader");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // One attribute at binding/location 0; the default vertex shader consumes no inputs.
    VkVertexInputBindingDescription input_binding;
    memset(&input_binding, 0, sizeof(input_binding));

    VkVertexInputAttributeDescription input_attrib;
    memset(&input_attrib, 0, sizeof(input_attrib));
    input_attrib.format = VK_FORMAT_R32_SFLOAT;

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.vi_ci_.pVertexBindingDescriptions = &input_binding;
        helper.vi_ci_.vertexBindingDescriptionCount = 1;
        helper.vi_ci_.pVertexAttributeDescriptions = &input_attrib;
        helper.vi_ci_.vertexAttributeDescriptionCount = 1;
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kPerformanceWarningBit, "location 0 not consumed by vertex shader");
}

TEST_F(VkLayerTest, CreatePipelineAttribLocationMismatch) {
    TEST_DESCRIPTION(
        "Test that a warning is produced for a location mismatch on vertex attributes. This flushes out bad behavior in the "
        "interface walker");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkVertexInputBindingDescription input_binding;
    memset(&input_binding, 0, sizeof(input_binding));

    VkVertexInputAttributeDescription input_attrib;
    memset(&input_attrib, 0, sizeof(input_attrib));
    input_attrib.format = VK_FORMAT_R32_SFLOAT;

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.vi_ci_.pVertexBindingDescriptions = &input_binding;
        helper.vi_ci_.vertexBindingDescriptionCount = 1;
        helper.vi_ci_.pVertexAttributeDescriptions = &input_attrib;
        helper.vi_ci_.vertexAttributeDescriptionCount = 1;
    };
    m_errorMonitor->SetUnexpectedError("Vertex shader consumes input at location 1 but not provided");
    CreatePipelineHelper::OneshotTest(*this, set_info, kPerformanceWarningBit, "location 0 not consumed by vertex shader");
}

TEST_F(VkLayerTest, CreatePipelineAttribNotProvided) {
    TEST_DESCRIPTION("Test that an error is produced for a vertex shader input which is not provided by a vertex attribute");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *vsSource = R"glsl(
        #version 450
        layout(location=0) in vec4 x; /* not provided */
        void main(){
           gl_Position = x;
        }
    )glsl";
    VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "Vertex shader consumes input at location 0 but not provided");
}

TEST_F(VkLayerTest, CreatePipelineAttribTypeMismatch) {
    TEST_DESCRIPTION(
        "Test that an error is produced for a mismatch between the fundamental type (float/int/uint) of an attribute and the "
        "vertex shader input that consumes it");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkVertexInputBindingDescription input_binding;
    memset(&input_binding, 0,
sizeof(input_binding));

VkVertexInputAttributeDescription input_attrib;
memset(&input_attrib, 0, sizeof(input_attrib));
input_attrib.format = VK_FORMAT_R32_SFLOAT;

// Attribute is a float; the shader input at the same location is an int.
char const *vsSource = R"glsl(
    #version 450
    layout(location=0) in int x; /* attrib provided float */
    void main(){
       gl_Position = vec4(x);
    }
)glsl";

VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);

const auto set_info = [&](CreatePipelineHelper &helper) {
    helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
    helper.vi_ci_.pVertexBindingDescriptions = &input_binding;
    helper.vi_ci_.vertexBindingDescriptionCount = 1;
    helper.vi_ci_.pVertexAttributeDescriptions = &input_attrib;
    helper.vi_ci_.vertexAttributeDescriptionCount = 1;
};
CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "location 0 does not match vertex shader input type");
}

TEST_F(VkLayerTest, CreatePipelineDuplicateStage) {
    TEST_DESCRIPTION("Test that an error is produced for a pipeline containing multiple shaders for the same stage");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    const auto set_info = [&](CreatePipelineHelper &helper) {
        // Vertex stage listed twice.
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), helper.vs_->GetStageCreateInfo(),
                                 helper.fs_->GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-stage-00726");
}

TEST_F(VkLayerTest, CreatePipelineMissingEntrypoint) {
    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Entry point named "foo" does not exist in the fragment shader module.
    VkShaderObj fs(this, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL, nullptr,
                   "foo");

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkPipelineShaderStageCreateInfo-pName-00707");
}

TEST_F(VkLayerTest, CreatePipelineDepthStencilRequired) {
m_errorMonitor->SetDesiredFailureMsg(
    kErrorBit, "pDepthStencilState is NULL when rasterization is enabled and subpass uses a depth/stencil attachment");

ASSERT_NO_FATAL_FAILURE(Init());
ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT);
VkShaderObj fs(this, bindStateFragShaderText, VK_SHADER_STAGE_FRAGMENT_BIT);

VkPipelineObj pipe(m_device);
pipe.AddDefaultColorAttachment();
pipe.AddShader(&vs);
pipe.AddShader(&fs);

VkDescriptorSetObj descriptorSet(m_device);
descriptorSet.AppendDummy();
descriptorSet.CreateVKDescriptorSet(m_commandBuffer);

// Render pass with one color and one depth/stencil attachment; the pipeline is
// created against it without any pDepthStencilState.
VkAttachmentDescription attachments[] = {
    {
        0,
        VK_FORMAT_B8G8R8A8_UNORM,
        VK_SAMPLE_COUNT_1_BIT,
        VK_ATTACHMENT_LOAD_OP_DONT_CARE,
        VK_ATTACHMENT_STORE_OP_DONT_CARE,
        VK_ATTACHMENT_LOAD_OP_DONT_CARE,
        VK_ATTACHMENT_STORE_OP_DONT_CARE,
        VK_IMAGE_LAYOUT_UNDEFINED,
        VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
    },
    {
        0,
        VK_FORMAT_D16_UNORM,
        VK_SAMPLE_COUNT_1_BIT,
        VK_ATTACHMENT_LOAD_OP_DONT_CARE,
        VK_ATTACHMENT_STORE_OP_DONT_CARE,
        VK_ATTACHMENT_LOAD_OP_DONT_CARE,
        VK_ATTACHMENT_STORE_OP_DONT_CARE,
        VK_IMAGE_LAYOUT_UNDEFINED,
        VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
    },
};
VkAttachmentReference refs[] = {
    {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    {1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL},
};
VkSubpassDescription subpass = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &refs[0], nullptr, &refs[1], 0, nullptr};
VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, attachments, 1, &subpass, 0, nullptr};
VkRenderPass rp;
VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp);
ASSERT_VK_SUCCESS(err);

pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), rp);

m_errorMonitor->VerifyFound();

vk::DestroyRenderPass(m_device->device(), rp, nullptr);
}

TEST_F(VkLayerTest, CreatePipelineTessPatchDecorationMismatch) {
    TEST_DESCRIPTION(
        "Test that an error is produced for a variable output from the TCS without the patch decoration, but consumed in the TES "
        "with the decoration.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    if (!m_device->phy().features().tessellationShader) {
        printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix);
        return;
    }

    // TCS writes x as a per-vertex output (no `patch`); TES reads it `patch in`.
    char const *tcsSource = R"glsl(
        #version 450
        layout(location=0) out int x[];
        layout(vertices=3) out;
        void main(){
           gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;
           gl_TessLevelInner[0] = 1;
           x[gl_InvocationID] = gl_InvocationID;
        }
    )glsl";
    char const *tesSource = R"glsl(
        #version 450
        layout(triangles, equal_spacing, cw) in;
        layout(location=0) patch in int x;
        void main(){
           gl_Position.xyz = gl_TessCoord;
           gl_Position.w = x;
        }
    )glsl";

    VkShaderObj tcs(this, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
    VkShaderObj tes(this, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);

    VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0,
                                                 VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE};

    VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3};

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.gp_ci_.pTessellationState = &tsci;
        helper.gp_ci_.pInputAssemblyState = &iasci;
        helper.shader_stages_.emplace_back(tcs.GetStageCreateInfo());
        helper.shader_stages_.emplace_back(tes.GetStageCreateInfo());
    };
    CreatePipelineHelper::OneshotTest(
        *this, set_info, kErrorBit,
        "is per-vertex in tessellation control shader stage but per-patch in tessellation evaluation shader stage");
}

TEST_F(VkLayerTest, CreatePipelineTessErrors) {
    TEST_DESCRIPTION("Test various errors when creating a graphics pipeline with tessellation stages active.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    if (!m_device->phy().features().tessellationShader) {
        printf("%s Device does not support tessellation shaders; skipped.\n", kSkipPrefix);
        return;
    }

    char const *tcsSource = R"glsl(
        #version 450
        layout(vertices=3) out;
        void main(){
           gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;
           gl_TessLevelInner[0] = 1;
        }
    )glsl";
    char const *tesSource = R"glsl(
        #version 450
        layout(triangles, equal_spacing, cw) in;
        void main(){
           gl_Position.xyz = gl_TessCoord;
           gl_Position.w = 0;
        }
    )glsl";

    VkShaderObj tcs(this, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
    VkShaderObj tes(this, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);

    VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0,
                                                 VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE};
    VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3};

    // set_info reads these through pointers, so each case below mutates them
    // before invoking OneshotTest.
    std::vector<VkPipelineShaderStageCreateInfo> shader_stages = {};
    VkPipelineInputAssemblyStateCreateInfo iasci_bad = iasci;
    VkPipelineInputAssemblyStateCreateInfo *p_iasci = nullptr;
    VkPipelineTessellationStateCreateInfo tsci_bad = tsci;
    VkPipelineTessellationStateCreateInfo *p_tsci = nullptr;

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.gp_ci_.pTessellationState = p_tsci;
        helper.gp_ci_.pInputAssemblyState = p_iasci;
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
        helper.shader_stages_.insert(helper.shader_stages_.end(), shader_stages.begin(), shader_stages.end());
    };

    iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;  // otherwise we get a failure about invalid topology
    p_iasci = &iasci_bad;
    // Pass a tess control shader without a tess eval shader
    shader_stages = {tcs.GetStageCreateInfo()};
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pStages-00729");

    // Pass a tess eval shader without a tess control shader
    shader_stages = {tes.GetStageCreateInfo()};
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
"VUID-VkGraphicsPipelineCreateInfo-pStages-00730");

p_iasci = &iasci;
shader_stages = {};
// Pass patch topology without tessellation shaders
CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-topology-00737");

shader_stages = {tcs.GetStageCreateInfo(), tes.GetStageCreateInfo()};
// Pass a NULL pTessellationState (with active tessellation shader stages)
CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pStages-00731");

// Pass an invalid pTessellationState (bad sType)
tsci_bad.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
p_tsci = &tsci_bad;
shader_stages = {tcs.GetStageCreateInfo(), tes.GetStageCreateInfo()};
CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkPipelineTessellationStateCreateInfo-sType-sType");

// Pass out-of-range patchControlPoints
p_iasci = &iasci;
tsci_bad = tsci;
tsci_bad.patchControlPoints = 0;
CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                  "VUID-VkPipelineTessellationStateCreateInfo-patchControlPoints-01214");
tsci_bad.patchControlPoints = m_device->props.limits.maxTessellationPatchSize + 1;
CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                  "VUID-VkPipelineTessellationStateCreateInfo-patchControlPoints-01214");
p_tsci = &tsci;

// Pass an invalid primitive topology
iasci_bad = iasci;
iasci_bad.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;
p_iasci = &iasci_bad;
CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pStages-00736");
}

TEST_F(VkLayerTest, CreatePipelineAttribBindingConflict) {
    TEST_DESCRIPTION(
        "Test that an error is produced for a vertex attribute setup where multiple bindings provide the same location");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    /* Two binding descriptions for binding 0 */
    VkVertexInputBindingDescription input_bindings[2];
    memset(input_bindings, 0, sizeof(input_bindings));
VkVertexInputAttributeDescription input_attrib;
    memset(&input_attrib, 0, sizeof(input_attrib));
    input_attrib.format = VK_FORMAT_R32_SFLOAT;

    char const *vsSource = R"glsl( #version 450 layout(location=0) in float x; /* attrib provided float */ void main(){ gl_Position = vec4(x); } )glsl";

    VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);

    // Two binding descriptions (declared above, both binding 0) but a single
    // attribute; the duplicate binding number is what must be rejected.
    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
        helper.vi_ci_.pVertexBindingDescriptions = input_bindings;
        helper.vi_ci_.vertexBindingDescriptionCount = 2;
        helper.vi_ci_.pVertexAttributeDescriptions = &input_attrib;
        helper.vi_ci_.vertexAttributeDescriptionCount = 1;
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      "VUID-VkPipelineVertexInputStateCreateInfo-pVertexBindingDescriptions-00616");
}

TEST_F(VkLayerTest, CreatePipelineFragmentOutputNotWritten) {
    TEST_DESCRIPTION(
        "Test that an error is produced for a fragment shader which does not provide an output for one of the pipeline's color "
        "attachments");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Minimal fragment shader writes no outputs while attachment 0 has a
    // non-zero color write mask -> warning expected.
    VkShaderObj fs(this, bindStateMinimalShaderText, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
        helper.cb_attachments_[0].colorWriteMask = 1;
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kWarningBit, "Attachment 0 not written by fragment shader");
}

TEST_F(VkLayerTest, CreatePipelineFragmentOutputNotConsumed) {
    TEST_DESCRIPTION(
        "Test that a warning is produced for a fragment shader which provides a spurious output with no matching attachment");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    char const *fsSource = R"glsl( #version 450 layout(location=0) out vec4 x; layout(location=1) out vec4 y; /* no matching attachment for this */ void main(){ x = vec4(1); y = vec4(1); } )glsl";
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kWarningBit,
                                      "fragment shader writes to output location 1 with no matching attachment");
}

TEST_F(VkLayerTest, CreatePipelineFragmentNoOutputLocation0ButAlphaToCoverageEnabled) {
    TEST_DESCRIPTION("Test that an error is produced when alpha to coverage is enabled but no output at location 0 is declared.");

    ASSERT_NO_FATAL_FAILURE(Init());
    // Render target created with zero color attachments.
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget(0u));

    // Minimal fragment shader: declares no output at location 0 at all.
    VkShaderObj fs(this, bindStateMinimalShaderText, VK_SHADER_STAGE_FRAGMENT_BIT);

    VkPipelineMultisampleStateCreateInfo ms_state_ci = LvlInitStruct<VkPipelineMultisampleStateCreateInfo>();
    ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
    ms_state_ci.alphaToCoverageEnable = VK_TRUE;

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
        helper.pipe_ms_state_ci_ = ms_state_ci;
    };
    CreatePipelineHelper::OneshotTest(
        *this, set_info, kErrorBit,
        "fragment shader doesn't declare alpha output at location 0 even though alpha to coverage is enabled.");
}

TEST_F(VkLayerTest, CreatePipelineFragmentNoAlphaLocation0ButAlphaToCoverageEnabled) {
    TEST_DESCRIPTION(
        "Test that an error is produced when alpha to coverage is enabled but output at location 0 doesn't have alpha component.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget(0u));

    // vec3 output at location 0: present, but lacks the alpha (4th) component.
    char const *fsSource = R"glsl( #version 450 layout(location=0) out vec3 x; void main(){ x = vec3(1); } )glsl";
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    VkPipelineMultisampleStateCreateInfo ms_state_ci = LvlInitStruct<VkPipelineMultisampleStateCreateInfo>();
    ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_1_BIT;
    ms_state_ci.alphaToCoverageEnable = VK_TRUE;

    const
auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
        helper.pipe_ms_state_ci_ = ms_state_ci;
    };
    CreatePipelineHelper::OneshotTest(
        *this, set_info, kErrorBit,
        "fragment shader doesn't declare alpha output at location 0 even though alpha to coverage is enabled.");
}

TEST_F(VkLayerTest, CreatePipelineFragmentOutputTypeMismatch) {
    TEST_DESCRIPTION(
        "Test that an error is produced for a mismatch between the fundamental type of an fragment shader output variable, and the "
        "format of the corresponding attachment");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Integer output against the default (UNORM) color attachment.
    char const *fsSource = R"glsl( #version 450 layout(location=0) out ivec4 x; /* not UNORM */ void main(){ x = ivec4(1); } )glsl";
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kWarningBit, "does not match fragment shader output type");
}

TEST_F(VkLayerTest, CreatePipelineExceedVertexMaxComponentsWithBuiltins) {
    TEST_DESCRIPTION("Test if the max componenets checks are being checked from OpMemberDecorate built-ins");

    if (!EnableDeviceProfileLayer()) {
        printf("%s Failed to enable device profile layer.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT = nullptr;
    PFN_vkGetOriginalPhysicalDeviceLimitsEXT fpvkGetOriginalPhysicalDeviceLimitsEXT = nullptr;
    if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceLimitsEXT, fpvkGetOriginalPhysicalDeviceLimitsEXT)) {
        printf("%s Failed to device profile layer.\n", kSkipPrefix);
        return;
    }

    // Pin both limits to 128 via the device-profile layer so the component
    // arithmetic below is device-independent.
    VkPhysicalDeviceProperties props;
    fpvkGetOriginalPhysicalDeviceLimitsEXT(gpu(), &props.limits);
    props.limits.maxVertexOutputComponents = 128;
    props.limits.maxFragmentInputComponents = 128;
    fpvkSetPhysicalDeviceLimitsEXT(gpu(), &props.limits);

    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // vec4 == 4 components
    // This gives 124 which is just below the set max limit
    const uint32_t numVec4 = 31;

    std::string vsSourceStr =
        "#version 450\n"
        "layout(location = 0) out block {\n";
    for (uint32_t i = 0; i < numVec4; i++) {
        vsSourceStr += "vec4 v" + std::to_string(i) + ";\n";
    }
    vsSourceStr +=
        "} outVs;\n"
        "\n"
        "void main() {\n"
        " vec4 x = vec4(1.0);\n";
    for (uint32_t i = 0; i < numVec4; i++) {
        vsSourceStr += "outVs.v" + std::to_string(i) + " = x;\n";
    }

    // GLSL is defined to have a struct for the vertex shader built-in:
    //
    // out gl_PerVertex {
    // vec4 gl_Position;
    // float gl_PointSize;
    // float gl_ClipDistance[];
    // float gl_CullDistance[];
    // } gl_out[];
    //
    // by including gl_Position here 7 extra vertex input components are added pushing it over the 128
    // 124 + 7 > 128 limit
    vsSourceStr += " gl_Position = x;\n";
    vsSourceStr += "}";

    std::string fsSourceStr =
        "#version 450\n"
        "layout(location = 0) in block {\n";
    for (uint32_t i = 0; i < numVec4; i++) {
        fsSourceStr += "vec4 v" + std::to_string(i) + ";\n";
    }
    fsSourceStr +=
        "} inPs;\n"
        "\n"
        "layout(location=0) out vec4 color;\n"
        "\n"
        "void main(){\n"
        " color = vec4(1);\n"
        "}\n";

    VkShaderObj vs(this, vsSourceStr, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, fsSourceStr, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    // maxFragmentInputComponents is not reached because GLSL should not be including any input fragment stage built-ins by default
    // only maxVertexOutputComponents is reached
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-Location-06272");
}

TEST_F(VkLayerTest, CreatePipelineExceedFragmentMaxComponentsWithBuiltins) {
    TEST_DESCRIPTION("Test if the max componenets checks are being checked from OpDecorate built-ins");

    if (!EnableDeviceProfileLayer()) {
        printf("%s Failed to enable device profile layer.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT = nullptr;
    PFN_vkGetOriginalPhysicalDeviceLimitsEXT fpvkGetOriginalPhysicalDeviceLimitsEXT = nullptr;
    if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceLimitsEXT, fpvkGetOriginalPhysicalDeviceLimitsEXT)) {
        printf("%s Failed to device profile layer.\n", kSkipPrefix);
        return;
    }

    // Pin both limits to 128 so the arithmetic below is deterministic.
    VkPhysicalDeviceProperties props;
    fpvkGetOriginalPhysicalDeviceLimitsEXT(gpu(), &props.limits);
    props.limits.maxVertexOutputComponents = 128;
    props.limits.maxFragmentInputComponents = 128;
    fpvkSetPhysicalDeviceLimitsEXT(gpu(), &props.limits);

    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // vec4 == 4 components
    // This gives 128 which is the max limit
    const uint32_t numVec4 = 32;  // 32 * 4 == 128

    std::string vsSourceStr =
        "#version 450\n"
        "layout(location = 0) out block {\n";
    for (uint32_t i = 0; i < numVec4; i++) {
        vsSourceStr += "vec4 v" + std::to_string(i) + ";\n";
    }
    vsSourceStr +=
        "} outVs;\n"
        "\n"
        "void main() {\n"
        " vec4 x = vec4(1.0);\n";
    for (uint32_t i = 0; i < numVec4; i++) {
        vsSourceStr += "outVs.v" + std::to_string(i) + " = x;\n";
    }
    vsSourceStr += "}";

    std::string fsSourceStr =
        "#version 450\n"
        "layout(location = 0) in block {\n";
    for (uint32_t i = 0; i < numVec4; i++) {
        fsSourceStr += "vec4 v" + std::to_string(i) + ";\n";
    }
    // By adding gl_PointCoord it adds 2 more components to the fragment input stage
    fsSourceStr +=
        "} inPs;\n"
        "\n"
        "layout(location=0) out vec4 color;\n"
        "\n"
        "void main(){\n"
        " color = vec4(1) * gl_PointCoord.x;\n"
        "}\n";

    VkShaderObj vs(this, vsSourceStr, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, fsSourceStr, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    // maxVertexOutputComponents is not reached because GLSL should not be including any output vertex stage built-ins
    // only maxFragmentInputComponents is reached
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-Location-06272");
}

TEST_F(VkLayerTest, CreatePipelineExceedMaxVertexOutputComponents) {
    TEST_DESCRIPTION(
        "Test that an error is produced when the number of output components from the vertex stage exceeds the device limit");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // overflow == 0: no overflow, 1: too many components, 2: location number too large
    for (int overflow = 0; overflow < 3; ++overflow) {
        m_errorMonitor->Reset();
        const uint32_t maxVsOutComp = m_device->props.limits.maxVertexOutputComponents + overflow;
        std::string vsSourceStr = "#version 450\n\n";
        const uint32_t numVec4 = maxVsOutComp / 4;
        uint32_t location = 0;
        if (overflow == 2) {
            // Single output whose location is already past the limit.
            vsSourceStr += "layout(location=" + std::to_string(numVec4 + 1) + ") out vec4 vn;\n";
        } else {
            // Fill components with vec4 outputs, then a float/vecN for the remainder.
            for (uint32_t i = 0; i < numVec4; i++) {
                vsSourceStr += "layout(location=" + std::to_string(location) + ") out vec4 v" + std::to_string(i) + ";\n";
                location += 1;
            }
            const uint32_t remainder = maxVsOutComp % 4;
            if (remainder != 0) {
                if (remainder == 1) {
                    vsSourceStr += "layout(location=" + std::to_string(location) + ") out float" + " vn;\n";
                } else {
                    vsSourceStr += "layout(location=" + std::to_string(location) + ") out vec" + std::to_string(remainder) + " vn;\n";
                }
                location += 1;
            }
        }
        vsSourceStr +=
            "void main(){\n"
            "}\n";

        std::string fsSourceStr = R"glsl( #version 450 layout(location=0) out vec4 color; void main(){ color = vec4(1); } )glsl";

        VkShaderObj vs(this, vsSourceStr, VK_SHADER_STAGE_VERTEX_BIT);
        VkShaderObj fs(this, fsSourceStr, VK_SHADER_STAGE_FRAGMENT_BIT);

        const auto set_info = [&](CreatePipelineHelper &helper) {
            helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
        };
        switch (overflow) {
            case 2:
                // just component limit (maxVertexOutputComponents)
                CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-Location-06272");
                break;
            case 1:
                // component and location limit (maxVertexOutputComponents)
                CreatePipelineHelper::OneshotTest(
                    *this, set_info, kErrorBit,
                    vector<string>{"VUID-RuntimeSpirv-Location-06272", "VUID-RuntimeSpirv-Location-06272"});
                break;
            default:
                assert(0);
            case 0:
                // Positive case: exactly at the limit must not generate errors.
                CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
                break;
        }
    }
}

TEST_F(VkLayerTest, CreatePipelineExceedMaxComponentsBlocks) {
    TEST_DESCRIPTION("Test if the max componenets checks are done properly when in a single block");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // To make the test simple, just make sure max is 128 or less (most HW is 64 or 128)
    if (m_device->props.limits.maxVertexOutputComponents > 128 || m_device->props.limits.maxFragmentInputComponents > 128) {
        printf("%s maxVertexOutputComponents or maxFragmentInputComponents too high for test; skipped.\n", kSkipPrefix);
        return;
    }
    // vec4 == 4 components
    // so this put the test over 128
    const uint32_t numVec4 = 33;

    std::string vsSourceStr =
        "#version 450\n"
        "layout(location = 0) out block {\n";
    for (uint32_t i = 0; i < numVec4; i++) {
        vsSourceStr += "vec4 v" + std::to_string(i) + ";\n";
    }
    vsSourceStr +=
        "} outVs;\n"
        "\n"
        "void main() {\n"
        " vec4 x = vec4(1.0);\n";
    for (uint32_t i = 0; i < numVec4; i++) {
        vsSourceStr += "outVs.v" + std::to_string(i) + " = x;\n";
    }
    vsSourceStr += "}";

    std::string fsSourceStr =
        "#version 450\n"
        "layout(location = 0) in block {\n";
    for (uint32_t i = 0; i < numVec4; i++) {
        fsSourceStr += "vec4 v" + std::to_string(i) + ";\n";
    }
    fsSourceStr +=
        "} inPs;\n"
        "\n"
        "layout(location=0) out vec4 color;\n"
        "\n"
        "void main(){\n"
        " color = vec4(1);\n"
        "}\n";

    VkShaderObj vs(this, vsSourceStr, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, fsSourceStr, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()};
    };
    // 1 for maxVertexOutputComponents and 1 for maxFragmentInputComponents
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      vector<string>{"VUID-RuntimeSpirv-Location-06272", "VUID-RuntimeSpirv-Location-06272",
                                                     "VUID-RuntimeSpirv-Location-06272", "VUID-RuntimeSpirv-Location-06272"});
}

TEST_F(VkLayerTest, CreatePipelineExceedMaxTessellationControlInputOutputComponents) {
    TEST_DESCRIPTION(
        "Test that errors are produced when the number of per-vertex input and/or output components to the tessellation control "
        "stage exceeds the device limit");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // overflow == 0: no overflow, 1: too many components, 2: location number too large
    for (int overflow = 0; overflow < 3; ++overflow) {
        m_errorMonitor->Reset();
        VkPhysicalDeviceFeatures feat;
        vk::GetPhysicalDeviceFeatures(gpu(), &feat);
        if (!feat.tessellationShader) {
            printf("%s tessellation shader stage(s) unsupported.\n", kSkipPrefix);
            return;
        }

        // Tessellation control stage
        std::string tcsSourceStr =
            "#version 450\n"
            "\n";
        // Input components
        const uint32_t maxTescInComp = m_device->props.limits.maxTessellationControlPerVertexInputComponents + overflow;
        const uint32_t numInVec4 = maxTescInComp / 4;
        uint32_t inLocation = 0;
        if (overflow == 2) {
            // Single input whose location is already past the limit.
            tcsSourceStr += "layout(location=" + std::to_string(numInVec4 + 1) + ") in vec4 vnIn[];\n";
        } else {
            for (uint32_t i = 0; i < numInVec4; i++) {
                tcsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 v" + std::to_string(i) + "In[];\n";
                inLocation += 1;
            }
            const uint32_t inRemainder = maxTescInComp % 4;
            if (inRemainder != 0) {
                if (inRemainder == 1) {
                    tcsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in float" + " vnIn[];\n";
                } else {
                    tcsSourceStr +=
                        "layout(location=" + std::to_string(inLocation) + ") in vec" + std::to_string(inRemainder) + " vnIn[];\n";
                }
                inLocation += 1;
            }
        }

        // Output components
        const uint32_t maxTescOutComp = m_device->props.limits.maxTessellationControlPerVertexOutputComponents + overflow;
        const uint32_t numOutVec4 = maxTescOutComp / 4;
        uint32_t outLocation = 0;
        if (overflow == 2) {
            tcsSourceStr += "layout(location=" + std::to_string(numOutVec4 + 1) + ") out vec4 vnOut[3];\n";
        } else {
            for (uint32_t i = 0; i < numOutVec4; i++) {
                tcsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 v" + std::to_string(i) + "Out[3];\n";
                outLocation += 1;
            }
            const uint32_t outRemainder = maxTescOutComp % 4;
            if (outRemainder != 0) {
                if (outRemainder == 1) {
                    tcsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out float" + " vnOut[3];\n";
                } else {
                    tcsSourceStr +=
                        "layout(location=" + std::to_string(outLocation) + ") out vec" + std::to_string(outRemainder) + " vnOut[3];\n";
                }
                outLocation += 1;
            }
        }
        tcsSourceStr += "layout(vertices=3) out;\n";
        // Finalize
        tcsSourceStr +=
            "\n"
            "void main(){\n"
            "}\n";

        VkShaderObj tcs(this, tcsSourceStr, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
        VkShaderObj tes(this, bindStateTeshaderText, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);

        VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo = LvlInitStruct<VkPipelineInputAssemblyStateCreateInfo>();
        inputAssemblyInfo.flags = 0;
        inputAssemblyInfo.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;
        inputAssemblyInfo.primitiveRestartEnable = VK_FALSE;

        VkPipelineTessellationStateCreateInfo tessInfo = LvlInitStruct<VkPipelineTessellationStateCreateInfo>();
        tessInfo.flags = 0;
        tessInfo.patchControlPoints = 3;

        // Interface mismatches between the generated stages are not under test here.
        m_errorMonitor->SetUnexpectedError("UNASSIGNED-CoreValidation-Shader-InputNotProduced");

        const auto set_info = [&](CreatePipelineHelper &helper) {
            helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), tcs.GetStageCreateInfo(), tes.GetStageCreateInfo(),
                                     helper.fs_->GetStageCreateInfo()};
            helper.gp_ci_.pTessellationState = &tessInfo;
            helper.gp_ci_.pInputAssemblyState = &inputAssemblyInfo;
        };
        // maxTessellationControlPerVertexInputComponents and maxTessellationControlPerVertexOutputComponents
        switch (overflow) {
            case 2:
                // in and out component limit
                CreatePipelineHelper::OneshotTest(
                    *this, set_info, kErrorBit,
                    vector<string>{"VUID-RuntimeSpirv-Location-06272", "VUID-RuntimeSpirv-Location-06272"});
                break;
            case 1:
                // (in and out component limit) and (in and out location limit)
                CreatePipelineHelper::OneshotTest(
                    *this, set_info, kErrorBit,
                    vector<string>{"VUID-RuntimeSpirv-Location-06272", "VUID-RuntimeSpirv-Location-06272",
                                   "VUID-RuntimeSpirv-Location-06272", "VUID-RuntimeSpirv-Location-06272"});
                break;
            default:
                assert(0);
            case 0:
                // Positive case: exactly at the limit must not generate errors.
                CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
                break;
        }
    }
}

TEST_F(VkLayerTest, CreatePipelineExceedMaxTessellationEvaluationInputOutputComponents) {
    TEST_DESCRIPTION(
        "Test that errors are produced when the number of input and/or output components to the tessellation evaluation stage "
        "exceeds the device limit");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // overflow == 0: no overflow, 1: too many components, 2: location number too large
    for (int overflow = 0; overflow < 3; ++overflow) {
        m_errorMonitor->Reset();
        VkPhysicalDeviceFeatures feat;
        vk::GetPhysicalDeviceFeatures(gpu(), &feat);
        if (!feat.tessellationShader) {
            printf("%s tessellation shader stage(s) unsupported.\n", kSkipPrefix);
            return;
        }

        // Tessellation evaluation stage
        std::string tesSourceStr =
            "#version 450\n"
            "\n"
            "layout (triangles) in;\n"
            "\n";
        // Input components
        const uint32_t maxTeseInComp = m_device->props.limits.maxTessellationEvaluationInputComponents + overflow;
        const uint32_t numInVec4 = maxTeseInComp / 4;
        uint32_t inLocation = 0;
        if (overflow == 2) {
            tesSourceStr += "layout(location=" + std::to_string(numInVec4 + 1) + ") in vec4 vnIn[];\n";
        } else {
            for (uint32_t i = 0; i < numInVec4; i++) {
                tesSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 v" + std::to_string(i) +
"In[];\n";
                inLocation += 1;
            }
            const uint32_t inRemainder = maxTeseInComp % 4;
            if (inRemainder != 0) {
                if (inRemainder == 1) {
                    tesSourceStr += "layout(location=" + std::to_string(inLocation) + ") in float" + " vnIn[];\n";
                } else {
                    tesSourceStr +=
                        "layout(location=" + std::to_string(inLocation) + ") in vec" + std::to_string(inRemainder) + " vnIn[];\n";
                }
                inLocation += 1;
            }
        }

        // Output components
        const uint32_t maxTeseOutComp = m_device->props.limits.maxTessellationEvaluationOutputComponents + overflow;
        const uint32_t numOutVec4 = maxTeseOutComp / 4;
        uint32_t outLocation = 0;
        if (overflow == 2) {
            tesSourceStr += "layout(location=" + std::to_string(numOutVec4 + 1) + ") out vec4 vnOut;\n";
        } else {
            for (uint32_t i = 0; i < numOutVec4; i++) {
                tesSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 v" + std::to_string(i) + "Out;\n";
                outLocation += 1;
            }
            const uint32_t outRemainder = maxTeseOutComp % 4;
            if (outRemainder != 0) {
                if (outRemainder == 1) {
                    tesSourceStr += "layout(location=" + std::to_string(outLocation) + ") out float" + " vnOut;\n";
                } else {
                    tesSourceStr +=
                        "layout(location=" + std::to_string(outLocation) + ") out vec" + std::to_string(outRemainder) + " vnOut;\n";
                }
                outLocation += 1;
            }
        }
        // Finalize
        tesSourceStr +=
            "\n"
            "void main(){\n"
            "}\n";

        VkShaderObj tcs(this, bindStateTscShaderText, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
        VkShaderObj tes(this, tesSourceStr, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);

        VkPipelineInputAssemblyStateCreateInfo inputAssemblyInfo = LvlInitStruct<VkPipelineInputAssemblyStateCreateInfo>();
        inputAssemblyInfo.flags = 0;
        inputAssemblyInfo.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST;
        inputAssemblyInfo.primitiveRestartEnable = VK_FALSE;

        VkPipelineTessellationStateCreateInfo tessInfo = LvlInitStruct<VkPipelineTessellationStateCreateInfo>();
        tessInfo.flags = 0;
        tessInfo.patchControlPoints = 3;

        // Interface mismatches between the generated stages are not under test here.
        m_errorMonitor->SetUnexpectedError("UNASSIGNED-CoreValidation-Shader-InputNotProduced");

        const auto set_info =
            [&](CreatePipelineHelper &helper) {
                helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), tcs.GetStageCreateInfo(), tes.GetStageCreateInfo(),
                                         helper.fs_->GetStageCreateInfo()};
                helper.gp_ci_.pTessellationState = &tessInfo;
                helper.gp_ci_.pInputAssemblyState = &inputAssemblyInfo;
            };
        // maxTessellationEvaluationInputComponents and maxTessellationEvaluationOutputComponents
        switch (overflow) {
            case 2:
                // in and out component limit
                CreatePipelineHelper::OneshotTest(
                    *this, set_info, kErrorBit,
                    vector<string>{"VUID-RuntimeSpirv-Location-06272", "VUID-RuntimeSpirv-Location-06272"});
                break;
            case 1:
                // (in and out component limit) and (in and out location limit)
                CreatePipelineHelper::OneshotTest(
                    *this, set_info, kErrorBit,
                    vector<string>{"VUID-RuntimeSpirv-Location-06272", "VUID-RuntimeSpirv-Location-06272",
                                   "VUID-RuntimeSpirv-Location-06272", "VUID-RuntimeSpirv-Location-06272"});
                break;
            default:
                assert(0);
            case 0:
                // Positive case: exactly at the limit must not generate errors.
                CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
                break;
        }
    }
}

TEST_F(VkLayerTest, CreatePipelineExceedMaxGeometryInputOutputComponents) {
    TEST_DESCRIPTION(
        "Test that errors are produced when the number of input and/or output components to the geometry stage exceeds the device "
        "limit");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // overflow == 0: no overflow, 1: too many components, 2: location number too large
    for (int overflow = 0; overflow < 3; ++overflow) {
        m_errorMonitor->Reset();
        VkPhysicalDeviceFeatures feat;
        vk::GetPhysicalDeviceFeatures(gpu(), &feat);
        if (!feat.geometryShader) {
            printf("%s geometry shader stage unsupported.\n", kSkipPrefix);
            return;
        }

        std::string gsSourceStr =
            "#version 450\n"
            "\n"
            "layout(triangles) in;\n"
            "layout(invocations=1) in;\n";

        // Input components
        const uint32_t maxGeomInComp = m_device->props.limits.maxGeometryInputComponents + overflow;
        const uint32_t numInVec4 = maxGeomInComp / 4;
        uint32_t inLocation = 0;
        if (overflow == 2) {
            gsSourceStr += "layout(location="
+ std::to_string(numInVec4 + 1) + ") in vec4 vnIn[];\n";
        } else {
            for (uint32_t i = 0; i < numInVec4; i++) {
                gsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in vec4 v" + std::to_string(i) + "In[];\n";
                inLocation += 1;
            }
            const uint32_t inRemainder = maxGeomInComp % 4;
            if (inRemainder != 0) {
                if (inRemainder == 1) {
                    gsSourceStr += "layout(location=" + std::to_string(inLocation) + ") in float" + " vnIn[];\n";
                } else {
                    gsSourceStr +=
                        "layout(location=" + std::to_string(inLocation) + ") in vec" + std::to_string(inRemainder) + " vnIn[];\n";
                }
                inLocation += 1;
            }
        }

        // Output components
        const uint32_t maxGeomOutComp = m_device->props.limits.maxGeometryOutputComponents + overflow;
        const uint32_t numOutVec4 = maxGeomOutComp / 4;
        uint32_t outLocation = 0;
        if (overflow == 2) {
            // NOTE(review): unlike the other stage tests this uses numOutVec4 without
            // "+ 1"; the location still lands past the component limit, but confirm
            // the asymmetry is intended.
            gsSourceStr += "layout(location=" + std::to_string(numOutVec4) + ") out vec4 vnOut;\n";
        } else {
            for (uint32_t i = 0; i < numOutVec4; i++) {
                gsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out vec4 v" + std::to_string(i) + "Out;\n";
                outLocation += 1;
            }
            const uint32_t outRemainder = maxGeomOutComp % 4;
            if (outRemainder != 0) {
                if (outRemainder == 1) {
                    gsSourceStr += "layout(location=" + std::to_string(outLocation) + ") out float" + " vnOut;\n";
                } else {
                    gsSourceStr +=
                        "layout(location=" + std::to_string(outLocation) + ") out vec" + std::to_string(outRemainder) + " vnOut;\n";
                }
                outLocation += 1;
            }
        }
        // Finalize
        // In the overflow cases max_vertices is sized to also push the test past
        // maxGeometryTotalOutputComponents.
        int max_vertices = overflow ? (m_device->props.limits.maxGeometryTotalOutputComponents / maxGeomOutComp + 1) : 1;
        gsSourceStr += "layout(triangle_strip, max_vertices = " + std::to_string(max_vertices) +
                       ") out;\n"
                       "\n"
                       "void main(){\n"
                       "}\n";

        VkShaderObj gs(this, gsSourceStr, VK_SHADER_STAGE_GEOMETRY_BIT);

        // Interface mismatches between the generated stages are not under test here.
        m_errorMonitor->SetUnexpectedError("UNASSIGNED-CoreValidation-Shader-InputNotProduced");

        const auto set_info = [&](CreatePipelineHelper &helper) {
            helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), gs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
        };
        // maxGeometryInputComponents and maxGeometryOutputComponents
        switch (overflow) {
            case 2:
                // in and out component limit
                CreatePipelineHelper::OneshotTest(
                    *this, set_info, kErrorBit,
                    vector<string>{"VUID-RuntimeSpirv-Location-06272", "VUID-RuntimeSpirv-Location-06272"});
                break;
            case 1:
                // (in and out component limit) and (in and out location limit) and maxGeometryTotalOutputComponents
                CreatePipelineHelper::OneshotTest(
                    *this, set_info, kErrorBit,
                    vector<string>{"VUID-RuntimeSpirv-Location-06272", "VUID-RuntimeSpirv-Location-06272",
                                   "VUID-RuntimeSpirv-Location-06272", "VUID-RuntimeSpirv-Location-06272",
                                   "VUID-RuntimeSpirv-Location-06272"});
                break;
            default:
                assert(0);
            case 0:
                // Positive case: exactly at the limit must not generate errors.
                CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
                break;
        }
    }
}

TEST_F(VkLayerTest, CreatePipelineExceedMaxFragmentInputComponents) {
    TEST_DESCRIPTION(
        "Test that an error is produced when the number of input components from the fragment stage exceeds the device limit");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // overflow == 0: no overflow, 1: too many components, 2: location number too large
    for (int overflow = 0; overflow < 3; ++overflow) {
        m_errorMonitor->Reset();
        const uint32_t maxFsInComp = m_device->props.limits.maxFragmentInputComponents + overflow;
        std::string fsSourceStr = "#version 450\n\n";
        const uint32_t numVec4 = maxFsInComp / 4;
        uint32_t location = 0;
        if (overflow == 2) {
            fsSourceStr +=
"layout(location=" + std::to_string(numVec4 + 1) + ") in float" + " vn;\n";
        } else {
            for (uint32_t i = 0; i < numVec4; i++) {
                fsSourceStr += "layout(location=" + std::to_string(location) + ") in vec4 v" + std::to_string(i) + ";\n";
                location += 1;
            }
            const uint32_t remainder = maxFsInComp % 4;
            if (remainder != 0) {
                if (remainder == 1) {
                    fsSourceStr += "layout(location=" + std::to_string(location) + ") in float" + " vn;\n";
                } else {
                    fsSourceStr += "layout(location=" + std::to_string(location) + ") in vec" + std::to_string(remainder) + " vn;\n";
                }
                location += 1;
            }
        }
        fsSourceStr +=
            "layout(location=0) out vec4 color;"
            "\n"
            "void main(){\n"
            " color = vec4(1);\n"
            "}\n";
        VkShaderObj fs(this, fsSourceStr, VK_SHADER_STAGE_FRAGMENT_BIT);

        // The vertex stage does not produce these inputs; that is not under test here.
        m_errorMonitor->SetUnexpectedError("UNASSIGNED-CoreValidation-Shader-InputNotProduced");

        const auto set_info = [&](CreatePipelineHelper &helper) {
            helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
        };
        switch (overflow) {
            case 2:
                // just component limit (maxFragmentInputComponents)
                CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-Location-06272");
                break;
            case 1:
                // component and location limit (maxFragmentInputComponents)
                CreatePipelineHelper::OneshotTest(
                    *this, set_info, kErrorBit,
                    vector<string>{"VUID-RuntimeSpirv-Location-06272", "VUID-RuntimeSpirv-Location-06272"});
                break;
            default:
                assert(0);
            case 0:
                // Positive case: exactly at the limit must not generate errors.
                CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
                break;
        }
    }
}

TEST_F(VkLayerTest, CreatePipelineExceedMaxGeometryInstanceVertexCount) {
    TEST_DESCRIPTION(
        "Test that errors are produced when the number of output vertices/instances in the geometry stage exceeds the device "
        "limit");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    for (int overflow = 0; overflow < 2; ++overflow) {
        m_errorMonitor->Reset();
        VkPhysicalDeviceFeatures feat;
        vk::GetPhysicalDeviceFeatures(gpu(), &feat);
        if (!feat.geometryShader) {
            printf("%s geometry shader stage unsupported.\n", kSkipPrefix);
            return;
        }
        // Hand-written SPIR-V assembly so the Invocations / OutputVertices
        // execution-mode operands can be set directly.
        std::string gsSourceStr = R"( OpCapability Geometry OpMemoryModel Logical GLSL450 OpEntryPoint Geometry %main "main" OpExecutionMode %main InputPoints OpExecutionMode %main OutputTriangleStrip )";
        if (overflow) {
            // One past each of maxGeometryShaderInvocations / maxGeometryOutputVertices.
            gsSourceStr += "OpExecutionMode %main Invocations " +
                           std::to_string(m_device->props.limits.maxGeometryShaderInvocations + 1) +
                           "\n\
OpExecutionMode %main OutputVertices " +
                           std::to_string(m_device->props.limits.maxGeometryOutputVertices + 1);
        } else {
            gsSourceStr += R"( OpExecutionMode %main Invocations 1 OpExecutionMode %main OutputVertices 1 )";
        }
        gsSourceStr += R"( OpSource GLSL 450 %void = OpTypeVoid %3 = OpTypeFunction %void %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )";
        VkShaderObj gs(this, gsSourceStr, VK_SHADER_STAGE_GEOMETRY_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM);

        const auto set_info = [&](CreatePipelineHelper &helper) {
            helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), gs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()};
        };
        if (overflow) {
            CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                              vector<string>{"VUID-VkPipelineShaderStageCreateInfo-stage-00714",
                                                             "VUID-VkPipelineShaderStageCreateInfo-stage-00715"});
        } else {
            // Positive case.
            CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true);
        }
    }
}

TEST_F(VkLayerTest, CreatePipelineUniformBlockNotProvided) {
    TEST_DESCRIPTION(
        "Test that an error is produced for a shader consuming a uniform block which has no corresponding binding in the pipeline "
        "layout");

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "not declared in pipeline layout");

    ASSERT_NO_FATAL_FAILURE(Init());

    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT);
    // Fragment shader consumes a uniform block; the layout built below provides none.
    VkShaderObj fs(this, bindStateFragUniformShaderText, VK_SHADER_STAGE_FRAGMENT_BIT);

    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);

    /* set up CB 0; type is UNORM by default */
    pipe.AddDefaultColorAttachment();
ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDescriptorSetObj descriptorSet(m_device); descriptorSet.CreateVKDescriptorSet(m_commandBuffer); pipe.CreateVKPipeline(descriptorSet.GetPipelineLayout(), renderPass()); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreatePipelinePushConstantsNotInLayout) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming push constants which are not provided in the pipeline layout"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *vsSource = R"glsl( #version 450 layout(push_constant, std430) uniform foo { float x; } consts; void main(){ gl_Position = vec4(consts.x); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT); CreatePipelineHelper pipe(*this); pipe.InitInfo(); pipe.shader_stages_ = {vs.GetStageCreateInfo(), pipe.fs_->GetStageCreateInfo()}; pipe.InitState(); pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {}); /* should have generated an error -- no push constant ranges provided! 
*/
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-layout-00756");
    pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineInputAttachmentMissing) {
    TEST_DESCRIPTION(
        "Test that an error is produced for a shader consuming an input attachment which is not included in the subpass "
        "description");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Fragment shader reads input attachment index 0, but the default render pass used by
    // OneshotTest declares no input attachments in its subpass description.
    char const *fsSource = R"glsl(
        #version 450
        layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;
        layout(location=0) out vec4 color;
        void main() {
           color = subpassLoad(x);
        }
    )glsl";
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto set_info = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()};
        // The descriptor binding itself is provided, so the only complaint should be the
        // missing subpass input-attachment reference.
        helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}};
    };
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      "consumes input attachment index 0 but not provided in subpass");
}

TEST_F(VkLayerTest, CreatePipelineInputAttachmentTypeMismatch) {
    TEST_DESCRIPTION(
        "Test that an error is produced for a shader consuming an input attachment with a format having a different fundamental "
        "type");
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "input attachment 0 format of VK_FORMAT_R8G8B8A8_UINT does not match");

    ASSERT_NO_FATAL_FAILURE(Init());

    // subpassInput is the float-sampled type, while the attachment wired up below is UINT,
    // so the fundamental numeric types disagree.
    char const *fsSource = R"glsl(
        #version 450
        layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;
        layout(location=0) out vec4 color;
        void main() {
           color = subpassLoad(x);
        }
    )glsl";
    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkDescriptorSetLayoutBinding dslb = {0,
VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); VkAttachmentDescription descs[2] = { {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {0, VK_FORMAT_R8G8B8A8_UINT, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_ATTACHMENT_LOAD_OP_LOAD, VK_ATTACHMENT_STORE_OP_STORE, VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL}, }; VkAttachmentReference color = { 0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, }; VkAttachmentReference input = { 1, VK_IMAGE_LAYOUT_GENERAL, }; VkSubpassDescription sd = {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 1, &input, 1, &color, nullptr, nullptr, 0, nullptr}; VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 2, descs, 1, &sd, 0, nullptr}; VkRenderPass rp; VkResult err = vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp); ASSERT_VK_SUCCESS(err); // error here. 
pipe.CreateVKPipeline(pl.handle(), rp); m_errorMonitor->VerifyFound(); vk::DestroyRenderPass(m_device->device(), rp, nullptr); } TEST_F(VkLayerTest, CreatePipelineInputAttachmentMissingArray) { TEST_DESCRIPTION( "Test that an error is produced for a shader consuming an input attachment which is not included in the subpass " "description -- array case"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *fsSource = R"glsl( #version 450 layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput xs[1]; layout(location=0) out vec4 color; void main() { color = subpassLoad(xs[0]); } )glsl"; VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT); const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()}; helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 2, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}}; }; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "consumes input attachment index 0 but not provided in subpass"); } TEST_F(VkLayerTest, CreateComputePipelineMissingDescriptor) { TEST_DESCRIPTION( "Test that an error is produced for a compute pipeline consuming a descriptor which is not provided in the pipeline " "layout"); ASSERT_NO_FATAL_FAILURE(Init()); char const *csSource = R"glsl( #version 450 layout(local_size_x=1) in; layout(set=0, binding=0) buffer block { vec4 x; }; void main(){ x = vec4(1); } )glsl"; CreateComputePipelineHelper pipe(*this); pipe.InitInfo(); pipe.cs_.reset(new VkShaderObj(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT)); pipe.InitState(); pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {}); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "Shader uses descriptor slot 0.0"); pipe.CreateComputePipeline(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, CreateComputePipelineDescriptorTypeMismatch) { TEST_DESCRIPTION("Test that an error is produced for a pipeline consuming a 
descriptor-backed resource of a mismatched type"); ASSERT_NO_FATAL_FAILURE(Init()); char const *csSource = R"glsl( #version 450 layout(local_size_x=1) in; layout(set=0, binding=0) buffer block { vec4 x; }; void main() { x.x = 1.0f; } )glsl"; const auto set_info = [&](CreateComputePipelineHelper &helper) { helper.cs_.reset(new VkShaderObj(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT)); helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}}; }; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "but descriptor of type VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER"); } TEST_F(VkLayerTest, MultiplePushDescriptorSets) { TEST_DESCRIPTION("Verify an error message for multiple push descriptor sets."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME; skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PUSH_DESCRIPTOR_EXTENSION_NAME); } else { printf("%s Push Descriptors Extension not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); auto push_descriptor_prop = GetPushDescriptorProperties(instance(), gpu()); if (push_descriptor_prop.maxPushDescriptors < 1) { // Some implementations report an invalid maxPushDescriptors of 0 printf("%s maxPushDescriptors is zero, skipping tests\n", kSkipPrefix); return; } VkDescriptorSetLayoutBinding dsl_binding = {}; dsl_binding.binding = 0; dsl_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; dsl_binding.descriptorCount = 1; dsl_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; dsl_binding.pImmutableSamplers = NULL; const unsigned 
int descriptor_set_layout_count = 2; std::vector<VkDescriptorSetLayoutObj> ds_layouts; for (uint32_t i = 0; i < descriptor_set_layout_count; ++i) { dsl_binding.binding = i; ds_layouts.emplace_back(m_device, std::vector<VkDescriptorSetLayoutBinding>(1, dsl_binding), VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR); } const auto &ds_vk_layouts = MakeVkHandles<VkDescriptorSetLayout>(ds_layouts); VkPipelineLayout pipeline_layout; VkPipelineLayoutCreateInfo pipeline_layout_ci = LvlInitStruct<VkPipelineLayoutCreateInfo>(); pipeline_layout_ci.pushConstantRangeCount = 0; pipeline_layout_ci.pPushConstantRanges = NULL; pipeline_layout_ci.setLayoutCount = ds_vk_layouts.size(); pipeline_layout_ci.pSetLayouts = ds_vk_layouts.data(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-00293"); vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, AMDMixedAttachmentSamplesValidateGraphicsPipeline) { TEST_DESCRIPTION("Verify an error message for an incorrect graphics pipeline rasterization sample count."); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Set a mismatched sample count VkPipelineMultisampleStateCreateInfo ms_state_ci = LvlInitStruct<VkPipelineMultisampleStateCreateInfo>(); ms_state_ci.rasterizationSamples = VK_SAMPLE_COUNT_4_BIT; const auto set_info = [&](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_ = ms_state_ci; }; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, 
"VUID-VkGraphicsPipelineCreateInfo-subpass-01505"); } TEST_F(VkLayerTest, FramebufferMixedSamplesNV) { TEST_DESCRIPTION("Verify VK_NV_framebuffer_mixed_samples."); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME); return; } VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); if (VK_TRUE != device_features.sampleRateShading) { printf("%s Test requires unsupported sampleRateShading feature.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); struct TestCase { VkSampleCountFlagBits color_samples; VkSampleCountFlagBits depth_samples; VkSampleCountFlagBits raster_samples; VkBool32 depth_test; VkBool32 sample_shading; uint32_t table_count; bool positiveTest; std::string vuid; }; std::vector<TestCase> test_cases = { {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_FALSE, VK_FALSE, 1, true, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757"}, {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_8_BIT, VK_FALSE, VK_FALSE, 4, false, "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405"}, {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_8_BIT, VK_FALSE, VK_FALSE, 2, true, "VUID-VkPipelineCoverageModulationStateCreateInfoNV-coverageModulationTableEnable-01405"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_8_BIT, VK_TRUE, VK_FALSE, 1, false, "VUID-VkGraphicsPipelineCreateInfo-subpass-01411"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_8_BIT, VK_SAMPLE_COUNT_8_BIT, VK_TRUE, VK_FALSE, 1, true, "VUID-VkGraphicsPipelineCreateInfo-subpass-01411"}, 
{VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_1_BIT, VK_FALSE, VK_FALSE, 1, false, "VUID-VkGraphicsPipelineCreateInfo-subpass-01412"}, {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_FALSE, VK_FALSE, 1, true, "VUID-VkGraphicsPipelineCreateInfo-subpass-01412"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_FALSE, VK_TRUE, 1, false, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_FALSE, VK_FALSE, 1, true, "VUID-VkPipelineMultisampleStateCreateInfo-rasterizationSamples-01415"}, {VK_SAMPLE_COUNT_1_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_8_BIT, VK_FALSE, VK_FALSE, 1, true, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757"}}; for (const auto &test_case : test_cases) { VkAttachmentDescription att[2] = {{}, {}}; att[0].format = VK_FORMAT_R8G8B8A8_UNORM; att[0].samples = test_case.color_samples; att[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; att[1].format = VK_FORMAT_D24_UNORM_S8_UINT; att[1].samples = test_case.depth_samples; att[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkAttachmentReference cr = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference dr = {1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription sp = {}; sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; sp.colorAttachmentCount = 1; sp.pColorAttachments = &cr; sp.pResolveAttachments = NULL; sp.pDepthStencilAttachment = &dr; VkRenderPassCreateInfo rpi = LvlInitStruct<VkRenderPassCreateInfo>(); rpi.attachmentCount = 2; rpi.pAttachments = att; rpi.subpassCount = 1; rpi.pSubpasses = &sp; VkRenderPass rp; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubpassDescription-pDepthStencilAttachment-01418"); VkResult err = vk::CreateRenderPass(m_device->device(), 
&rpi, nullptr, &rp); m_errorMonitor->VerifyNotFound(); ASSERT_VK_SUCCESS(err); auto ds = lvl_init_struct<VkPipelineDepthStencilStateCreateInfo>(); auto cmi = lvl_init_struct<VkPipelineCoverageModulationStateCreateInfoNV>(); // Create a dummy modulation table that can be used for the positive // coverageModulationTableCount test. std::vector<float> cm_table{}; const auto break_samples = [&cmi, &rp, &ds, &cm_table, &test_case](CreatePipelineHelper &helper) { cm_table.resize(test_case.table_count); cmi.flags = 0; cmi.coverageModulationTableEnable = (test_case.table_count > 1); cmi.coverageModulationTableCount = test_case.table_count; cmi.pCoverageModulationTable = cm_table.data(); ds.depthTestEnable = test_case.depth_test; helper.pipe_ms_state_ci_.pNext = &cmi; helper.pipe_ms_state_ci_.rasterizationSamples = test_case.raster_samples; helper.pipe_ms_state_ci_.sampleShadingEnable = test_case.sample_shading; helper.gp_ci_.renderPass = rp; helper.gp_ci_.pDepthStencilState = &ds; }; CreatePipelineHelper::OneshotTest(*this, break_samples, kErrorBit, test_case.vuid, test_case.positiveTest); vk::DestroyRenderPass(m_device->device(), rp, nullptr); } } TEST_F(VkLayerTest, FramebufferMixedSamples) { TEST_DESCRIPTION("Verify that the expected VUIds are hits when VK_NV_framebuffer_mixed_samples is disabled."); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const VkFormat ds_format = FindSupportedDepthStencilFormat(gpu()); if (ds_format == VK_FORMAT_UNDEFINED) { printf("%s No Depth + Stencil format found rest of tests skipped.\n", kSkipPrefix); return; } struct TestCase { VkSampleCountFlagBits color_samples; VkSampleCountFlagBits depth_samples; VkSampleCountFlagBits raster_samples; bool positiveTest; }; std::vector<TestCase> test_cases = { {VK_SAMPLE_COUNT_2_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_8_BIT, false}, // Fails vk::CreateRenderPass and vk::CreateGraphicsPipeline 
{VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_8_BIT, false}, // Fails vk::CreateGraphicsPipeline {VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, VK_SAMPLE_COUNT_4_BIT, true} // Pass }; for (const auto &test_case : test_cases) { VkAttachmentDescription att[2] = {{}, {}}; att[0].format = VK_FORMAT_R8G8B8A8_UNORM; att[0].samples = test_case.color_samples; att[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; att[1].format = ds_format; att[1].samples = test_case.depth_samples; att[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkAttachmentReference cr = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}; VkAttachmentReference dr = {1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL}; VkSubpassDescription sp = {}; sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; sp.colorAttachmentCount = 1; sp.pColorAttachments = &cr; sp.pResolveAttachments = NULL; sp.pDepthStencilAttachment = &dr; VkRenderPassCreateInfo rpi = LvlInitStruct<VkRenderPassCreateInfo>(); rpi.attachmentCount = 2; rpi.pAttachments = att; rpi.subpassCount = 1; rpi.pSubpasses = &sp; VkRenderPass rp; if (test_case.color_samples == test_case.depth_samples) { m_errorMonitor->ExpectSuccess(); } else { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSubpassDescription-pDepthStencilAttachment-01418"); } VkResult err = vk::CreateRenderPass(m_device->device(), &rpi, nullptr, &rp); if (test_case.color_samples == test_case.depth_samples) { m_errorMonitor->VerifyNotFound(); } else { m_errorMonitor->VerifyFound(); continue; } ASSERT_VK_SUCCESS(err); VkPipelineDepthStencilStateCreateInfo ds = LvlInitStruct<VkPipelineDepthStencilStateCreateInfo>(); const auto break_samples = [&rp, &ds, &test_case](CreatePipelineHelper &helper) { helper.pipe_ms_state_ci_.rasterizationSamples = test_case.raster_samples; helper.gp_ci_.renderPass = rp; helper.gp_ci_.pDepthStencilState = &ds; 
}; CreatePipelineHelper::OneshotTest(*this, break_samples, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-subpass-00757", test_case.positiveTest); vk::DestroyRenderPass(m_device->device(), rp, nullptr); } } TEST_F(VkLayerTest, FramebufferMixedSamplesCoverageReduction) { TEST_DESCRIPTION("Verify VK_NV_coverage_reduction_mode."); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_COVERAGE_REDUCTION_MODE_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_COVERAGE_REDUCTION_MODE_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_NV_COVERAGE_REDUCTION_MODE_EXTENSION_NAME); return; } if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME); } else if (DeviceExtensionSupported(gpu(), nullptr, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); } else { printf("%s Neither %s nor %s are supported, skipping tests\n", kSkipPrefix, VK_NV_FRAMEBUFFER_MIXED_SAMPLES_EXTENSION_NAME, VK_AMD_MIXED_ATTACHMENT_SAMPLES_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); struct TestCase { VkSampleCountFlagBits raster_samples; VkSampleCountFlagBits color_samples; VkSampleCountFlagBits depth_samples; VkCoverageReductionModeNV coverage_reduction_mode; bool positiveTest; std::string vuid; }; std::vector<TestCase> test_cases; uint32_t combination_count = 0; std::vector<VkFramebufferMixedSamplesCombinationNV> combinations; PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV = (PFN_vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV)vk::GetInstanceProcAddr( instance(), 
"vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV"); ASSERT_NO_FATAL_FAILURE(vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(gpu(), &combination_count, nullptr)); if (combination_count < 1) { printf("%s No mixed sample combinations are supported, skipping tests.\n", kSkipPrefix); return; } combinations.resize(combination_count); ASSERT_NO_FATAL_FAILURE( vkGetPhysicalDeviceSupportedFramebufferMixedSamplesCombinationsNV(gpu(), &combination_count, &combinations[0])); // Pick the first supported combination for a positive test. test_cases.push_back({combinations[0].rasterizationSamples, static_cast<VkSampleCountFlagBits>(combinations[0].colorSamples), static_cast<VkSampleCountFlagBits>(combinations[0].depthStencilSamples), combinations[0].coverageReductionMode, true, "VUID-VkGraphicsPipelineCreateInfo-coverageReductionMode-02722"}); VkSampleCountFlags fb_sample_counts = m_device->phy().properties().limits.framebufferDepthSampleCounts; int max_sample_count = VK_SAMPLE_COUNT_64_BIT; while (max_sample_count > VK_SAMPLE_COUNT_1_BIT) { if (fb_sample_counts & max_sample_count) { break; } max_sample_count /= 2; } // Look for a valid combination that is not in the supported list for a negative test. 
bool neg_comb_found = false;
    // Search for a (coverage-reduction mode, rasterization, depth, color) sample combination
    // that is NOT in the device's supported list; using it should trigger VUID 02722.
    // ds iterates {rs, 0}: once with depth/stencil at the rasterization count, once with no
    // depth/stencil attachment at all. cs starts at rs/2, so only color counts below the
    // rasterization count are tried (presumably the mixed-samples case -- confirm).
    for (int mode = VK_COVERAGE_REDUCTION_MODE_TRUNCATE_NV; mode >= 0 && !neg_comb_found; mode--) {
        for (int rs = max_sample_count; rs >= VK_SAMPLE_COUNT_1_BIT && !neg_comb_found; rs /= 2) {
            for (int ds = rs; ds >= 0 && !neg_comb_found; ds -= rs) {
                for (int cs = rs / 2; cs > 0 && !neg_comb_found; cs /= 2) {
                    bool combination_found = false;
                    for (const auto &combination : combinations) {
                        // depthStencilSamples/colorSamples are reported as sample-count
                        // bitmasks, hence the '&' tests (vs '==' for rasterizationSamples).
                        if (mode == combination.coverageReductionMode && rs == combination.rasterizationSamples &&
                            ds & combination.depthStencilSamples && cs & combination.colorSamples) {
                            combination_found = true;
                            break;
                        }
                    }
                    if (!combination_found) {
                        // First unsupported combination becomes the negative test case.
                        neg_comb_found = true;
                        test_cases.push_back({static_cast<VkSampleCountFlagBits>(rs), static_cast<VkSampleCountFlagBits>(cs),
                                              static_cast<VkSampleCountFlagBits>(ds), static_cast<VkCoverageReductionModeNV>(mode),
                                              false, "VUID-VkGraphicsPipelineCreateInfo-coverageReductionMode-02722"});
                    }
                }
            }
        }
    }

    for (const auto &test_case : test_cases) {
        VkAttachmentDescription att[2] = {{}, {}};
        att[0].format = VK_FORMAT_R8G8B8A8_UNORM;
        att[0].samples = test_case.color_samples;
        att[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        att[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
        att[1].format = VK_FORMAT_D24_UNORM_S8_UINT;
        att[1].samples = test_case.depth_samples;
        att[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        att[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

        VkAttachmentReference cr = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
        VkAttachmentReference dr = {1, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL};

        VkSubpassDescription sp = {};
        sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
        sp.colorAttachmentCount = 1;
        sp.pColorAttachments = &cr;
        sp.pResolveAttachments = nullptr;
        // Depth/stencil attachment (and its description below) only exist for combinations
        // that requested a non-zero depth sample count.
        sp.pDepthStencilAttachment = (test_case.depth_samples) ? &dr : nullptr;

        VkRenderPassCreateInfo rpi = LvlInitStruct<VkRenderPassCreateInfo>();
        rpi.attachmentCount = (test_case.depth_samples) ?
2 : 1; rpi.pAttachments = att; rpi.subpassCount = 1; rpi.pSubpasses = &sp; VkRenderPass rp; ASSERT_VK_SUCCESS(vk::CreateRenderPass(m_device->device(), &rpi, nullptr, &rp)); VkPipelineDepthStencilStateCreateInfo dss = LvlInitStruct<VkPipelineDepthStencilStateCreateInfo>(); VkPipelineCoverageReductionStateCreateInfoNV crs = LvlInitStruct<VkPipelineCoverageReductionStateCreateInfoNV>(); const auto break_samples = [&rp, &dss, &crs, &test_case](CreatePipelineHelper &helper) { crs.flags = 0; crs.coverageReductionMode = test_case.coverage_reduction_mode; helper.pipe_ms_state_ci_.pNext = &crs; helper.pipe_ms_state_ci_.rasterizationSamples = test_case.raster_samples; helper.gp_ci_.renderPass = rp; helper.gp_ci_.pDepthStencilState = (test_case.depth_samples) ? &dss : nullptr; }; CreatePipelineHelper::OneshotTest(*this, break_samples, kErrorBit, test_case.vuid, test_case.positiveTest); vk::DestroyRenderPass(m_device->device(), rp, nullptr); } } TEST_F(VkLayerTest, FragmentCoverageToColorNV) { TEST_DESCRIPTION("Verify VK_NV_fragment_coverage_to_color."); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_NV_FRAGMENT_COVERAGE_TO_COLOR_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); struct TestCase { VkFormat format; VkBool32 enabled; uint32_t location; bool positive; }; const std::array<TestCase, 9> test_cases = {{ {VK_FORMAT_R8G8B8A8_UNORM, VK_FALSE, 0, true}, {VK_FORMAT_R8_UINT, VK_TRUE, 1, true}, {VK_FORMAT_R16_UINT, VK_TRUE, 1, true}, {VK_FORMAT_R16_SINT, VK_TRUE, 1, true}, {VK_FORMAT_R32_UINT, VK_TRUE, 1, true}, {VK_FORMAT_R32_SINT, VK_TRUE, 1, true}, {VK_FORMAT_R32_SINT, VK_TRUE, 2, false}, {VK_FORMAT_R8_SINT, VK_TRUE, 3, false}, 
{VK_FORMAT_R8G8B8A8_UNORM, VK_TRUE, 1, false}, }}; for (const auto &test_case : test_cases) { std::array<VkAttachmentDescription, 2> att = {{{}, {}}}; att[0].format = VK_FORMAT_R8G8B8A8_UNORM; att[0].samples = VK_SAMPLE_COUNT_1_BIT; att[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; att[1].format = VK_FORMAT_R8G8B8A8_UNORM; att[1].samples = VK_SAMPLE_COUNT_1_BIT; att[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; att[1].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; if (test_case.location < att.size()) { att[test_case.location].format = test_case.format; } const std::array<VkAttachmentReference, 3> cr = {{{0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {1, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}, {VK_ATTACHMENT_UNUSED, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL}}}; VkSubpassDescription sp = {}; sp.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; sp.colorAttachmentCount = cr.size(); sp.pColorAttachments = cr.data(); VkRenderPassCreateInfo rpi = LvlInitStruct<VkRenderPassCreateInfo>(); rpi.attachmentCount = att.size(); rpi.pAttachments = att.data(); rpi.subpassCount = 1; rpi.pSubpasses = &sp; const std::array<VkPipelineColorBlendAttachmentState, 3> cba = {{{}, {}, {}}}; VkPipelineColorBlendStateCreateInfo cbi = LvlInitStruct<VkPipelineColorBlendStateCreateInfo>(); cbi.attachmentCount = cba.size(); cbi.pAttachments = cba.data(); VkRenderPass rp; VkResult err = vk::CreateRenderPass(m_device->device(), &rpi, nullptr, &rp); ASSERT_VK_SUCCESS(err); VkPipelineCoverageToColorStateCreateInfoNV cci = LvlInitStruct<VkPipelineCoverageToColorStateCreateInfoNV>(); const auto break_samples = [&cci, &cbi, &rp, &test_case](CreatePipelineHelper &helper) { cci.coverageToColorEnable = test_case.enabled; cci.coverageToColorLocation = test_case.location; helper.pipe_ms_state_ci_.pNext = &cci; helper.gp_ci_.renderPass = rp; helper.gp_ci_.pColorBlendState = &cbi; }; CreatePipelineHelper::OneshotTest(*this, 
break_samples, kErrorBit, "VUID-VkPipelineCoverageToColorStateCreateInfoNV-coverageToColorEnable-01404", test_case.positive); vk::DestroyRenderPass(m_device->device(), rp, nullptr); } } TEST_F(VkLayerTest, ViewportSwizzleNV) { TEST_DESCRIPTION("Verify VK_NV_viewprot_swizzle."); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_NV_VIEWPORT_SWIZZLE_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkViewportSwizzleNV invalid_swizzles = { VkViewportCoordinateSwizzleNV(-1), VkViewportCoordinateSwizzleNV(-1), VkViewportCoordinateSwizzleNV(-1), VkViewportCoordinateSwizzleNV(-1), }; VkPipelineViewportSwizzleStateCreateInfoNV vp_swizzle_state = { VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_SWIZZLE_STATE_CREATE_INFO_NV}; vp_swizzle_state.viewportCount = 1; vp_swizzle_state.pViewportSwizzles = &invalid_swizzles; const std::vector<std::string> expected_vuids = {"VUID-VkViewportSwizzleNV-x-parameter", "VUID-VkViewportSwizzleNV-y-parameter", "VUID-VkViewportSwizzleNV-z-parameter", "VUID-VkViewportSwizzleNV-w-parameter"}; auto break_swizzles = [&vp_swizzle_state](CreatePipelineHelper &helper) { helper.vp_state_ci_.pNext = &vp_swizzle_state; }; CreatePipelineHelper::OneshotTest(*this, break_swizzles, kErrorBit, expected_vuids); struct TestCase { VkBool32 rasterizerDiscardEnable; uint32_t vp_count; uint32_t swizzel_vp_count; bool positive; }; const std::array<TestCase, 3> test_cases = {{{VK_TRUE, 1, 2, true}, {VK_FALSE, 1, 1, true}, {VK_FALSE, 1, 2, false}}}; std::array<VkViewportSwizzleNV, 2> swizzles = { {{VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV, VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV, VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV, 
VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV}, {VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_X_NV, VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Y_NV, VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_Z_NV, VK_VIEWPORT_COORDINATE_SWIZZLE_POSITIVE_W_NV}}}; for (const auto &test_case : test_cases) { assert(test_case.vp_count <= swizzles.size()); vp_swizzle_state.viewportCount = test_case.swizzel_vp_count; vp_swizzle_state.pViewportSwizzles = swizzles.data(); auto break_vp_count = [&vp_swizzle_state, &test_case](CreatePipelineHelper &helper) { helper.rs_state_ci_.rasterizerDiscardEnable = test_case.rasterizerDiscardEnable; helper.vp_state_ci_.viewportCount = test_case.vp_count; helper.vp_state_ci_.pNext = &vp_swizzle_state; }; CreatePipelineHelper::OneshotTest(*this, break_vp_count, kErrorBit, "VUID-VkPipelineViewportSwizzleStateCreateInfoNV-viewportCount-01215", test_case.positive); } } TEST_F(VkLayerTest, CooperativeMatrixNV) { TEST_DESCRIPTION("Test VK_NV_cooperative_matrix."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); std::array<const char *, 2> required_device_extensions = { {VK_NV_COOPERATIVE_MATRIX_EXTENSION_NAME, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME}}; for (auto device_extension : required_device_extensions) { if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) { m_device_extension_names.push_back(device_extension); } else { printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension); return; } } // glslang will generate OpCapability VulkanMemoryModel and need entension enabled if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_VULKAN_MEMORY_MODEL_EXTENSION_NAME)) { printf("%s %s 
Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_VULKAN_MEMORY_MODEL_EXTENSION_NAME);
        return;
    }
    if (IsPlatform(kMockICD) || DeviceSimulation()) {
        printf("%s Test not supported by MockICD, skipping tests\n", kSkipPrefix);
        return;
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

    // Chain the three feature structs this test depends on (float16 -> cooperative matrix -> memory model)
    // and query them in one vkGetPhysicalDeviceFeatures2KHR call.
    auto float16_features = LvlInitStruct<VkPhysicalDeviceFloat16Int8FeaturesKHR>();
    auto cooperative_matrix_features = LvlInitStruct<VkPhysicalDeviceCooperativeMatrixFeaturesNV>(&float16_features);
    auto memory_model_features = LvlInitStruct<VkPhysicalDeviceVulkanMemoryModelFeaturesKHR>(&cooperative_matrix_features);
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&memory_model_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
    if (memory_model_features.vulkanMemoryModel == VK_FALSE) {
        printf("%s vulkanMemoryModel feature not supported, skipping tests\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));

    std::vector<VkDescriptorSetLayoutBinding> bindings(0);
    const VkDescriptorSetLayoutObj dsl(m_device, bindings);
    const VkPipelineLayoutObj pl(m_device, {&dsl});

    // Compute shader with two deliberate cooperative-matrix errors: an unsupported
    // matrix type, and a multiply whose dimensions only match when C0 == C1.
    char const *csSource = R"glsl(
        #version 450
        #extension GL_NV_cooperative_matrix : enable
        #extension GL_KHR_shader_subgroup_basic : enable
        #extension GL_KHR_memory_scope_semantics : enable
        #extension GL_EXT_shader_explicit_arithmetic_types_float16 : enable
        layout(local_size_x = 32) in;
        layout(constant_id = 0) const uint C0 = 1;
        layout(constant_id = 1) const uint C1 = 1;
        void main() {
           // Bad type
           fcoopmatNV<16, gl_ScopeSubgroup, 3, 5> badSize = fcoopmatNV<16, gl_ScopeSubgroup, 3, 5>(float16_t(0.0));
           // Not a valid multiply when C0 != C1
           fcoopmatNV<16, gl_ScopeSubgroup, C0, C1> A;
           fcoopmatNV<16, gl_ScopeSubgroup, C0, C1> B;
           fcoopmatNV<16, gl_ScopeSubgroup, C0, C1> C;
           coopMatMulAddNV(A, B, C);
        }
    )glsl";

    // Specialization data: entry 0 = 16, entry 1 = 8 (so C0 != C1 in the shader above).
    const uint32_t specData[] = {
        16,
        8,
    };
    VkSpecializationMapEntry entries[] = {
        {0, sizeof(uint32_t) * 0, sizeof(uint32_t)},
        {1, sizeof(uint32_t) * 1, sizeof(uint32_t)},
    };

    VkSpecializationInfo specInfo = {
        2,
        entries,
        sizeof(specData),
        specData,
    };

    CreateComputePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.cs_.reset(new VkShaderObj(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL, &specInfo));
    pipe.InitState();
    pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {});
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineShaderStageCreateInfo-pSpecializationInfo-06719");
    pipe.CreateComputePipeline();
    m_errorMonitor->VerifyFound();

    // Second creation attempt: expect the cooperative-matrix type/mul-add errors instead.
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-Shader-CooperativeMatrixType");
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-Shader-CooperativeMatrixMulAdd");
    m_errorMonitor->SetUnexpectedError("VUID-VkPipelineShaderStageCreateInfo-pSpecializationInfo-06719");
    pipe.CreateComputePipeline();
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, SubgroupSupportedProperties) {
    TEST_DESCRIPTION(
        "Test shader validation support for subgroup VkPhysicalDeviceSubgroupProperties such as supportedStages, and "
        "supportedOperations, quadOperationsInAllStages.");
    SetTargetApiVersion(VK_API_VERSION_1_1);
    ASSERT_NO_FATAL_FAILURE(Init());
    // Don't enable the extension on purpose
    const bool extension_support_partitioned =
        DeviceExtensionSupported(gpu(), nullptr, VK_NV_SHADER_SUBGROUP_PARTITIONED_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // 1.1 and up only.
if (m_device->props.apiVersion < VK_API_VERSION_1_1) { printf("%s Vulkan 1.1 not supported, skipping test\n", kSkipPrefix); return; } if (IsPlatform(kMockICD) || DeviceSimulation()) { printf("%s DevSim doesn't support Vulkan 1.1, skipping tests\n", kSkipPrefix); return; } // Gather all aspects supported VkPhysicalDeviceSubgroupProperties subgroup_prop = GetSubgroupProperties(instance(), gpu()); VkSubgroupFeatureFlags subgroup_operations = subgroup_prop.supportedOperations; const bool feature_support_basic = ((subgroup_operations & VK_SUBGROUP_FEATURE_BASIC_BIT) != 0); const bool feature_support_vote = ((subgroup_operations & VK_SUBGROUP_FEATURE_VOTE_BIT) != 0); const bool feature_support_arithmetic = ((subgroup_operations & VK_SUBGROUP_FEATURE_ARITHMETIC_BIT) != 0); const bool feature_support_ballot = ((subgroup_operations & VK_SUBGROUP_FEATURE_BALLOT_BIT) != 0); const bool feature_support_shuffle = ((subgroup_operations & VK_SUBGROUP_FEATURE_SHUFFLE_BIT) != 0); const bool feature_support_relative = ((subgroup_operations & VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT) != 0); const bool feature_support_culstered = ((subgroup_operations & VK_SUBGROUP_FEATURE_CLUSTERED_BIT) != 0); const bool feature_support_quad = ((subgroup_operations & VK_SUBGROUP_FEATURE_QUAD_BIT) != 0); const bool feature_support_partitioned = ((subgroup_operations & VK_SUBGROUP_FEATURE_PARTITIONED_BIT_NV) != 0); const bool vertex_support = ((subgroup_prop.supportedStages & VK_SHADER_STAGE_VERTEX_BIT) != 0); const bool vertex_quad_support = (subgroup_prop.quadOperationsInAllStages == VK_TRUE); std::string vsSource; std::vector<const char *> errors; // There is no 'supportedOperations' check due to it would be redundant to the Capability check done first in VUID 01091 since // each 'supportedOperations' flag is 1:1 map to a SPIR-V Capability const char *operation_vuid = "VUID-VkShaderModuleCreateInfo-pCode-01091"; const char *stage_vuid = "VUID-RuntimeSpirv-None-06343"; const char *quad_vuid = 
"VUID-RuntimeSpirv-None-06342"; // Same pipeline creation for each subgroup test auto info_override = [&](CreatePipelineHelper &info) { info.vs_.reset(new VkShaderObj(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_1)); info.shader_stages_ = {info.vs_->GetStageCreateInfo(), info.fs_->GetStageCreateInfo()}; info.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; }; // Basic { vsSource = R"glsl( #version 450 #extension GL_KHR_shader_subgroup_basic: enable layout(set = 0, binding = 0) buffer StorageBuffer { float x; uint y; } ssbo; void main(){ if (subgroupElect()) { ssbo.x += 2.0; } gl_Position = vec4(ssbo.x); } )glsl"; errors.clear(); if (feature_support_basic == false) { errors.push_back(operation_vuid); } if (vertex_support == false) { errors.push_back(stage_vuid); } CreatePipelineHelper::OneshotTest(*this, info_override, kErrorBit, errors, /*positive_test*/ (errors.size() == 0)); } // Vote { vsSource = R"glsl( #version 450 #extension GL_KHR_shader_subgroup_vote: enable layout(set = 0, binding = 0) buffer StorageBuffer { float x; uint y; } ssbo; void main(){ if (subgroupAll(ssbo.y == 0)) { ssbo.x += 2.0; } gl_Position = vec4(ssbo.x); } )glsl"; errors.clear(); if (feature_support_vote == false) { errors.push_back(operation_vuid); } if (vertex_support == false) { errors.push_back(stage_vuid); } CreatePipelineHelper::OneshotTest(*this, info_override, kErrorBit, errors, /*positive_test*/ (errors.size() == 0)); } // Arithmetic { vsSource = R"glsl( #version 450 #extension GL_KHR_shader_subgroup_arithmetic: enable layout(set = 0, binding = 0) buffer StorageBuffer { float x; uint y; } ssbo; void main(){ float z = subgroupMax(ssbo.x); gl_Position = vec4(z); } )glsl"; errors.clear(); if (feature_support_arithmetic == false) { errors.push_back(operation_vuid); } if (vertex_support == false) { errors.push_back(stage_vuid); } CreatePipelineHelper::OneshotTest(*this, info_override, kErrorBit, errors, /*positive_test*/ 
(errors.size() == 0)); } // Ballot { vsSource = R"glsl( #version 450 #extension GL_KHR_shader_subgroup_ballot: enable layout(set = 0, binding = 0) buffer StorageBuffer { float x; uint y; } ssbo; void main(){ float z = subgroupBroadcastFirst(ssbo.x); gl_Position = vec4(z); } )glsl"; errors.clear(); if (feature_support_ballot == false) { errors.push_back(operation_vuid); } if (vertex_support == false) { errors.push_back(stage_vuid); } CreatePipelineHelper::OneshotTest(*this, info_override, kErrorBit, errors, /*positive_test*/ (errors.size() == 0)); } // Shuffle { vsSource = R"glsl( #version 450 #extension GL_KHR_shader_subgroup_shuffle: enable layout(set = 0, binding = 0) buffer StorageBuffer { float x; uint y; } ssbo; void main(){ float z = subgroupShuffle(ssbo.x, 1); gl_Position = vec4(z); } )glsl"; errors.clear(); if (feature_support_shuffle == false) { errors.push_back(operation_vuid); } if (vertex_support == false) { errors.push_back(stage_vuid); } CreatePipelineHelper::OneshotTest(*this, info_override, kErrorBit, errors, /*positive_test*/ (errors.size() == 0)); } // Shuffle Relative { vsSource = R"glsl( #version 450 #extension GL_KHR_shader_subgroup_shuffle_relative: enable layout(set = 0, binding = 0) buffer StorageBuffer { float x; uint y; } ssbo; void main(){ float z = subgroupShuffleUp(ssbo.x, 1); gl_Position = vec4(z); } )glsl"; errors.clear(); if (feature_support_relative == false) { errors.push_back(operation_vuid); } if (vertex_support == false) { errors.push_back(stage_vuid); } CreatePipelineHelper::OneshotTest(*this, info_override, kErrorBit, errors, /*positive_test*/ (errors.size() == 0)); } // Clustered { vsSource = R"glsl( #version 450 #extension GL_KHR_shader_subgroup_clustered: enable layout(set = 0, binding = 0) buffer StorageBuffer { float x; uint y; } ssbo; void main(){ float z = subgroupClusteredAdd(ssbo.x, 2); gl_Position = vec4(z); } )glsl"; errors.clear(); if (feature_support_culstered == false) { errors.push_back(operation_vuid); } if 
(vertex_support == false) { errors.push_back(stage_vuid); } CreatePipelineHelper::OneshotTest(*this, info_override, kErrorBit, errors, /*positive_test*/ (errors.size() == 0)); } // Quad { vsSource = R"glsl( #version 450 #extension GL_KHR_shader_subgroup_quad: enable layout(set = 0, binding = 0) buffer StorageBuffer { float x; uint y; } ssbo; void main(){ float z = subgroupQuadSwapHorizontal(ssbo.x); gl_Position = vec4(z); } )glsl"; errors.clear(); if (feature_support_quad == false) { errors.push_back(operation_vuid); } if (vertex_quad_support == false) { errors.push_back(quad_vuid); } if (vertex_support == false) { errors.push_back(stage_vuid); } CreatePipelineHelper::OneshotTest(*this, info_override, kErrorBit, errors, /*positive_test*/ (errors.size() == 0)); } // Partitoned if (extension_support_partitioned) { vsSource = R"glsl( #version 450 #extension GL_NV_shader_subgroup_partitioned: enable layout(set = 0, binding = 0) buffer StorageBuffer { float x; uint y; } ssbo; void main(){ uvec4 a = subgroupPartitionNV(ssbo.x); // forces OpGroupNonUniformPartitionNV gl_Position = vec4(float(a.x)); } )glsl"; errors.clear(); // Extension not enabled on purpose if supported errors.push_back("VUID-VkShaderModuleCreateInfo-pCode-04147"); if (feature_support_partitioned == false) { // errors.push_back(operation_vuid); } if (vertex_support == false) { errors.push_back(stage_vuid); } CreatePipelineHelper::OneshotTest(*this, info_override, kErrorBit, errors, /*positive_test*/ (errors.size() == 0)); } } TEST_F(VkLayerTest, SubgroupRequired) { TEST_DESCRIPTION("Test that the minimum required functionality for subgroups is present."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); ASSERT_NO_FATAL_FAILURE(InitState()); // 1.1 and up only. 
if (m_device->props.apiVersion < VK_API_VERSION_1_1) {
        printf("%s Vulkan 1.1 not supported, skipping test\n", kSkipPrefix);
        return;
    }
    if (IsPlatform(kMockICD) || DeviceSimulation()) {
        printf("%s DevSim doesn't support Vulkan 1.1, skipping tests\n", kSkipPrefix);
        return;
    }

    VkPhysicalDeviceSubgroupProperties subgroup_prop = GetSubgroupProperties(instance(), gpu());

    auto queue_family_properties = m_device->phy().queue_properties();
    bool foundGraphics = false;
    bool foundCompute = false;
    // NOTE(review): the loop breaks as soon as a compute-capable family is found, so
    // foundGraphics is only set for graphics-only families seen before that point —
    // confirm this is intended (the final check only needs either flag).
    for (auto queue_family : queue_family_properties) {
        if (queue_family.queueFlags & VK_QUEUE_COMPUTE_BIT) {
            foundCompute = true;
            break;
        }
        if (queue_family.queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            foundGraphics = true;
        }
    }

    if (!(foundGraphics || foundCompute)) return;

    // Vulkan 1.1 minimum guarantees: subgroupSize >= 1, basic ops, and compute-stage support.
    ASSERT_GE(subgroup_prop.subgroupSize, 1u);

    if (foundCompute) {
        ASSERT_TRUE(subgroup_prop.supportedStages & VK_SHADER_STAGE_COMPUTE_BIT);
    }

    ASSERT_TRUE(subgroup_prop.supportedOperations & VK_SUBGROUP_FEATURE_BASIC_BIT);
}

TEST_F(VkLayerTest, SubgroupExtendedTypesEnabled) {
    TEST_DESCRIPTION("Test VK_KHR_shader_subgroup_extended_types.");
    SetTargetApiVersion(VK_API_VERSION_1_1);
    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    std::array<const char *, 2> required_device_extensions = {
        {VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_EXTENSION_NAME, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME}};
    for (auto device_extension : required_device_extensions) {
        if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) {
            m_device_extension_names.push_back(device_extension);
        } else {
            printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension);
            return;
        }
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

    auto float16_features = LvlInitStruct<VkPhysicalDeviceFloat16Int8FeaturesKHR>();
    auto extended_types_features = LvlInitStruct<VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR>(&float16_features);
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&extended_types_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);

    // Positive test: everything required is supported and enabled, so no error is expected.
    VkPhysicalDeviceSubgroupProperties subgroup_prop = GetSubgroupProperties(instance(), gpu());
    if (!(subgroup_prop.supportedOperations & VK_SUBGROUP_FEATURE_ARITHMETIC_BIT) ||
        !(subgroup_prop.supportedStages & VK_SHADER_STAGE_COMPUTE_BIT) || !float16_features.shaderFloat16 ||
        !extended_types_features.shaderSubgroupExtendedTypes) {
        printf("%s Required features not supported, skipping tests\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));

    std::vector<VkDescriptorSetLayoutBinding> bindings(0);
    const VkDescriptorSetLayoutObj dsl(m_device, bindings);
    const VkPipelineLayoutObj pl(m_device, {&dsl});

    // subgroupAdd on float16_t requires shaderSubgroupExtendedTypes + shaderFloat16.
    char const *csSource = R"glsl(
        #version 450
        #extension GL_KHR_shader_subgroup_arithmetic : enable
        #extension GL_EXT_shader_subgroup_extended_types_float16 : enable
        #extension GL_EXT_shader_explicit_arithmetic_types_float16 : enable
        layout(local_size_x = 32) in;
        void main() {
           subgroupAdd(float16_t(0.0));
        }
    )glsl";

    CreateComputePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.cs_.reset(new VkShaderObj(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_1));
    pipe.InitState();
    pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {});
    pipe.CreateComputePipeline();
    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkLayerTest, SubgroupExtendedTypesDisabled) {
    TEST_DESCRIPTION("Test VK_KHR_shader_subgroup_extended_types.");
    SetTargetApiVersion(VK_API_VERSION_1_1);
    if
(InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    std::array<const char *, 2> required_device_extensions = {
        {VK_KHR_SHADER_SUBGROUP_EXTENDED_TYPES_EXTENSION_NAME, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME}};
    for (auto device_extension : required_device_extensions) {
        if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) {
            m_device_extension_names.push_back(device_extension);
        } else {
            printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension);
            return;
        }
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

    auto float16_features = LvlInitStruct<VkPhysicalDeviceFloat16Int8FeaturesKHR>();
    auto extended_types_features = LvlInitStruct<VkPhysicalDeviceShaderSubgroupExtendedTypesFeaturesKHR>(&float16_features);
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&extended_types_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);

    VkPhysicalDeviceSubgroupProperties subgroup_prop = GetSubgroupProperties(instance(), gpu());
    if (!(subgroup_prop.supportedOperations & VK_SUBGROUP_FEATURE_ARITHMETIC_BIT) ||
        !(subgroup_prop.supportedStages & VK_SHADER_STAGE_COMPUTE_BIT) || !float16_features.shaderFloat16) {
        printf("%s Required features not supported, skipping tests\n", kSkipPrefix);
        return;
    }

    // Disabled extended types support, and expect an error
    extended_types_features.shaderSubgroupExtendedTypes = VK_FALSE;
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));

    std::vector<VkDescriptorSetLayoutBinding> bindings(0);
    const VkDescriptorSetLayoutObj dsl(m_device, bindings);
    const VkPipelineLayoutObj pl(m_device, {&dsl});

    // Same shader as the Enabled variant; with the feature off it must be rejected.
    char const *csSource = R"glsl(
        #version 450
        #extension GL_KHR_shader_subgroup_arithmetic : enable
        #extension GL_EXT_shader_subgroup_extended_types_float16 : enable
        #extension GL_EXT_shader_explicit_arithmetic_types_float16 : enable
        layout(local_size_x = 32) in;
        void main() {
           subgroupAdd(float16_t(0.0));
        }
    )glsl";

    CreateComputePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.cs_.reset(new VkShaderObj(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_1));
    pipe.InitState();
    pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {});
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-None-06275");
    pipe.CreateComputePipeline();
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, NonSemanticInfoEnabled) {
    TEST_DESCRIPTION("Test VK_KHR_shader_non_semantic_info.");

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME)) {
        printf("%s Extension %s not supported, skipping this test. \n", kSkipPrefix, VK_KHR_SHADER_NON_SEMANTIC_INFO_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    std::vector<VkDescriptorSetLayoutBinding> bindings(0);
    const VkDescriptorSetLayoutObj dsl(m_device, bindings);
    const VkPipelineLayoutObj pl(m_device, {&dsl});

    // SPIR-V module using SPV_KHR_non_semantic_info; the device extension is deliberately
    // NOT enabled, so shader-module creation must fail with 04147.
    const std::string source = R"(
                   OpCapability Shader
                   OpExtension "SPV_KHR_non_semantic_info"
   %non_semantic = OpExtInstImport "NonSemantic.Validation.Test"
                   OpMemoryModel Logical GLSL450
                   OpEntryPoint GLCompute %main "main"
                   OpExecutionMode %main LocalSize 1 1 1
           %void = OpTypeVoid
              %1 = OpExtInst %void %non_semantic 55 %void
           %func = OpTypeFunction %void
           %main = OpFunction %void None %func
              %2 = OpLabel
                   OpReturn
                   OpFunctionEnd
        )";

    CreateComputePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.cs_.reset(new VkShaderObj(this, source, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM));
    pipe.InitState();
    pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {});
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-04147");
    pipe.CreateComputePipeline();
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, GraphicsPipelineStageCreationFeedbackCount) {
    TEST_DESCRIPTION("Test graphics pipeline feedback stage count check.");

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME);
    } else {
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    auto feedback_info = LvlInitStruct<VkPipelineCreationFeedbackCreateInfoEXT>();
    VkPipelineCreationFeedbackEXT feedbacks[3] = {};
    // Set flags to known value that the driver has to overwrite
    feedbacks[0].flags = VK_PIPELINE_CREATION_FEEDBACK_FLAG_BITS_MAX_ENUM;

    feedback_info.pPipelineCreationFeedback =
&feedbacks[0]; feedback_info.pipelineStageCreationFeedbackCount = 2; feedback_info.pPipelineStageCreationFeedbacks = &feedbacks[1]; auto set_feedback = [&feedback_info](CreatePipelineHelper &helper) { helper.gp_ci_.pNext = &feedback_info; }; CreatePipelineHelper::OneshotTest(*this, set_feedback, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pipelineStageCreationFeedbackCount-06594", true); if (IsPlatform(kMockICD) || DeviceSimulation()) { printf("%s Driver data writeback check not supported by MockICD, skipping.\n", kSkipPrefix); } else { m_errorMonitor->ExpectSuccess(); if (feedback_info.pPipelineCreationFeedback->flags == VK_PIPELINE_CREATION_FEEDBACK_FLAG_BITS_MAX_ENUM) { m_errorMonitor->SetError("ValidationLayers did not return GraphicsPipelineFeedback driver data properly."); } m_errorMonitor->VerifyNotFound(); } feedback_info.pipelineStageCreationFeedbackCount = 1; CreatePipelineHelper::OneshotTest(*this, set_feedback, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pipelineStageCreationFeedbackCount-06594", false); } TEST_F(VkLayerTest, ComputePipelineStageCreationFeedbackCount) { TEST_DESCRIPTION("Test compute pipeline feedback stage count check."); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCreationFeedbackCreateInfoEXT feedback_info = LvlInitStruct<VkPipelineCreationFeedbackCreateInfoEXT>(); VkPipelineCreationFeedbackEXT feedbacks[3] = {}; feedback_info.pPipelineCreationFeedback = &feedbacks[0]; feedback_info.pipelineStageCreationFeedbackCount = 1; feedback_info.pPipelineStageCreationFeedbacks = &feedbacks[1]; const auto set_info = 
[&](CreateComputePipelineHelper &helper) { helper.cp_ci_.pNext = &feedback_info; }; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true); feedback_info.pipelineStageCreationFeedbackCount = 2; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkComputePipelineCreateInfo-pipelineStageCreationFeedbackCount-06566"); } TEST_F(VkLayerTest, NVRayTracingPipelineStageCreationFeedbackCount) { TEST_DESCRIPTION("Test NV ray tracing pipeline feedback stage count check."); if (!CreateNVRayTracingPipelineHelper::InitInstanceExtensions(*this, m_instance_extension_names)) { return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_PIPELINE_CREATION_FEEDBACK_EXTENSION_NAME); return; } if (!CreateNVRayTracingPipelineHelper::InitDeviceExtensions(*this, m_device_extension_names)) { return; } ASSERT_NO_FATAL_FAILURE(InitState()); auto feedback_info = LvlInitStruct<VkPipelineCreationFeedbackCreateInfoEXT>(); VkPipelineCreationFeedbackEXT feedbacks[4] = {}; feedback_info.pPipelineCreationFeedback = &feedbacks[0]; feedback_info.pipelineStageCreationFeedbackCount = 2; feedback_info.pPipelineStageCreationFeedbacks = &feedbacks[1]; auto set_feedback = [&feedback_info](CreateNVRayTracingPipelineHelper &helper) { helper.rp_ci_.pNext = &feedback_info; }; feedback_info.pipelineStageCreationFeedbackCount = 3; CreateNVRayTracingPipelineHelper::OneshotPositiveTest(*this, set_feedback); feedback_info.pipelineStageCreationFeedbackCount = 2; CreateNVRayTracingPipelineHelper::OneshotTest(*this, set_feedback, "VUID-VkRayTracingPipelineCreateInfoNV-pipelineStageCreationFeedbackCount-06651"); } TEST_F(VkLayerTest, CreatePipelineCheckShaderImageFootprintEnabled) { TEST_DESCRIPTION("Create a 
pipeline requiring the shader image footprint feature which has not enabled on the device."); ASSERT_NO_FATAL_FAILURE(Init()); if (!DeviceExtensionSupported(gpu(), nullptr, VK_NV_SHADER_IMAGE_FOOTPRINT_EXTENSION_NAME)) { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_NV_SHADER_IMAGE_FOOTPRINT_EXTENSION_NAME); return; } std::vector<const char *> device_extension_names; auto features = m_device->phy().features(); // Disable the image footprint feature. auto image_footprint_features = LvlInitStruct<VkPhysicalDeviceShaderImageFootprintFeaturesNV>(); image_footprint_features.imageFootprint = VK_FALSE; VkDeviceObj test_device(0, gpu(), device_extension_names, &features, &image_footprint_features); char const *fsSource = R"glsl( #version 450 #extension GL_NV_shader_texture_footprint : require layout(set=0, binding=0) uniform sampler2D s; layout(location=0) out vec4 color; void main(){ gl_TextureFootprint2DNV footprint; if (textureFootprintNV(s, vec2(1.0), 5, false, footprint)) { color = vec4(0.0, 1.0, 0.0, 1.0); } else { color = vec4(vec2(footprint.anchor), vec2(footprint.offset)); } } )glsl"; VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); vs.InitFromGLSLTry(bindStateVertShaderText, false, SPV_ENV_VULKAN_1_0, &test_device); fs.InitFromGLSLTry(fsSource, false, SPV_ENV_VULKAN_1_0, &test_device); VkRenderpassObj render_pass(&test_device); VkPipelineObj pipe(&test_device); pipe.AddDefaultColorAttachment(); pipe.AddShader(&vs); pipe.AddShader(&fs); VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj ds_layout(&test_device, {binding}); ASSERT_TRUE(ds_layout.initialized()); const VkPipelineLayoutObj pipeline_layout(&test_device, {&ds_layout}); 
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-01091");
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-04147");
    pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle());
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineCheckFragmentShaderBarycentricEnabled) {
    TEST_DESCRIPTION("Create a pipeline requiring the fragment shader barycentric feature which has not enabled on the device.");
    ASSERT_NO_FATAL_FAILURE(Init());

    std::vector<const char *> device_extension_names;
    auto features = m_device->phy().features();
    // Disable the fragment shader barycentric feature.
    auto fragment_shader_barycentric_features = LvlInitStruct<VkPhysicalDeviceFragmentShaderBarycentricFeaturesNV>();
    fragment_shader_barycentric_features.fragmentShaderBarycentric = VK_FALSE;
    VkDeviceObj test_device(0, gpu(), device_extension_names, &features, &fragment_shader_barycentric_features);

    // Fragment shader reads gl_BaryCoordNV, which requires the disabled feature.
    char const *fsSource = R"glsl(
        #version 450
        #extension GL_NV_fragment_shader_barycentric : require
        layout(location=0) out float value;
        void main(){
           value = gl_BaryCoordNV.x;
        }
    )glsl";

    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY);
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY);
    vs.InitFromGLSLTry(bindStateVertShaderText, false, SPV_ENV_VULKAN_1_0, &test_device);
    fs.InitFromGLSLTry(fsSource, false, SPV_ENV_VULKAN_1_0, &test_device);

    VkRenderpassObj render_pass(&test_device);

    VkPipelineObj pipe(&test_device);
    pipe.AddDefaultColorAttachment();
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);

    const VkPipelineLayoutObj pipeline_layout(&test_device);

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-01091");
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-04147");
    pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle());
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineCheckComputeShaderDerivativesEnabled) {
    TEST_DESCRIPTION("Create a pipeline requiring the compute shader derivatives feature which has not enabled on the device.");
    ASSERT_NO_FATAL_FAILURE(Init());

    if (!DeviceExtensionSupported(gpu(), nullptr, VK_NV_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME)) {
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_NV_COMPUTE_SHADER_DERIVATIVES_EXTENSION_NAME);
        return;
    }

    std::vector<const char *> device_extension_names;
    auto features = m_device->phy().features();
    // Disable the compute shader derivatives features.
    auto compute_shader_derivatives_features = LvlInitStruct<VkPhysicalDeviceComputeShaderDerivativesFeaturesNV>();
    compute_shader_derivatives_features.computeDerivativeGroupLinear = VK_FALSE;
    compute_shader_derivatives_features.computeDerivativeGroupQuads = VK_FALSE;
    VkDeviceObj test_device(0, gpu(), device_extension_names, &features, &compute_shader_derivatives_features);

    VkDescriptorSetLayoutBinding binding = {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr};
    const VkDescriptorSetLayoutObj dsl(&test_device, {binding});
    const VkPipelineLayoutObj pl(&test_device, {&dsl});

    // dFdx in a compute shader requires derivative_group_quadsNV, which needs the disabled feature.
    char const *csSource = R"glsl(
        #version 450
        #extension GL_NV_compute_shader_derivatives : require
        layout(local_size_x=2, local_size_y=4) in;
        layout(derivative_group_quadsNV) in;
        layout(set=0, binding=0) buffer InputOutputBuffer { float values[]; };
        void main(){
           values[gl_LocalInvocationIndex] = dFdx(values[gl_LocalInvocationIndex]);
        }
    )glsl";

    VkShaderObj cs(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY);
    cs.InitFromGLSLTry(csSource, false, SPV_ENV_VULKAN_1_0, &test_device);

    VkComputePipelineCreateInfo cpci = {VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,
                                        nullptr,
                                        0,
                                        {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, nullptr, 0,
                                         VK_SHADER_STAGE_COMPUTE_BIT, cs.handle(), "main", nullptr},
                                        pl.handle(),
VK_NULL_HANDLE, -1};

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-01091");
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-04147");

    VkPipeline pipe = VK_NULL_HANDLE;
    vk::CreateComputePipelines(test_device.device(), VK_NULL_HANDLE, 1, &cpci, nullptr, &pipe);
    m_errorMonitor->VerifyFound();
    vk::DestroyPipeline(test_device.device(), pipe, nullptr);
    m_errorMonitor->VerifyNotFound();
}

TEST_F(VkLayerTest, CreatePipelineCheckFragmentShaderInterlockEnabled) {
    TEST_DESCRIPTION("Create a pipeline requiring the fragment shader interlock feature which has not enabled on the device.");
    ASSERT_NO_FATAL_FAILURE(Init());

    std::vector<const char *> device_extension_names;
    if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_FRAGMENT_SHADER_INTERLOCK_EXTENSION_NAME)) {
        // Note: we intentionally do not add the required extension to the device extension list.
        // in order to create the error below
    } else {
        // We skip this test if the extension is not supported by the driver as in some cases this will cause
        // the vk::CreateShaderModule to fail without generating an error message
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_FRAGMENT_SHADER_INTERLOCK_EXTENSION_NAME);
        return;
    }

    auto features = m_device->phy().features();
    // Disable the fragment shader interlock feature.
    auto fragment_shader_interlock_features = LvlInitStruct<VkPhysicalDeviceFragmentShaderInterlockFeaturesEXT>();
    fragment_shader_interlock_features.fragmentShaderSampleInterlock = VK_FALSE;
    fragment_shader_interlock_features.fragmentShaderPixelInterlock = VK_FALSE;
    fragment_shader_interlock_features.fragmentShaderShadingRateInterlock = VK_FALSE;
    VkDeviceObj test_device(0, gpu(), device_extension_names, &features, &fragment_shader_interlock_features);

    // sample_interlock_ordered requires fragmentShaderSampleInterlock, which is disabled above.
    char const *fsSource = R"glsl(
        #version 450
        #extension GL_ARB_fragment_shader_interlock : require
        layout(sample_interlock_ordered) in;
        void main(){
        }
    )glsl";

    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY);
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY);
    vs.InitFromGLSLTry(bindStateVertShaderText, false, SPV_ENV_VULKAN_1_0, &test_device);
    fs.InitFromGLSLTry(fsSource, false, SPV_ENV_VULKAN_1_0, &test_device);

    VkRenderpassObj render_pass(&test_device);

    VkPipelineObj pipe(&test_device);
    pipe.AddDefaultColorAttachment();
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);

    const VkPipelineLayoutObj pipeline_layout(&test_device);

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-01091");
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-04147");
    pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle());
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineCheckDemoteToHelperInvocation) {
    TEST_DESCRIPTION("Create a pipeline requiring the demote to helper invocation feature which has not enabled on the device.");
    ASSERT_NO_FATAL_FAILURE(Init());

    std::vector<const char *> device_extension_names;
    if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME)) {
        // Note: we intentionally do not add the required extension to the device extension list.
        // in order to create the error below
    } else {
        // We skip this test if the extension is not supported by the driver as in some cases this will cause
        // the vk::CreateShaderModule to fail without generating an error message
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_SHADER_DEMOTE_TO_HELPER_INVOCATION_EXTENSION_NAME);
        return;
    }

    auto features = m_device->phy().features();
    // Disable the demote to helper invocation feature.
    auto demote_features = LvlInitStruct<VkPhysicalDeviceShaderDemoteToHelperInvocationFeaturesEXT>();
    demote_features.shaderDemoteToHelperInvocation = VK_FALSE;
    VkDeviceObj test_device(0, gpu(), device_extension_names, &features, &demote_features);

    // 'demote' requires shaderDemoteToHelperInvocation, which is disabled above.
    char const *fsSource = R"glsl(
        #version 450
        #extension GL_EXT_demote_to_helper_invocation : require
        void main(){
            demote;
        }
    )glsl";

    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY);
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY);
    vs.InitFromGLSLTry(bindStateVertShaderText, false, SPV_ENV_VULKAN_1_0, &test_device);
    fs.InitFromGLSLTry(fsSource, false, SPV_ENV_VULKAN_1_0, &test_device);

    VkRenderpassObj render_pass(&test_device);

    VkPipelineObj pipe(&test_device);
    pipe.AddDefaultColorAttachment();
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);

    const VkPipelineLayoutObj pipeline_layout(&test_device);

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-01091");
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-04147");
    pipe.CreateVKPipeline(pipeline_layout.handle(), render_pass.handle());
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest, CreatePipelineCheckLineRasterization) {
    TEST_DESCRIPTION("Test VK_EXT_line_rasterization state against feature enables.");

    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // The line-rasterization extension itself must be enabled so the pNext struct is consumed;
    // only its *features* are turned off below.
    std::array<const char *, 1> required_device_extensions = {{VK_EXT_LINE_RASTERIZATION_EXTENSION_NAME}};
    for (auto device_extension : required_device_extensions) {
        if (DeviceExtensionSupported(gpu(), nullptr, device_extension)) {
            m_device_extension_names.push_back(device_extension);
        } else {
            printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, device_extension);
            return;
        }
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);

    auto line_rasterization_features = LvlInitStruct<VkPhysicalDeviceLineRasterizationFeaturesEXT>();
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&line_rasterization_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);

    // Query first, then force every line-rasterization feature off so each mode below is illegal.
    line_rasterization_features.rectangularLines = VK_FALSE;
    line_rasterization_features.bresenhamLines = VK_FALSE;
    line_rasterization_features.smoothLines = VK_FALSE;
    line_rasterization_features.stippledRectangularLines = VK_FALSE;
    line_rasterization_features.stippledBresenhamLines = VK_FALSE;
    line_rasterization_features.stippledSmoothLines = VK_FALSE;

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Bresenham mode + alpha-to-coverage: feature disabled and mode/alpha combination illegal.
    CreatePipelineHelper::OneshotTest(
        *this,
        [&](CreatePipelineHelper &helper) {
            helper.line_state_ci_.lineRasterizationMode = VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT;
            helper.pipe_ms_state_ci_.alphaToCoverageEnable = VK_TRUE;
        },
        kErrorBit,
        std::vector<const char *>{"VUID-VkGraphicsPipelineCreateInfo-lineRasterizationMode-02766",
                                  "VUID-VkPipelineRasterizationLineStateCreateInfoEXT-lineRasterizationMode-02769"});

    // Stippled Bresenham: both the mode feature and the stippled-mode feature are disabled.
    CreatePipelineHelper::OneshotTest(
        *this,
        [&](CreatePipelineHelper &helper) {
            helper.line_state_ci_.lineRasterizationMode = VK_LINE_RASTERIZATION_MODE_BRESENHAM_EXT;
            helper.line_state_ci_.stippledLineEnable = VK_TRUE;
        },
        kErrorBit,
        std::vector<const char *>{"VUID-VkGraphicsPipelineCreateInfo-stippledLineEnable-02767",
                                  "VUID-VkPipelineRasterizationLineStateCreateInfoEXT-lineRasterizationMode-02769",
                                  "VUID-VkPipelineRasterizationLineStateCreateInfoEXT-stippledLineEnable-02772"});

    // Stippled rectangular lines.
    CreatePipelineHelper::OneshotTest(
        *this,
        [&](CreatePipelineHelper &helper) {
            helper.line_state_ci_.lineRasterizationMode = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_EXT;
            helper.line_state_ci_.stippledLineEnable = VK_TRUE;
        },
        kErrorBit,
        std::vector<const char *>{"VUID-VkGraphicsPipelineCreateInfo-stippledLineEnable-02767",
                                  "VUID-VkPipelineRasterizationLineStateCreateInfoEXT-lineRasterizationMode-02768",
                                  "VUID-VkPipelineRasterizationLineStateCreateInfoEXT-stippledLineEnable-02771"});

    // Stippled smooth (rectangular-smooth) lines.
    CreatePipelineHelper::OneshotTest(
        *this,
        [&](CreatePipelineHelper &helper) {
            helper.line_state_ci_.lineRasterizationMode = VK_LINE_RASTERIZATION_MODE_RECTANGULAR_SMOOTH_EXT;
            helper.line_state_ci_.stippledLineEnable = VK_TRUE;
        },
        kErrorBit,
        std::vector<const char *>{"VUID-VkGraphicsPipelineCreateInfo-stippledLineEnable-02767",
                                  "VUID-VkPipelineRasterizationLineStateCreateInfoEXT-lineRasterizationMode-02770",
                                  "VUID-VkPipelineRasterizationLineStateCreateInfoEXT-stippledLineEnable-02773"});

    // Stipple with default mode still requires the stippledRectangularLines feature (plus strictLines).
    CreatePipelineHelper::OneshotTest(
        *this,
        [&](CreatePipelineHelper &helper) {
            helper.line_state_ci_.lineRasterizationMode = VK_LINE_RASTERIZATION_MODE_DEFAULT_EXT;
            helper.line_state_ci_.stippledLineEnable = VK_TRUE;
        },
        kErrorBit,
        std::vector<const char *>{"VUID-VkGraphicsPipelineCreateInfo-stippledLineEnable-02767",
"VUID-VkPipelineRasterizationLineStateCreateInfoEXT-stippledLineEnable-02774"}); PFN_vkCmdSetLineStippleEXT vkCmdSetLineStippleEXT = (PFN_vkCmdSetLineStippleEXT)vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetLineStippleEXT"); ASSERT_TRUE(vkCmdSetLineStippleEXT != nullptr); m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdSetLineStippleEXT-lineStippleFactor-02776"); vkCmdSetLineStippleEXT(m_commandBuffer->handle(), 0, 0); m_errorMonitor->VerifyFound(); vkCmdSetLineStippleEXT(m_commandBuffer->handle(), 1, 1); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, FillRectangleNV) { TEST_DESCRIPTION("Verify VK_NV_fill_rectangle"); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); VkPhysicalDeviceFeatures device_features = {}; ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&device_features)); // Disable non-solid fill modes to make sure that the usage of VK_POLYGON_MODE_LINE and // VK_POLYGON_MODE_POINT will cause an error when the VK_NV_fill_rectangle extension is enabled. 
    device_features.fillModeNonSolid = VK_FALSE;

    if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_FILL_RECTANGLE_EXTENSION_NAME)) {
        m_device_extension_names.push_back(VK_NV_FILL_RECTANGLE_EXTENSION_NAME);
    } else {
        printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_NV_FILL_RECTANGLE_EXTENSION_NAME);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(&device_features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // The lambda captures polygon_mode by reference so the same callback can be reused
    // for each mode assigned below.
    VkPolygonMode polygon_mode = VK_POLYGON_MODE_LINE;
    auto set_polygon_mode = [&polygon_mode](CreatePipelineHelper &helper) {
        helper.rs_state_ci_.polygonMode = polygon_mode;
    };

    // Set unsupported polygon mode VK_POLYGON_MODE_LINE
    CreatePipelineHelper::OneshotTest(*this, set_polygon_mode, kErrorBit,
                                      "VUID-VkPipelineRasterizationStateCreateInfo-polygonMode-01507", false);

    // Set unsupported polygon mode VK_POLYGON_MODE_POINT
    polygon_mode = VK_POLYGON_MODE_POINT;
    CreatePipelineHelper::OneshotTest(*this, set_polygon_mode, kErrorBit,
                                      "VUID-VkPipelineRasterizationStateCreateInfo-polygonMode-01507", false);

    // Set supported polygon mode VK_POLYGON_MODE_FILL (last arg `true` => expect success)
    polygon_mode = VK_POLYGON_MODE_FILL;
    CreatePipelineHelper::OneshotTest(*this, set_polygon_mode, kErrorBit,
                                      "VUID-VkPipelineRasterizationStateCreateInfo-polygonMode-01507", true);

    // Set supported polygon mode VK_POLYGON_MODE_FILL_RECTANGLE_NV
    polygon_mode = VK_POLYGON_MODE_FILL_RECTANGLE_NV;
    CreatePipelineHelper::OneshotTest(*this, set_polygon_mode, kErrorBit,
                                      "VUID-VkPipelineRasterizationStateCreateInfo-polygonMode-01507", true);
}

// Creates a compute pipeline with one pipeline layout but binds its descriptor set through a
// second, stage-flag-incompatible layout; the mismatch must be reported at vkCmdDispatch time.
TEST_F(VkLayerTest, NotCompatibleForSet) {
    TEST_DESCRIPTION("Check that validation path catches pipeline layout inconsistencies for bind vs. dispatch");
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());

    auto c_queue = m_device->GetDefaultComputeQueue();
    if (nullptr == c_queue) {
        printf("Compute not supported, skipping test\n");
        return;
    }

    uint32_t qfi = 0;
    VkBufferCreateInfo bci = LvlInitStruct<VkBufferCreateInfo>();
    bci.usage = VK_BUFFER_USAGE_STORAGE_BUFFER_BIT;
    bci.size = 4;
    bci.queueFamilyIndexCount = 1;
    bci.pQueueFamilyIndices = &qfi;
    VkBufferObj storage_buffer;
    VkMemoryPropertyFlags mem_props = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT;
    storage_buffer.init(*m_device, bci, mem_props);

    // Reuse the same create-info for the uniform buffer, changing only usage and size.
    VkBufferObj uniform_buffer;
    bci.usage = VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT;
    bci.size = 20;
    uniform_buffer.init(*m_device, bci, mem_props);

    OneOffDescriptorSet::Bindings binding_defs = {
        {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
        {1, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
    };
    const VkDescriptorSetLayoutObj pipeline_dsl(m_device, binding_defs);
    const VkPipelineLayoutObj pipeline_layout(m_device, {&pipeline_dsl});

    // We now will use a slightly different Layout definition for the descriptors we actually bind with
    // (but one that would still be correct for the shader): binding 1 narrows its stage flags from
    // VK_SHADER_STAGE_ALL to COMPUTE only, which makes the two layouts incompatible for set 0.
    binding_defs[1].stageFlags = VK_SHADER_STAGE_COMPUTE_BIT;
    OneOffDescriptorSet binding_descriptor_set(m_device, binding_defs);
    const VkPipelineLayoutObj binding_pipeline_layout(m_device, {&binding_descriptor_set.layout_});

    VkDescriptorBufferInfo storage_buffer_info = {storage_buffer.handle(), 0, sizeof(uint32_t)};
    VkDescriptorBufferInfo uniform_buffer_info = {uniform_buffer.handle(), 0, 5 * sizeof(uint32_t)};

    VkWriteDescriptorSet descriptor_writes[2] = {};
    descriptor_writes[0] = LvlInitStruct<VkWriteDescriptorSet>();
    descriptor_writes[0].dstSet = binding_descriptor_set.set_;
    descriptor_writes[0].dstBinding = 0;
    descriptor_writes[0].descriptorCount = 1;
    descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_BUFFER;
    descriptor_writes[0].pBufferInfo =
&storage_buffer_info; descriptor_writes[1] = LvlInitStruct<VkWriteDescriptorSet>(); descriptor_writes[1].dstSet = binding_descriptor_set.set_; descriptor_writes[1].dstBinding = 1; descriptor_writes[1].descriptorCount = 1; // Write 4 bytes to val descriptor_writes[1].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER; descriptor_writes[1].pBufferInfo = &uniform_buffer_info; vk::UpdateDescriptorSets(m_device->device(), 2, descriptor_writes, 0, NULL); char const *csSource = R"glsl( #version 450 #extension GL_EXT_nonuniform_qualifier : enable layout(set = 0, binding = 0) buffer StorageBuffer { uint index; } u_index; layout(set = 0, binding = 1) uniform UniformStruct { ivec4 dummy; int val; } ubo; void main() { u_index.index = ubo.val; } )glsl"; VkShaderObj shader_module(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT); VkPipelineShaderStageCreateInfo stage = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage.flags = 0; stage.stage = VK_SHADER_STAGE_COMPUTE_BIT; stage.module = shader_module.handle(); stage.pName = "main"; stage.pSpecializationInfo = nullptr; // CreateComputePipelines VkComputePipelineCreateInfo pipeline_info = LvlInitStruct<VkComputePipelineCreateInfo>(); pipeline_info.flags = 0; pipeline_info.layout = pipeline_layout.handle(); pipeline_info.basePipelineHandle = VK_NULL_HANDLE; pipeline_info.basePipelineIndex = -1; pipeline_info.stage = stage; VkPipeline c_pipeline; vk::CreateComputePipelines(device(), VK_NULL_HANDLE, 1, &pipeline_info, nullptr, &c_pipeline); m_commandBuffer->begin(); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, c_pipeline); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, binding_pipeline_layout.handle(), 0, 1, &binding_descriptor_set.set_, 0, nullptr); m_errorMonitor->VerifyNotFound(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDispatch-None-02697"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, 
"UNASSIGNED-CoreValidation-DrawState-PipelineLayoutsIncompatible"); vk::CmdDispatch(m_commandBuffer->handle(), 1, 1, 1); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); vk::DestroyPipeline(device(), c_pipeline, nullptr); } TEST_F(VkLayerTest, RayTracingPipelineShaderGroupsNV) { TEST_DESCRIPTION("Validate shader groups during ray-tracing pipeline creation"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_RAY_TRACING_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_RAY_TRACING_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_NV_RAY_TRACING_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); const VkPipelineLayoutObj empty_pipeline_layout(m_device, {}); const std::string empty_shader = R"glsl( #version 460 #extension GL_NV_ray_tracing : require void main() {} )glsl"; VkShaderObj rgen_shader(this, empty_shader, VK_SHADER_STAGE_RAYGEN_BIT_NV); VkShaderObj ahit_shader(this, empty_shader, VK_SHADER_STAGE_ANY_HIT_BIT_NV); VkShaderObj chit_shader(this, empty_shader, VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV); VkShaderObj miss_shader(this, empty_shader, VK_SHADER_STAGE_MISS_BIT_NV); VkShaderObj intr_shader(this, empty_shader, VK_SHADER_STAGE_INTERSECTION_BIT_NV); VkShaderObj call_shader(this, empty_shader, VK_SHADER_STAGE_CALLABLE_BIT_NV); m_errorMonitor->VerifyNotFound(); PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV = reinterpret_cast<PFN_vkCreateRayTracingPipelinesNV>(vk::GetInstanceProcAddr(instance(), "vkCreateRayTracingPipelinesNV")); 
ASSERT_TRUE(vkCreateRayTracingPipelinesNV != nullptr); VkPipeline pipeline = VK_NULL_HANDLE; // No raygen stage { VkPipelineShaderStageCreateInfo stage_create_info = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_info.stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV; stage_create_info.module = chit_shader.handle(); stage_create_info.pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_info = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_info.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV; group_create_info.generalShader = VK_SHADER_UNUSED_NV; group_create_info.closestHitShader = 0; group_create_info.anyHitShader = VK_SHADER_UNUSED_NV; group_create_info.intersectionShader = VK_SHADER_UNUSED_NV; VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 1; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingPipelineCreateInfoNV-stage-06232"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // Two raygen stages { VkPipelineShaderStageCreateInfo stage_create_infos[2] = {}; stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; stage_create_infos[0].module = rgen_shader.handle(); stage_create_infos[0].pName = "main"; stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[1].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; stage_create_infos[1].module = rgen_shader.handle(); stage_create_infos[1].pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_infos[2] = {}; group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[0].type = 
VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV; group_create_infos[0].generalShader = 0; group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_NV; group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV; group_create_infos[1].generalShader = 1; group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_NV; VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 2; pipeline_ci.pStages = stage_create_infos; pipeline_ci.groupCount = 2; pipeline_ci.pGroups = group_create_infos; pipeline_ci.layout = empty_pipeline_layout.handle(); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyNotFound(); vk::DestroyPipeline(m_device->device(), pipeline, NULL); } // General shader index doesn't exist { VkPipelineShaderStageCreateInfo stage_create_info = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_info.stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; stage_create_info.module = rgen_shader.handle(); stage_create_info.pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_info = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_info.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_info.generalShader = 1; // Bad index here group_create_info.closestHitShader = VK_SHADER_UNUSED_NV; group_create_info.anyHitShader = VK_SHADER_UNUSED_NV; group_create_info.intersectionShader = VK_SHADER_UNUSED_NV; VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 1; pipeline_ci.pStages = 
&stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02413"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // General shader index doesn't correspond to a raygen/miss/callable shader { VkPipelineShaderStageCreateInfo stage_create_infos[2] = {}; stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; stage_create_infos[0].module = rgen_shader.handle(); stage_create_infos[0].pName = "main"; stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[1].stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV; stage_create_infos[1].module = chit_shader.handle(); stage_create_infos[1].pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_infos[2] = {}; group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[0].generalShader = 0; group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_NV; group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[1].generalShader = 1; // Index 1 corresponds to a closest hit shader group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_NV; VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 2; pipeline_ci.pStages = 
stage_create_infos; pipeline_ci.groupCount = 2; pipeline_ci.pGroups = group_create_infos; pipeline_ci.layout = empty_pipeline_layout.handle(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02413"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // General shader group should not specify non general shader { VkPipelineShaderStageCreateInfo stage_create_infos[2] = {}; stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; stage_create_infos[0].module = rgen_shader.handle(); stage_create_infos[0].pName = "main"; stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[1].stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV; stage_create_infos[1].module = chit_shader.handle(); stage_create_infos[1].pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_infos[2] = {}; group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[0].generalShader = 0; group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_NV; group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[1].generalShader = 0; group_create_infos[1].closestHitShader = 0; // This should not be set for a general shader group group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_NV; VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 2; pipeline_ci.pStages = stage_create_infos; 
pipeline_ci.groupCount = 2; pipeline_ci.pGroups = group_create_infos; pipeline_ci.layout = empty_pipeline_layout.handle(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02414"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // Intersection shader invalid index { VkPipelineShaderStageCreateInfo stage_create_infos[2] = {}; stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; stage_create_infos[0].module = rgen_shader.handle(); stage_create_infos[0].pName = "main"; stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[1].stage = VK_SHADER_STAGE_INTERSECTION_BIT_NV; stage_create_infos[1].module = intr_shader.handle(); stage_create_infos[1].pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_infos[2] = {}; group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[0].generalShader = 0; group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_NV; group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV; group_create_infos[1].generalShader = VK_SHADER_UNUSED_NV; group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].intersectionShader = 5; // invalid index VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 2; pipeline_ci.pStages = stage_create_infos; pipeline_ci.groupCount = 2; pipeline_ci.pGroups = 
group_create_infos; pipeline_ci.layout = empty_pipeline_layout.handle(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02415"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // Intersection shader index does not correspond to intersection shader { VkPipelineShaderStageCreateInfo stage_create_infos[2] = {}; stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; stage_create_infos[0].module = rgen_shader.handle(); stage_create_infos[0].pName = "main"; stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[1].stage = VK_SHADER_STAGE_INTERSECTION_BIT_NV; stage_create_infos[1].module = intr_shader.handle(); stage_create_infos[1].pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_infos[2] = {}; group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[0].generalShader = 0; group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_NV; group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_NV; group_create_infos[1].generalShader = VK_SHADER_UNUSED_NV; group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].intersectionShader = 0; // Index 0 corresponds to a raygen shader VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 2; pipeline_ci.pStages = stage_create_infos; pipeline_ci.groupCount = 2; pipeline_ci.pGroups = 
group_create_infos; pipeline_ci.layout = empty_pipeline_layout.handle(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02415"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // Intersection shader must not be specified for triangle hit group { VkPipelineShaderStageCreateInfo stage_create_infos[2] = {}; stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; stage_create_infos[0].module = rgen_shader.handle(); stage_create_infos[0].pName = "main"; stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[1].stage = VK_SHADER_STAGE_INTERSECTION_BIT_NV; stage_create_infos[1].module = intr_shader.handle(); stage_create_infos[1].pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_infos[2] = {}; group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[0].generalShader = 0; group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_NV; group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV; group_create_infos[1].generalShader = VK_SHADER_UNUSED_NV; group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].intersectionShader = 1; VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 2; pipeline_ci.pStages = stage_create_infos; pipeline_ci.groupCount = 2; pipeline_ci.pGroups = group_create_infos; pipeline_ci.layout = 
empty_pipeline_layout.handle(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingShaderGroupCreateInfoNV-type-02416"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // Any hit shader index invalid { VkPipelineShaderStageCreateInfo stage_create_infos[2] = {}; stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; stage_create_infos[0].module = rgen_shader.handle(); stage_create_infos[0].pName = "main"; stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[1].stage = VK_SHADER_STAGE_ANY_HIT_BIT_NV; stage_create_infos[1].module = ahit_shader.handle(); stage_create_infos[1].pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_infos[2] = {}; group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[0].generalShader = 0; group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_NV; group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV; group_create_infos[1].generalShader = VK_SHADER_UNUSED_NV; group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].anyHitShader = 5; // Invalid index group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_NV; VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 2; pipeline_ci.pStages = stage_create_infos; pipeline_ci.groupCount = 2; pipeline_ci.pGroups = group_create_infos; pipeline_ci.layout = empty_pipeline_layout.handle(); 
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingShaderGroupCreateInfoNV-anyHitShader-02418"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // Any hit shader index does not correspond to an any hit shader { VkPipelineShaderStageCreateInfo stage_create_infos[2] = {}; stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; stage_create_infos[0].module = rgen_shader.handle(); stage_create_infos[0].pName = "main"; stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[1].stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV; stage_create_infos[1].module = chit_shader.handle(); stage_create_infos[1].pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_infos[2] = {}; group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[0].generalShader = 0; group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_NV; group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV; group_create_infos[1].generalShader = VK_SHADER_UNUSED_NV; group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].anyHitShader = 1; // Index 1 corresponds to a closest hit shader group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_NV; VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 2; pipeline_ci.pStages = stage_create_infos; pipeline_ci.groupCount = 2; pipeline_ci.pGroups = group_create_infos; pipeline_ci.layout = 
empty_pipeline_layout.handle(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingShaderGroupCreateInfoNV-anyHitShader-02418"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // Closest hit shader index invalid { VkPipelineShaderStageCreateInfo stage_create_infos[2] = {}; stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; stage_create_infos[0].module = rgen_shader.handle(); stage_create_infos[0].pName = "main"; stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[1].stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV; stage_create_infos[1].module = chit_shader.handle(); stage_create_infos[1].pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_infos[2] = {}; group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[0].generalShader = 0; group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_NV; group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV; group_create_infos[1].generalShader = VK_SHADER_UNUSED_NV; group_create_infos[1].closestHitShader = 5; // Invalid index group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_NV; VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 2; pipeline_ci.pStages = stage_create_infos; pipeline_ci.groupCount = 2; pipeline_ci.pGroups = group_create_infos; pipeline_ci.layout = empty_pipeline_layout.handle(); 
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingShaderGroupCreateInfoNV-closestHitShader-02417"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // Closest hit shader index does not correspond to an closest hit shader { VkPipelineShaderStageCreateInfo stage_create_infos[2] = {}; stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; stage_create_infos[0].module = rgen_shader.handle(); stage_create_infos[0].pName = "main"; stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_infos[1].stage = VK_SHADER_STAGE_ANY_HIT_BIT_NV; stage_create_infos[1].module = ahit_shader.handle(); stage_create_infos[1].pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_infos[2] = {}; group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV; group_create_infos[0].generalShader = 0; group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_NV; group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV; group_create_infos[1].generalShader = VK_SHADER_UNUSED_NV; group_create_infos[1].closestHitShader = 1; // Index 1 corresponds to an any hit shader group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_NV; group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_NV; VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 2; pipeline_ci.pStages = stage_create_infos; pipeline_ci.groupCount = 2; pipeline_ci.pGroups = group_create_infos; pipeline_ci.layout = 
empty_pipeline_layout.handle(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingShaderGroupCreateInfoNV-closestHitShader-02417"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, ValidateRayTracingPipelineNV) { TEST_DESCRIPTION("Validate vkCreateRayTracingPipelinesNV and CreateInfo parameters during ray-tracing pipeline creation"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_NV_RAY_TRACING_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_NV_RAY_TRACING_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_NV_RAY_TRACING_EXTENSION_NAME); return; } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); auto pipleline_features = LvlInitStruct<VkPhysicalDevicePipelineCreationCacheControlFeaturesEXT>(); auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&pipleline_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); // Set this to true as it is a required feature pipleline_features.pipelineCreationCacheControl = VK_TRUE; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); const VkPipelineLayoutObj empty_pipeline_layout(m_device, {}); const std::string empty_shader = R"glsl( #version 460 #extension GL_NV_ray_tracing : require void main() {} 
)glsl"; VkShaderObj rgen_shader(this, empty_shader, VK_SHADER_STAGE_RAYGEN_BIT_NV); VkShaderObj ahit_shader(this, empty_shader, VK_SHADER_STAGE_ANY_HIT_BIT_NV); VkShaderObj chit_shader(this, empty_shader, VK_SHADER_STAGE_CLOSEST_HIT_BIT_NV); VkShaderObj miss_shader(this, empty_shader, VK_SHADER_STAGE_MISS_BIT_NV); VkShaderObj intr_shader(this, empty_shader, VK_SHADER_STAGE_INTERSECTION_BIT_NV); VkShaderObj call_shader(this, empty_shader, VK_SHADER_STAGE_CALLABLE_BIT_NV); m_errorMonitor->VerifyNotFound(); PFN_vkCreateRayTracingPipelinesNV vkCreateRayTracingPipelinesNV = reinterpret_cast<PFN_vkCreateRayTracingPipelinesNV>(vk::GetInstanceProcAddr(instance(), "vkCreateRayTracingPipelinesNV")); ASSERT_TRUE(vkCreateRayTracingPipelinesNV != nullptr); VkPipeline pipeline = VK_NULL_HANDLE; VkPipelineShaderStageCreateInfo stage_create_info = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_info.stage = VK_SHADER_STAGE_RAYGEN_BIT_NV; ; stage_create_info.module = rgen_shader.handle(); stage_create_info.pName = "main"; VkRayTracingShaderGroupCreateInfoNV group_create_info = LvlInitStruct<VkRayTracingShaderGroupCreateInfoNV>(); group_create_info.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_NV; group_create_info.generalShader = VK_SHADER_UNUSED_NV; group_create_info.closestHitShader = VK_SHADER_UNUSED_NV; group_create_info.anyHitShader = VK_SHADER_UNUSED_NV; group_create_info.intersectionShader = VK_SHADER_UNUSED_NV; { VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 1; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); pipeline_ci.flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT; pipeline_ci.basePipelineIndex = -1; uint64_t fake_pipeline_id = 0xCADECADE; VkPipeline fake_pipeline_handle = reinterpret_cast<VkPipeline &>(fake_pipeline_id); pipeline_ci.basePipelineHandle = 
fake_pipeline_handle; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03421"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); pipeline_ci.basePipelineHandle = VK_NULL_HANDLE; pipeline_ci.basePipelineIndex = 10; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCreateRayTracingPipelinesNV-flags-03415"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03422"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } { VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 1; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); pipeline_ci.flags = VK_PIPELINE_CREATE_DEFER_COMPILE_BIT_NV | VK_PIPELINE_CREATE_FAIL_ON_PIPELINE_COMPILE_REQUIRED_BIT_EXT; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoNV-flags-02957"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } { VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 1; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); pipeline_ci.flags = VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoNV-flags-02904"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, 
&pipeline); m_errorMonitor->VerifyFound(); pipeline_ci.flags = VK_PIPELINE_CREATE_LIBRARY_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03456"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03458"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03459"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03460"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03461"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03462"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, 
&pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03463"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoNV-flags-03588"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_ALLOW_MOTION_BIT_NV; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoNV-flags-04948"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // test for vkCreateRayTracingPipelinesNV { VkRayTracingPipelineCreateInfoNV pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoNV>(); pipeline_ci.stageCount = 1; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); // appending twice as it is generated twice in auto-validation code m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCreateRayTracingPipelinesNV-createInfoCount-arraylength"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCreateRayTracingPipelinesNV-createInfoCount-arraylength"); vkCreateRayTracingPipelinesNV(m_device->handle(), VK_NULL_HANDLE, 0, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, RayTracingPipelineCreateInfoKHR) { TEST_DESCRIPTION("Validate CreateInfo parameters 
during ray-tracing pipeline creation"); SetTargetApiVersion(VK_API_VERSION_1_1); if (!AddRequiredInstanceExtensions(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework()); if (AddRequiredDeviceExtensions(VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME) && AddRequiredDeviceExtensions(VK_KHR_RAY_QUERY_EXTENSION_NAME)) { AddRequiredDeviceExtensions(VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME); AddRequiredDeviceExtensions(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); AddRequiredDeviceExtensions(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); AddRequiredDeviceExtensions(VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME); AddRequiredDeviceExtensions(VK_KHR_SPIRV_1_4_EXTENSION_NAME); AddRequiredDeviceExtensions(VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME); AddRequiredDeviceExtensions(VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME); return; } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); auto ray_tracing_features = LvlInitStruct<VkPhysicalDeviceRayTracingPipelineFeaturesKHR>(); auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&ray_tracing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); if (!ray_tracing_features.rayTracingPipeline) { printf("%s Feature rayTracing is not supported.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); const VkPipelineLayoutObj empty_pipeline_layout(m_device, {}); const std::string empty_shader = R"glsl( #version 460 #extension GL_NV_ray_tracing : require void main() {} )glsl"; VkShaderObj rgen_shader(this, empty_shader, 
VK_SHADER_STAGE_RAYGEN_BIT_KHR); VkShaderObj ahit_shader(this, empty_shader, VK_SHADER_STAGE_ANY_HIT_BIT_KHR); VkShaderObj chit_shader(this, empty_shader, VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR); VkShaderObj miss_shader(this, empty_shader, VK_SHADER_STAGE_MISS_BIT_KHR); VkShaderObj intr_shader(this, empty_shader, VK_SHADER_STAGE_INTERSECTION_BIT_KHR); VkShaderObj call_shader(this, empty_shader, VK_SHADER_STAGE_CALLABLE_BIT_KHR); m_errorMonitor->VerifyNotFound(); PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR = reinterpret_cast<PFN_vkCreateRayTracingPipelinesKHR>(vk::GetInstanceProcAddr(instance(), "vkCreateRayTracingPipelinesKHR")); ASSERT_TRUE(vkCreateRayTracingPipelinesKHR != nullptr); VkPipeline pipeline = VK_NULL_HANDLE; VkPipelineShaderStageCreateInfo stage_create_info = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_info.stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR; stage_create_info.module = rgen_shader.handle(); stage_create_info.pName = "main"; VkRayTracingShaderGroupCreateInfoKHR group_create_info = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>(); group_create_info.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR; group_create_info.generalShader = 1; // Bad index here group_create_info.closestHitShader = VK_SHADER_UNUSED_KHR; group_create_info.anyHitShader = VK_SHADER_UNUSED_KHR; group_create_info.intersectionShader = VK_SHADER_UNUSED_KHR; VkPipelineLibraryCreateInfoKHR library_count_zero = {VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR, NULL, 0}; VkPipelineLibraryCreateInfoKHR library_count_one = {VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR, NULL, 1}; { VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>(); pipeline_ci.pLibraryInfo = &library_count_zero; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); pipeline_ci.stageCount = 0; 
m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoKHR-pLibraryInfo-03600"); vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); pipeline_ci.stageCount = 1; pipeline_ci.groupCount = 0; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoKHR-pLibraryInfo-03601"); vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); pipeline_ci.groupCount = 1; } { VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>(); pipeline_ci.pLibraryInfo = &library_count_one; pipeline_ci.stageCount = 1; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); pipeline_ci.pLibraryInterface = NULL; m_errorMonitor->SetUnexpectedError("VUID-VkPipelineLibraryCreateInfoKHR-pLibraries-parameter"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoKHR-pLibraryInfo-03590"); vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } { VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>(); pipeline_ci.pLibraryInfo = &library_count_zero; pipeline_ci.stageCount = 1; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); pipeline_ci.flags = VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-02904"); vkCreateRayTracingPipelinesKHR(m_device->handle(), 
VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } { VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>(); pipeline_ci.pLibraryInfo = &library_count_zero; pipeline_ci.stageCount = 1; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); pipeline_ci.flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT; pipeline_ci.basePipelineIndex = -1; uint64_t fake_pipeline_id = 0xCADECADE; VkPipeline fake_pipeline_handle = reinterpret_cast<VkPipeline &>(fake_pipeline_id); pipeline_ci.basePipelineHandle = fake_pipeline_handle; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-03421"); vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); pipeline_ci.basePipelineHandle = VK_NULL_HANDLE; pipeline_ci.basePipelineIndex = 10; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCreateRayTracingPipelinesKHR-flags-03415"); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-03422"); vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } { VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>(); pipeline_ci.pLibraryInfo = &library_count_zero; pipeline_ci.stageCount = 1; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); pipeline_ci.flags = VK_PIPELINE_CREATE_LIBRARY_BIT_KHR; pipeline_ci.pLibraryInterface = NULL; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, 
"VUID-VkRayTracingPipelineCreateInfoKHR-flags-03465"); vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } { VkDynamicState dynamic_state = VK_DYNAMIC_STATE_BLEND_CONSTANTS; VkPipelineDynamicStateCreateInfo dynamic_states = LvlInitStruct<VkPipelineDynamicStateCreateInfo>(); dynamic_states.dynamicStateCount = 1; dynamic_states.pDynamicStates = &dynamic_state; VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>(); pipeline_ci.pLibraryInfo = &library_count_zero; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); pipeline_ci.stageCount = 1; pipeline_ci.pDynamicState = &dynamic_states; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingPipelineCreateInfoKHR-pDynamicStates-03602"); vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } { group_create_info.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR; VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>(); pipeline_ci.pLibraryInfo = &library_count_zero; pipeline_ci.stageCount = 1; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-03470"); vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); group_create_info.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR; 
pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-03471"); vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); group_create_info.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR; pipeline_ci.flags = VK_PIPELINE_CREATE_DISPATCH_BASE; m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCreateRayTracingPipelinesKHR-flags-03816"); vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, RayTracingPipelineShaderGroupsKHR) { TEST_DESCRIPTION("Validate shader groups during ray-tracing pipeline creation"); SetTargetApiVersion(VK_API_VERSION_1_2); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SPIRV_1_4_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME); 
m_device_extension_names.push_back(VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME); return; } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); auto ray_tracing_features = LvlInitStruct<VkPhysicalDeviceRayTracingPipelineFeaturesKHR>(); auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&ray_tracing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); if (!ray_tracing_features.rayTracingPipeline) { printf("%s Feature rayTracing is not supported.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); const VkPipelineLayoutObj empty_pipeline_layout(m_device, {}); const std::string empty_shader = R"glsl( #version 460 #extension GL_EXT_ray_tracing : require void main() {} )glsl"; VkShaderObj rgen_shader(this, empty_shader, VK_SHADER_STAGE_RAYGEN_BIT_KHR, SPV_ENV_VULKAN_1_2); VkShaderObj ahit_shader(this, empty_shader, VK_SHADER_STAGE_ANY_HIT_BIT_KHR, SPV_ENV_VULKAN_1_2); VkShaderObj chit_shader(this, empty_shader, VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR, SPV_ENV_VULKAN_1_2); VkShaderObj miss_shader(this, empty_shader, VK_SHADER_STAGE_MISS_BIT_KHR, SPV_ENV_VULKAN_1_2); VkShaderObj intr_shader(this, empty_shader, VK_SHADER_STAGE_INTERSECTION_BIT_KHR, SPV_ENV_VULKAN_1_2); VkShaderObj call_shader(this, empty_shader, VK_SHADER_STAGE_CALLABLE_BIT_KHR, SPV_ENV_VULKAN_1_2); m_errorMonitor->VerifyNotFound(); PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR = reinterpret_cast<PFN_vkCreateRayTracingPipelinesKHR>(vk::GetInstanceProcAddr(instance(), "vkCreateRayTracingPipelinesKHR")); ASSERT_TRUE(vkCreateRayTracingPipelinesKHR != nullptr); VkPipeline pipeline = VK_NULL_HANDLE; VkPipelineLibraryCreateInfoKHR library_info = 
{VK_STRUCTURE_TYPE_PIPELINE_LIBRARY_CREATE_INFO_KHR, NULL, 0}; // No raygen stage { VkPipelineShaderStageCreateInfo stage_create_info = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_info.stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR; stage_create_info.module = chit_shader.handle(); stage_create_info.pName = "main"; VkRayTracingShaderGroupCreateInfoKHR group_create_info = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>(); group_create_info.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR; group_create_info.generalShader = VK_SHADER_UNUSED_KHR; group_create_info.closestHitShader = 0; group_create_info.anyHitShader = VK_SHADER_UNUSED_KHR; group_create_info.intersectionShader = VK_SHADER_UNUSED_KHR; VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>(); pipeline_ci.pLibraryInfo = &library_info; pipeline_ci.stageCount = 1; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = empty_pipeline_layout.handle(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkRayTracingPipelineCreateInfoKHR-stage-03425"); vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } // General shader index doesn't exist { VkPipelineShaderStageCreateInfo stage_create_info = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_info.stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR; stage_create_info.module = rgen_shader.handle(); stage_create_info.pName = "main"; VkRayTracingShaderGroupCreateInfoKHR group_create_info = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>(); group_create_info.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR; group_create_info.generalShader = 1; // Bad index here group_create_info.closestHitShader = VK_SHADER_UNUSED_KHR; group_create_info.anyHitShader = VK_SHADER_UNUSED_KHR; 
group_create_info.intersectionShader = VK_SHADER_UNUSED_KHR;

        VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>();
        pipeline_ci.pLibraryInfo = &library_info;
        pipeline_ci.stageCount = 1;
        pipeline_ci.pStages = &stage_create_info;
        pipeline_ci.groupCount = 1;
        pipeline_ci.pGroups = &group_create_info;
        pipeline_ci.layout = empty_pipeline_layout.handle();

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03474");
        vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline);
        m_errorMonitor->VerifyFound();
    }

    // Each scoped case below builds a small (1-2 stage) ray tracing pipeline whose shader
    // group indices violate exactly one VUID of VkRayTracingShaderGroupCreateInfoKHR, and
    // expects that single validation error.

    // General shader index doesn't correspond to a raygen/miss/callable shader
    {
        VkPipelineShaderStageCreateInfo stage_create_infos[2] = {};
        stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
        stage_create_infos[0].module = rgen_shader.handle();
        stage_create_infos[0].pName = "main";

        stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[1].stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR;
        stage_create_infos[1].module = chit_shader.handle();
        stage_create_infos[1].pName = "main";

        VkRayTracingShaderGroupCreateInfoKHR group_create_infos[2] = {};
        group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
        group_create_infos[0].generalShader = 0;
        group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_KHR;

        group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
        group_create_infos[1].generalShader = 1;  // Index 1 corresponds to a closest hit shader
        group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_KHR;

        VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>();
        pipeline_ci.pLibraryInfo = &library_info;
        pipeline_ci.stageCount = 2;
        pipeline_ci.pStages = stage_create_infos;
        pipeline_ci.groupCount = 2;
        pipeline_ci.pGroups = group_create_infos;
        pipeline_ci.layout = empty_pipeline_layout.handle();

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03474");
        vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline);
        m_errorMonitor->VerifyFound();
    }

    // General shader group should not specify non general shader
    {
        VkPipelineShaderStageCreateInfo stage_create_infos[2] = {};
        stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
        stage_create_infos[0].module = rgen_shader.handle();
        stage_create_infos[0].pName = "main";

        stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[1].stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR;
        stage_create_infos[1].module = chit_shader.handle();
        stage_create_infos[1].pName = "main";

        VkRayTracingShaderGroupCreateInfoKHR group_create_infos[2] = {};
        group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
        group_create_infos[0].generalShader = 0;
        group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_KHR;

        group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
        group_create_infos[1].generalShader = 0;
        group_create_infos[1].closestHitShader = 0;  // This should not be set for a general shader group
        group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_KHR;

        VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>();
        pipeline_ci.pLibraryInfo = &library_info;
        pipeline_ci.stageCount = 2;
        pipeline_ci.pStages = stage_create_infos;
        pipeline_ci.groupCount = 2;
        pipeline_ci.pGroups = group_create_infos;
        pipeline_ci.layout = empty_pipeline_layout.handle();

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03475");
        vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline);
        m_errorMonitor->VerifyFound();
    }

    // Intersection shader invalid index
    {
        VkPipelineShaderStageCreateInfo stage_create_infos[2] = {};
        stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
        stage_create_infos[0].module = rgen_shader.handle();
        stage_create_infos[0].pName = "main";

        stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[1].stage = VK_SHADER_STAGE_INTERSECTION_BIT_KHR;
        stage_create_infos[1].module = intr_shader.handle();
        stage_create_infos[1].pName = "main";

        VkRayTracingShaderGroupCreateInfoKHR group_create_infos[2] = {};
        group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
        group_create_infos[0].generalShader = 0;
        group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_KHR;

        group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR;
        group_create_infos[1].generalShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].intersectionShader = 5;  // invalid index

        VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>();
        pipeline_ci.pLibraryInfo = &library_info;
        pipeline_ci.stageCount = 2;
        pipeline_ci.pStages = stage_create_infos;
        pipeline_ci.groupCount = 2;
        pipeline_ci.pGroups = group_create_infos;
        pipeline_ci.layout = empty_pipeline_layout.handle();

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03476");
        vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline);
        m_errorMonitor->VerifyFound();
    }

    // Intersection shader index does not correspond to intersection shader
    {
        VkPipelineShaderStageCreateInfo stage_create_infos[2] = {};
        stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
        stage_create_infos[0].module = rgen_shader.handle();
        stage_create_infos[0].pName = "main";

        stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[1].stage = VK_SHADER_STAGE_INTERSECTION_BIT_KHR;
        stage_create_infos[1].module = intr_shader.handle();
        stage_create_infos[1].pName = "main";

        VkRayTracingShaderGroupCreateInfoKHR group_create_infos[2] = {};
        group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
        group_create_infos[0].generalShader = 0;
        group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_KHR;

        group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_PROCEDURAL_HIT_GROUP_KHR;
        group_create_infos[1].generalShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].intersectionShader = 0;  // Index 0 corresponds to a raygen shader

        VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>();
        pipeline_ci.pLibraryInfo = &library_info;
        pipeline_ci.stageCount = 2;
        pipeline_ci.pStages = stage_create_infos;
        pipeline_ci.groupCount = 2;
        pipeline_ci.pGroups = group_create_infos;
        pipeline_ci.layout = empty_pipeline_layout.handle();

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03476");
        vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline);
        m_errorMonitor->VerifyFound();
    }

    // Intersection shader must not be specified for triangle hit group
    {
        VkPipelineShaderStageCreateInfo stage_create_infos[2] = {};
        stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
        stage_create_infos[0].module = rgen_shader.handle();
        stage_create_infos[0].pName = "main";

        stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[1].stage = VK_SHADER_STAGE_INTERSECTION_BIT_KHR;
        stage_create_infos[1].module = intr_shader.handle();
        stage_create_infos[1].pName = "main";

        VkRayTracingShaderGroupCreateInfoKHR group_create_infos[2] = {};
        group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
        group_create_infos[0].generalShader = 0;
        group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_KHR;

        group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR;
        group_create_infos[1].generalShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].intersectionShader = 1;

        VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>();
        pipeline_ci.pLibraryInfo = &library_info;
        pipeline_ci.stageCount = 2;
        pipeline_ci.pStages = stage_create_infos;
        pipeline_ci.groupCount = 2;
        pipeline_ci.pGroups = group_create_infos;
        pipeline_ci.layout = empty_pipeline_layout.handle();

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkRayTracingShaderGroupCreateInfoKHR-type-03477");
        vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline);
        m_errorMonitor->VerifyFound();
    }

    // Any hit shader index invalid
    {
        VkPipelineShaderStageCreateInfo stage_create_infos[2] = {};
        stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
        stage_create_infos[0].module = rgen_shader.handle();
        stage_create_infos[0].pName = "main";

        stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[1].stage = VK_SHADER_STAGE_ANY_HIT_BIT_KHR;
        stage_create_infos[1].module = ahit_shader.handle();
        stage_create_infos[1].pName = "main";

        VkRayTracingShaderGroupCreateInfoKHR group_create_infos[2] = {};
        group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
        group_create_infos[0].generalShader = 0;
        group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_KHR;

        group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR;
        group_create_infos[1].generalShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].anyHitShader = 5;  // Invalid index (comment was garbled as "IKHRalid" by an NV->KHR rename)
        group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_KHR;

        VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>();
        pipeline_ci.pLibraryInfo = &library_info;
        pipeline_ci.stageCount = 2;
        pipeline_ci.pStages = stage_create_infos;
        pipeline_ci.groupCount = 2;
        pipeline_ci.pGroups = group_create_infos;
        pipeline_ci.layout = empty_pipeline_layout.handle();

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkRayTracingShaderGroupCreateInfoKHR-anyHitShader-03479");
        vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline);
        m_errorMonitor->VerifyFound();
    }

    // Any hit shader index does not correspond to an any hit shader
    {
        VkPipelineShaderStageCreateInfo stage_create_infos[2] = {};
        stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
        stage_create_infos[0].module = rgen_shader.handle();
        stage_create_infos[0].pName = "main";

        stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[1].stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR;
        stage_create_infos[1].module = chit_shader.handle();
        stage_create_infos[1].pName = "main";

        VkRayTracingShaderGroupCreateInfoKHR group_create_infos[2] = {};
        group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
        group_create_infos[0].generalShader = 0;
        group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_KHR;

        group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR;
        group_create_infos[1].generalShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].anyHitShader = 1;  // Index 1 corresponds to a closest hit shader
        group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_KHR;

        VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>();
        pipeline_ci.pLibraryInfo = &library_info;
        pipeline_ci.stageCount = 2;
        pipeline_ci.pStages = stage_create_infos;
        pipeline_ci.groupCount = 2;
        pipeline_ci.pGroups = group_create_infos;
        pipeline_ci.layout = empty_pipeline_layout.handle();

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkRayTracingShaderGroupCreateInfoKHR-anyHitShader-03479");
        vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline);
        m_errorMonitor->VerifyFound();
    }

    // Closest hit shader index invalid
    {
        VkPipelineShaderStageCreateInfo stage_create_infos[2] = {};
        stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
        stage_create_infos[0].module = rgen_shader.handle();
        stage_create_infos[0].pName = "main";

        stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[1].stage = VK_SHADER_STAGE_CLOSEST_HIT_BIT_KHR;
        stage_create_infos[1].module = chit_shader.handle();
        stage_create_infos[1].pName = "main";

        VkRayTracingShaderGroupCreateInfoKHR group_create_infos[2] = {};
        group_create_infos[0] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
        group_create_infos[0].generalShader = 0;
        group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_KHR;

        group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR;
        group_create_infos[1].generalShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].closestHitShader = 5;  // invalid index
        group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_KHR;

        VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>();
        pipeline_ci.pLibraryInfo = &library_info;
        pipeline_ci.stageCount = 2;
        pipeline_ci.pStages = stage_create_infos;
        pipeline_ci.groupCount = 2;
        pipeline_ci.pGroups = group_create_infos;
        pipeline_ci.layout = empty_pipeline_layout.handle();

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkRayTracingShaderGroupCreateInfoKHR-closestHitShader-03478");
        vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline);
        m_errorMonitor->VerifyFound();
    }

    // Closest hit shader index does not correspond to an closest hit shader
    {
        VkPipelineShaderStageCreateInfo stage_create_infos[2] = {};
        stage_create_infos[0] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[0].stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR;
        stage_create_infos[0].module = rgen_shader.handle();
        stage_create_infos[0].pName = "main";

        stage_create_infos[1] = LvlInitStruct<VkPipelineShaderStageCreateInfo>();
        stage_create_infos[1].stage = VK_SHADER_STAGE_ANY_HIT_BIT_KHR;
        stage_create_infos[1].module = ahit_shader.handle();
        stage_create_infos[1].pName = "main";

        VkRayTracingShaderGroupCreateInfoKHR group_create_infos[2] = {};
        group_create_infos[0] = 
LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[0].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR;
        group_create_infos[0].generalShader = 0;
        group_create_infos[0].closestHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[0].intersectionShader = VK_SHADER_UNUSED_KHR;

        group_create_infos[1] = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>();
        group_create_infos[1].type = VK_RAY_TRACING_SHADER_GROUP_TYPE_TRIANGLES_HIT_GROUP_KHR;
        group_create_infos[1].generalShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].closestHitShader = 1;  // Index 1 corresponds to an any hit shader
        group_create_infos[1].anyHitShader = VK_SHADER_UNUSED_KHR;
        group_create_infos[1].intersectionShader = VK_SHADER_UNUSED_KHR;

        VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>();
        pipeline_ci.pLibraryInfo = &library_info;
        pipeline_ci.stageCount = 2;
        pipeline_ci.pStages = stage_create_infos;
        pipeline_ci.groupCount = 2;
        pipeline_ci.pGroups = group_create_infos;
        pipeline_ci.layout = empty_pipeline_layout.handle();

        m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT,
                                             "VUID-VkRayTracingShaderGroupCreateInfoKHR-closestHitShader-03478");
        vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline);
        m_errorMonitor->VerifyFound();
    }
}

// Conditional-rendering pipeline stages are only valid on queues with graphics or compute
// capability; a transfer-only queue must reject a barrier that uses them.
TEST_F(VkLayerTest, PipelineStageConditionalRenderingWithWrongQueue) {
    TEST_DESCRIPTION("Run CmdPipelineBarrier with VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT and wrong VkQueueFlagBits");
    ASSERT_NO_FATAL_FAILURE(Init());

    // Find a queue family that supports ONLY transfer (exact flag match, not a superset).
    uint32_t only_transfer_queueFamilyIndex = UINT32_MAX;
    const auto q_props = vk_testing::PhysicalDevice(gpu()).queue_properties();
    ASSERT_TRUE(q_props.size() > 0);
    ASSERT_TRUE(q_props[0].queueCount > 0);
    for (uint32_t i = 0; i < (uint32_t)q_props.size(); i++) {
        if (q_props[i].queueFlags == VK_QUEUE_TRANSFER_BIT) {
            only_transfer_queueFamilyIndex = i;
            break;
        }
    }
    if (only_transfer_queueFamilyIndex == UINT32_MAX) {
        printf("%s Only VK_QUEUE_TRANSFER_BIT Queue is not supported.\n", kSkipPrefix);
        return;
    }

    // A renderpass with a single subpass that declared a self-dependency
    VkAttachmentDescription attach[] = {
        {0, VK_FORMAT_R8G8B8A8_UNORM, VK_SAMPLE_COUNT_1_BIT, VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE,
         VK_ATTACHMENT_LOAD_OP_DONT_CARE, VK_ATTACHMENT_STORE_OP_DONT_CARE, VK_IMAGE_LAYOUT_UNDEFINED,
         VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL},
    };
    VkAttachmentReference ref = {0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
    VkSubpassDescription subpasses[] = {
        {0, VK_PIPELINE_BIND_POINT_GRAPHICS, 0, nullptr, 1, &ref, nullptr, nullptr, 0, nullptr},
    };
    VkSubpassDependency dependency = {0,
                                      0,
                                      VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
                                      VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT,
                                      VK_ACCESS_SHADER_WRITE_BIT,
                                      VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT,
                                      (VkDependencyFlags)0};
    VkRenderPassCreateInfo rpci = {VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, nullptr, 0, 1, attach, 1, subpasses, 1, &dependency};
    VkRenderPass rp;
    vk::CreateRenderPass(m_device->device(), &rpci, nullptr, &rp);

    VkImageObj image(m_device);
    image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT);
    VkImageView imageView = image.targetView(VK_FORMAT_R8G8B8A8_UNORM);

    VkFramebufferCreateInfo fbci = {VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, nullptr, 0, rp, 1, &imageView, 32, 32, 1};
    VkFramebuffer fb;
    vk::CreateFramebuffer(m_device->device(), &fbci, nullptr, &fb);

    // Record on the transfer-only queue family so the barrier's stages are unsupported.
    VkCommandPoolObj commandPool(m_device, only_transfer_queueFamilyIndex);
    VkCommandBufferObj commandBuffer(m_device, &commandPool);

    commandBuffer.begin();
    VkRenderPassBeginInfo rpbi = {VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO, nullptr, rp, fb, {{ 0, 0, }, {32, 32}}, 0, nullptr};
    vk::CmdBeginRenderPass(commandBuffer.handle(), &rpbi, VK_SUBPASS_CONTENTS_INLINE);

    VkImageMemoryBarrier imb = LvlInitStruct<VkImageMemoryBarrier>();
    imb.srcAccessMask = VK_ACCESS_SHADER_WRITE_BIT;
    imb.dstAccessMask = VK_ACCESS_CONDITIONAL_RENDERING_READ_BIT_EXT;
    imb.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    imb.newLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
    imb.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    imb.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
    imb.image = image.handle();
    imb.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
    imb.subresourceRange.baseMipLevel = 0;
    imb.subresourceRange.levelCount = 1;
    imb.subresourceRange.baseArrayLayer = 0;
    imb.subresourceRange.layerCount = 1;

    // Both the src and dst stage masks are invalid for this queue -> two errors expected.
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdPipelineBarrier-srcStageMask-06461");
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdPipelineBarrier-dstStageMask-06462");
    vk::CmdPipelineBarrier(commandBuffer.handle(), VK_PIPELINE_STAGE_VERTEX_SHADER_BIT,
                           VK_PIPELINE_STAGE_CONDITIONAL_RENDERING_BIT_EXT, 0, 0, nullptr, 0, nullptr, 1, &imb);
    m_errorMonitor->VerifyFound();

    vk::CmdEndRenderPass(commandBuffer.handle());
    commandBuffer.end();
    vk::DestroyRenderPass(m_device->device(), rp, nullptr);
    vk::DestroyFramebuffer(m_device->device(), fb, nullptr);
}

// Declaring a *ArrayDynamicIndexing SPIR-V capability while the matching device feature
// is disabled must fail shader module validation (VUID-VkShaderModuleCreateInfo-pCode-01091).
TEST_F(VkLayerTest, CreatePipelineDynamicUniformIndex) {
    TEST_DESCRIPTION("Check for the array dynamic array index features when the SPIR-V capabilities are requested.");

    VkPhysicalDeviceFeatures features{};
    features.shaderUniformBufferArrayDynamicIndexing = VK_FALSE;
    ASSERT_NO_FATAL_FAILURE(Init(&features));

    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Minimal fragment shader body; each case below prepends one OpCapability line.
    std::string const source{R"(
               OpMemoryModel Logical GLSL450
               OpEntryPoint Fragment %main "main"
               OpExecutionMode %main OriginUpperLeft
               OpSource GLSL 450
               OpName %main "main"
       %void = OpTypeVoid
          %3 = OpTypeFunction %void
       %main = OpFunction %void None %3
          %5 = OpLabel
               OpReturn
               OpFunctionEnd)"};

    {
        std::string const capability{"OpCapability UniformBufferArrayDynamicIndexing"};
        VkShaderObj fs(this, capability + source, VK_SHADER_STAGE_FRAGMENT_BIT,
SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM); auto info_override = [&](CreatePipelineHelper &info) { info.shader_stages_ = {info.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkShaderModuleCreateInfo-pCode-01091"); } { std::string const capability{"OpCapability SampledImageArrayDynamicIndexing"}; VkShaderObj fs(this, capability + source, VK_SHADER_STAGE_FRAGMENT_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM); auto info_override = [&](CreatePipelineHelper &info) { info.shader_stages_ = {info.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkShaderModuleCreateInfo-pCode-01091"); } { std::string const capability{"OpCapability StorageBufferArrayDynamicIndexing"}; VkShaderObj fs(this, capability + source, VK_SHADER_STAGE_FRAGMENT_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM); auto info_override = [&](CreatePipelineHelper &info) { info.shader_stages_ = {info.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkShaderModuleCreateInfo-pCode-01091"); } { std::string const capability{"OpCapability StorageImageArrayDynamicIndexing"}; VkShaderObj fs(this, capability + source, VK_SHADER_STAGE_FRAGMENT_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM); auto info_override = [&](CreatePipelineHelper &info) { info.shader_stages_ = {info.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkShaderModuleCreateInfo-pCode-01091"); } } TEST_F(VkLayerTest, VertexStoresAndAtomicsFeatureDisable) { TEST_DESCRIPTION("Run shader with StoreOp or AtomicOp to verify if vertexPipelineStoresAndAtomics disable."); VkPhysicalDeviceFeatures features{}; features.vertexPipelineStoresAndAtomics = VK_FALSE; 
ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Test StoreOp { char const *vsSource = R"glsl( #version 450 layout(set=0, binding=0, rgba8) uniform image2D si0; void main() { imageStore(si0, ivec2(0), vec4(0)); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT); auto info_override = [&](CreatePipelineHelper &info) { info.shader_stages_ = {vs.GetStageCreateInfo(), info.fs_->GetStageCreateInfo()}; info.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_VERTEX_BIT, nullptr}}; }; CreatePipelineHelper::OneshotTest(*this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-RuntimeSpirv-NonWritable-06341"); } // Test AtomicOp { char const *vsSource = R"glsl( #version 450 layout(set=0, binding=0, rgba8) uniform image2D si0; void main() { imageAtomicExchange(si0, ivec2(0), 1); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); if (VK_SUCCESS == vs.InitFromGLSLTry(vsSource)) { auto info_override = [&](CreatePipelineHelper &info) { info.shader_stages_ = {vs.GetStageCreateInfo(), info.fs_->GetStageCreateInfo()}; info.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_VERTEX_BIT, nullptr}}; }; // extra VU for not enabling atomic float support CreatePipelineHelper::OneshotTest( *this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT, std::vector<string>{"VUID-RuntimeSpirv-None-06282", "VUID-RuntimeSpirv-NonWritable-06341"}); } } } TEST_F(VkLayerTest, FragmentStoresAndAtomicsFeatureDisable) { TEST_DESCRIPTION("Run shader with StoreOp or AtomicOp to verify if fragmentStoresAndAtomics disable."); VkPhysicalDeviceFeatures features{}; features.fragmentStoresAndAtomics = VK_FALSE; ASSERT_NO_FATAL_FAILURE(Init(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Test StoreOp { char const *fsSource = R"glsl( #version 450 layout(set=0, binding=0, rgba8) uniform image2D si0; void main() { imageStore(si0, ivec2(0), 
vec4(0)); } )glsl"; VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT); auto info_override = [&](CreatePipelineHelper &info) { info.shader_stages_ = {info.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()}; info.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}}; }; CreatePipelineHelper::OneshotTest(*this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-RuntimeSpirv-NonWritable-06340"); } // Test AtomicOp { char const *fsSource = R"glsl( #version 450 layout(set=0, binding=0, rgba8) uniform image2D si0; void main() { imageAtomicExchange(si0, ivec2(0), 1); } )glsl"; VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); if (VK_SUCCESS == fs.InitFromGLSLTry(fsSource)) { auto info_override = [&](CreatePipelineHelper &info) { info.shader_stages_ = {info.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()}; info.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}}; }; // extra VU for not enabling atomic float support CreatePipelineHelper::OneshotTest( *this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT, std::vector<string>{"VUID-RuntimeSpirv-None-06282", "VUID-RuntimeSpirv-NonWritable-06340"}); } } } TEST_F(VkLayerTest, DuplicateDynamicStates) { TEST_DESCRIPTION("Create a pipeline with duplicate dynamic states set."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDynamicState dynamic_states[4] = {VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, VK_DYNAMIC_STATE_STENCIL_WRITE_MASK, VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK, VK_DYNAMIC_STATE_STENCIL_REFERENCE}; CreatePipelineHelper pipe(*this); pipe.InitInfo(); pipe.InitState(); pipe.dyn_state_ci_ = LvlInitStruct<VkPipelineDynamicStateCreateInfo>(); pipe.dyn_state_ci_.flags = 0; pipe.dyn_state_ci_.dynamicStateCount = 4; pipe.dyn_state_ci_.pDynamicStates = dynamic_states; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, 
"VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442"); pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); // Should error twice since 2 sets of duplicates now dynamic_states[3] = VK_DYNAMIC_STATE_STENCIL_WRITE_MASK; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineDynamicStateCreateInfo-pDynamicStates-01442"); pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, NonGraphicsDynamicStates) { TEST_DESCRIPTION("Create a pipeline with non graphics dynamic states set."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkDynamicState dynamic_state = VK_DYNAMIC_STATE_MAX_ENUM; CreatePipelineHelper pipe(*this); pipe.InitInfo(); pipe.InitState(); pipe.dyn_state_ci_ = LvlInitStruct<VkPipelineDynamicStateCreateInfo>(); pipe.dyn_state_ci_.flags = 0; pipe.dyn_state_ci_.dynamicStateCount = 1; pipe.dyn_state_ci_.pDynamicStates = &dynamic_state; dynamic_state = VK_DYNAMIC_STATE_RAY_TRACING_PIPELINE_STACK_SIZE_KHR; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pDynamicStates-03578"); pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, PipelineMaxPerStageResources) { TEST_DESCRIPTION("Check case where pipeline is created that exceeds maxPerStageResources"); if (!EnableDeviceProfileLayer()) { printf("%s Failed to enable device profile layer.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT = (PFN_vkSetPhysicalDeviceLimitsEXT)vk::GetInstanceProcAddr(instance(), "vkSetPhysicalDeviceLimitsEXT"); PFN_vkGetOriginalPhysicalDeviceLimitsEXT fpvkGetOriginalPhysicalDeviceLimitsEXT = (PFN_vkGetOriginalPhysicalDeviceLimitsEXT)vk::GetInstanceProcAddr(instance(), "vkGetOriginalPhysicalDeviceLimitsEXT"); if 
(!(fpvkSetPhysicalDeviceLimitsEXT) || !(fpvkGetOriginalPhysicalDeviceLimitsEXT)) {
        printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix);
        return;
    }

    // Spec requires a minimum of 128 so know this is setting it lower than that
    const uint32_t maxPerStageResources = 4;
    VkPhysicalDeviceProperties props;
    fpvkGetOriginalPhysicalDeviceLimitsEXT(gpu(), &props.limits);
    props.limits.maxPerStageResources = maxPerStageResources;
    fpvkSetPhysicalDeviceLimitsEXT(gpu(), &props.limits);

    ASSERT_NO_FATAL_FAILURE(InitState());
    // Adds the one color attachment
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // A case where it shouldn't error because no single stage is over limit
    std::vector<VkDescriptorSetLayoutBinding> layout_bindings_normal = {
        {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, maxPerStageResources, VK_SHADER_STAGE_VERTEX_BIT, nullptr},
        {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}};
    // vertex test
    std::vector<VkDescriptorSetLayoutBinding> layout_bindings_vert = {
        {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, maxPerStageResources, VK_SHADER_STAGE_VERTEX_BIT, nullptr},
        {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};
    // fragment only has it at the limit because color attachment should push it over
    std::vector<VkDescriptorSetLayoutBinding> layout_bindings_frag = {
        {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, maxPerStageResources, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}};
    // compute test
    std::vector<VkDescriptorSetLayoutBinding> layout_bindings_comp = {
        {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, maxPerStageResources, VK_SHADER_STAGE_COMPUTE_BIT, nullptr},
        {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};
    // Have case where it pushes limit from two setLayouts instead of two setLayoutBindings
    std::vector<VkDescriptorSetLayoutBinding> layout_binding_combined0 = {
        {0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, maxPerStageResources, VK_SHADER_STAGE_VERTEX_BIT, nullptr}};
    std::vector<VkDescriptorSetLayoutBinding> layout_binding_combined1 = {
        {1, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_VERTEX_BIT, nullptr}};

    const VkDescriptorSetLayoutObj ds_layout_normal(m_device, layout_bindings_normal);
    const VkDescriptorSetLayoutObj ds_layout_vert(m_device, layout_bindings_vert);
    const VkDescriptorSetLayoutObj ds_layout_frag(m_device, layout_bindings_frag);
    const VkDescriptorSetLayoutObj ds_layout_comp(m_device, layout_bindings_comp);
    const VkDescriptorSetLayoutObj ds_layout_combined0(m_device, layout_binding_combined0);
    const VkDescriptorSetLayoutObj ds_layout_combined1(m_device, layout_binding_combined1);

    CreateComputePipelineHelper compute_pipe(*this);
    compute_pipe.InitInfo();
    compute_pipe.InitShaderInfo();
    compute_pipe.InitState();
    compute_pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&ds_layout_comp});

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkComputePipelineCreateInfo-layout-01687");
    compute_pipe.CreateComputePipeline();
    m_errorMonitor->VerifyFound();

    CreatePipelineHelper graphics_pipe(*this);
    graphics_pipe.InitInfo();
    graphics_pipe.InitShaderInfo();

    graphics_pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&ds_layout_normal});
    m_errorMonitor->ExpectSuccess();
    graphics_pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyNotFound();

    graphics_pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&ds_layout_vert});
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-layout-01688");
    graphics_pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyFound();

    graphics_pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&ds_layout_frag});
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-layout-01688");
    graphics_pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyFound();

    graphics_pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&ds_layout_combined0, &ds_layout_combined1});
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit,
"VUID-VkGraphicsPipelineCreateInfo-layout-01688"); graphics_pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ValidateGetRayTracingCaptureReplayShaderGroupHandlesKHR) { TEST_DESCRIPTION("Validate vkGetRayTracingCaptureReplayShaderGroupHandlesKHR."); SetTargetApiVersion(VK_API_VERSION_1_1); auto rt_pipeline_features = LvlInitStruct<VkPhysicalDeviceRayTracingPipelineFeaturesKHR>(); rt_pipeline_features.rayTracingPipelineShaderGroupHandleCaptureReplay = VK_TRUE; auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&rt_pipeline_features); if (!InitFrameworkForRayTracingTest(this, true, m_instance_extension_names, m_device_extension_names, m_errorMonitor, false, false, false, &features2)) { return; } auto ray_tracing_features = LvlInitStruct<VkPhysicalDeviceRayTracingPipelineFeaturesKHR>(); PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&ray_tracing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); if (ray_tracing_features.rayTracingPipelineShaderGroupHandleCaptureReplay == VK_FALSE) { printf("%s rayTracingShaderGroupHandleCaptureReplay not enabled.\n", kSkipPrefix); return; } CreateNVRayTracingPipelineHelper rt_pipe(*this); rt_pipe.InitInfo(true /*isKHR*/); rt_pipe.rp_ci_KHR_.flags = VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR; rt_pipe.InitState(); VkResult err = rt_pipe.CreateKHRRayTracingPipeline(); ASSERT_VK_SUCCESS(err); VkBuffer buffer; VkBufferCreateInfo buf_info = LvlInitStruct<VkBufferCreateInfo>(); buf_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT; buf_info.size = 4096; buf_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE; err = vk::CreateBuffer(device(), &buf_info, NULL, &buffer); ASSERT_VK_SUCCESS(err); VkMemoryRequirements mem_reqs; vk::GetBufferMemoryRequirements(device(), buffer, &mem_reqs); 
VkMemoryAllocateInfo alloc_info = LvlInitStruct<VkMemoryAllocateInfo>(); alloc_info.allocationSize = 4096; VkDeviceMemory mem; err = vk::AllocateMemory(device(), &alloc_info, NULL, &mem); ASSERT_VK_SUCCESS(err); vk::BindBufferMemory(device(), buffer, mem, 0); PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR vkGetRayTracingCaptureReplayShaderGroupHandlesKHR = (PFN_vkGetRayTracingCaptureReplayShaderGroupHandlesKHR)vk::GetInstanceProcAddr( instance(), "vkGetRayTracingCaptureReplayShaderGroupHandlesKHR"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-dataSize-arraylength"); vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(m_device->handle(), rt_pipe.pipeline_, 1, 1, 0, &buffer); m_errorMonitor->VerifyFound(); // dataSize must be at least VkPhysicalDeviceRayTracingPropertiesKHR::shaderGroupHandleCaptureReplaySize PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr); auto ray_tracing_properties = LvlInitStruct<VkPhysicalDeviceRayTracingPipelinePropertiesKHR>(); auto properties2 = LvlInitStruct<VkPhysicalDeviceProperties2KHR>(&ray_tracing_properties); vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2); // Check only when the reported size is if (ray_tracing_properties.shaderGroupHandleCaptureReplaySize > 0) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-dataSize-03484"); vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(m_device->handle(), rt_pipe.pipeline_, 1, 1, (ray_tracing_properties.shaderGroupHandleCaptureReplaySize - 1), &buffer); m_errorMonitor->VerifyFound(); } m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-dataSize-03484"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, 
"VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-03483"); // In CreateNVRayTracingPipelineHelper::InitKHRRayTracingPipelineInfo rp_ci_KHR_.groupCount = groups_KHR_.size(); vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(m_device->handle(), rt_pipe.pipeline_, 2, rt_pipe.groups_KHR_.size(), (ray_tracing_properties.shaderGroupHandleCaptureReplaySize - 1), &buffer); m_errorMonitor->VerifyFound(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-03483"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetRayTracingCaptureReplayShaderGroupHandlesKHR-firstGroup-04051"); // In CreateNVRayTracingPipelineHelper::InitKHRRayTracingPipelineInfo rp_ci_KHR_.groupCount = groups_KHR_.size(); uint32_t invalid_firstgroup = rt_pipe.groups_KHR_.size() + 1; vkGetRayTracingCaptureReplayShaderGroupHandlesKHR(m_device->handle(), rt_pipe.pipeline_, invalid_firstgroup, 0, (ray_tracing_properties.shaderGroupHandleCaptureReplaySize - 1), &buffer); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ValidatePipelineExecutablePropertiesFeature) { TEST_DESCRIPTION("Try making calls without pipelineExecutableInfo."); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_PIPELINE_EXECUTABLE_PROPERTIES_EXTENSION_NAME); return; } VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR pipeline_exe_features = 
        LvlInitStruct<VkPhysicalDevicePipelineExecutablePropertiesFeaturesKHR>();
    pipeline_exe_features.pipelineExecutableInfo = VK_FALSE;  // Starting with it off

    VkPhysicalDeviceFeatures2 features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&pipeline_exe_features);
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // MockICD will return 0 for the executable count
    if (IsPlatform(kMockICD) || DeviceSimulation()) {
        printf("%s Test not supported by MockICD, skipping tests\n", kSkipPrefix);
        return;
    }

    PFN_vkGetPipelineExecutableInternalRepresentationsKHR vkGetPipelineExecutableInternalRepresentationsKHR =
        (PFN_vkGetPipelineExecutableInternalRepresentationsKHR)vk::GetDeviceProcAddr(
            m_device->device(), "vkGetPipelineExecutableInternalRepresentationsKHR");
    PFN_vkGetPipelineExecutableStatisticsKHR vkGetPipelineExecutableStatisticsKHR =
        (PFN_vkGetPipelineExecutableStatisticsKHR)vk::GetDeviceProcAddr(m_device->device(),
                                                                        "vkGetPipelineExecutableStatisticsKHR");
    PFN_vkGetPipelineExecutablePropertiesKHR vkGetPipelineExecutablePropertiesKHR =
        (PFN_vkGetPipelineExecutablePropertiesKHR)vk::GetDeviceProcAddr(m_device->device(),
                                                                        "vkGetPipelineExecutablePropertiesKHR");
    ASSERT_TRUE(vkGetPipelineExecutableInternalRepresentationsKHR != nullptr);
    ASSERT_TRUE(vkGetPipelineExecutableStatisticsKHR != nullptr);
    ASSERT_TRUE(vkGetPipelineExecutablePropertiesKHR != nullptr);

    // A plain graphics pipeline to query against.
    CreatePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.InitState();
    pipe.CreateGraphicsPipeline();

    uint32_t count;
    VkPipelineExecutableInfoKHR pipeline_exe_info = LvlInitStruct<VkPipelineExecutableInfoKHR>();
    pipeline_exe_info.pipeline = pipe.pipeline_;
    pipeline_exe_info.executableIndex = 0;

    VkPipelineInfoKHR pipeline_info = LvlInitStruct<VkPipelineInfoKHR>();
    pipeline_info.pipeline = pipe.pipeline_;

    // Every query must fail: the feature is disabled, and (for the per-executable
    // queries) properties were never retrieved for this pipeline first.
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit,
                                         "VUID-vkGetPipelineExecutableInternalRepresentationsKHR-pipelineExecutableInfo-03276");
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetPipelineExecutableInternalRepresentationsKHR-pipeline-03278");
    vkGetPipelineExecutableInternalRepresentationsKHR(m_device->device(), &pipeline_exe_info, &count, nullptr);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetPipelineExecutableStatisticsKHR-pipelineExecutableInfo-03272");
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetPipelineExecutableStatisticsKHR-pipeline-03274");
    vkGetPipelineExecutableStatisticsKHR(m_device->device(), &pipeline_exe_info, &count, nullptr);
    m_errorMonitor->VerifyFound();

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetPipelineExecutablePropertiesKHR-pipelineExecutableInfo-03270");
    vkGetPipelineExecutablePropertiesKHR(m_device->device(), &pipeline_info, &count, nullptr);
    m_errorMonitor->VerifyFound();
}

// Shrinks maxSampleMaskWords to 3 via the device-profile layer, then checks
// that shaders indexing gl_SampleMaskIn/gl_SampleMask past that limit are
// rejected while in-bounds access passes.
TEST_F(VkLayerTest, LimitsMaxSampleMaskWords) {
    TEST_DESCRIPTION("Test limit of maxSampleMaskWords.");

    if (!EnableDeviceProfileLayer()) {
        printf("%s Failed to enable device profile layer.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework());

    PFN_vkSetPhysicalDeviceLimitsEXT fpvkSetPhysicalDeviceLimitsEXT =
        (PFN_vkSetPhysicalDeviceLimitsEXT)vk::GetInstanceProcAddr(instance(), "vkSetPhysicalDeviceLimitsEXT");
    PFN_vkGetOriginalPhysicalDeviceLimitsEXT fpvkGetOriginalPhysicalDeviceLimitsEXT =
        (PFN_vkGetOriginalPhysicalDeviceLimitsEXT)vk::GetInstanceProcAddr(instance(), "vkGetOriginalPhysicalDeviceLimitsEXT");
    if (!(fpvkSetPhysicalDeviceLimitsEXT) || !(fpvkGetOriginalPhysicalDeviceLimitsEXT)) {
        printf("%s Can't find device_profile_api functions; skipped.\n", kSkipPrefix);
        return;
    }

    // Set limit to match with hardcoded values in shaders
    VkPhysicalDeviceProperties props;
    fpvkGetOriginalPhysicalDeviceLimitsEXT(gpu(), &props.limits);
    props.limits.maxSampleMaskWords = 3;
    fpvkSetPhysicalDeviceLimitsEXT(gpu(), &props.limits);

    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Valid input of sample mask (indices 0 and 2 are within 3 words)
    char const *validSource = R"glsl(
        #version 450
        layout(location = 0) out vec4 uFragColor;
        void main(){
           int x = gl_SampleMaskIn[2];
           int y = gl_SampleMaskIn[0];
           uFragColor = vec4(0,1,0,1) * x * y;
        }
    )glsl";
    VkShaderObj fsValid(this, validSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto validPipeline = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fsValid.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, validPipeline, kErrorBit | kWarningBit, "", true);

    // Exceed sample mask input array size
    char const *inputSource = R"glsl(
        #version 450
        layout(location = 0) out vec4 uFragColor;
        void main(){
           int x = gl_SampleMaskIn[3];
           uFragColor = vec4(0,1,0,1) * x;
        }
    )glsl";
    VkShaderObj fsInput(this, inputSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto inputPipeline = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fsInput.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, inputPipeline, kErrorBit,
                                      "VUID-VkPipelineShaderStageCreateInfo-maxSampleMaskWords-00711");

    // Exceed sample mask output array size
    char const *outputSource = R"glsl(
        #version 450
        layout(location = 0) out vec4 uFragColor;
        void main(){
           gl_SampleMask[3] = 1;
           uFragColor = vec4(0,1,0,1);
        }
    )glsl";
    VkShaderObj fsOutput(this, outputSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    const auto outputPipeline = [&](CreatePipelineHelper &helper) {
        helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fsOutput.GetStageCreateInfo()};
    };
    CreatePipelineHelper::OneshotTest(*this, outputPipeline, kErrorBit,
                                      "VUID-VkPipelineShaderStageCreateInfo-maxSampleMaskWords-00711");
}

// Checks VkPipelineFragmentShadingRateStateCreateInfoKHR::fragmentSize limits
// (must be a power of two, 1..4 in each dimension, and non-zero).
TEST_F(VkLayerTest, InvalidFragmentShadingRatePipeline) {
    TEST_DESCRIPTION("Specify invalid fragment shading rate values");

    // Enable KHR_fragment_shading_rate and all of its required extensions
    bool fsr_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    if
(fsr_extensions) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // All of these are dependencies of VK_KHR_fragment_shading_rate.
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
    if (fsr_extensions) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
    } else {
        printf("%s requires VK_KHR_fragment_shading_rate.\n", kSkipPrefix);
        return;
    }

    VkPhysicalDeviceFragmentShadingRateFeaturesKHR fsr_features = LvlInitStruct<VkPhysicalDeviceFragmentShadingRateFeaturesKHR>();
    fsr_features.pipelineFragmentShadingRate = true;

    VkPhysicalDeviceFeatures2 device_features = LvlInitStruct<VkPhysicalDeviceFeatures2>(&fsr_features);

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &device_features));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkPipelineFragmentShadingRateStateCreateInfoKHR fsr_ci = LvlInitStruct<VkPipelineFragmentShadingRateStateCreateInfoKHR>();
    fsr_ci.fragmentSize.width = 1;
    fsr_ci.fragmentSize.height = 1;

    auto set_fsr_ci = [&](CreatePipelineHelper &helper) { helper.gp_ci_.pNext = &fsr_ci; };

    // Each case below perturbs one dimension, expects its VUID, then restores
    // the struct to the valid 1x1 baseline.
    fsr_ci.fragmentSize.width = 0;  // zero width
    CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit,
                                      "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04494");
    fsr_ci.fragmentSize.width = 1;

    fsr_ci.fragmentSize.height = 0;  // zero height
    CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit,
                                      "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04495");
    fsr_ci.fragmentSize.height = 1;

    fsr_ci.fragmentSize.width = 3;  // non-power-of-two width
    CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit,
                                      "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04496");
    fsr_ci.fragmentSize.width = 1;

    fsr_ci.fragmentSize.height = 3;  // non-power-of-two height
    CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit,
                                      "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04497");
    fsr_ci.fragmentSize.height = 1;

    fsr_ci.fragmentSize.width = 8;  // width greater than 4
    CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit,
                                      "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04498");
    fsr_ci.fragmentSize.width = 1;

    fsr_ci.fragmentSize.height = 8;  // height greater than 4
    CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit,
                                      "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04499");
    fsr_ci.fragmentSize.height = 1;
}

// Checks fragment-shading-rate pipeline state that is only legal when the
// corresponding pipeline/primitive/attachment FSR features are enabled
// (here none of them are enabled at device creation).
TEST_F(VkLayerTest, InvalidFragmentShadingRatePipelineFeatureUsage) {
    TEST_DESCRIPTION("Specify invalid fsr pipeline settings for the enabled features");

    // Enable KHR_fragment_shading_rate and all of its required extensions
    bool fsr_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    if (fsr_extensions) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
    fsr_extensions =
fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
    if (fsr_extensions) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
    } else {
        printf("%s requires VK_KHR_fragment_shading_rate.\n", kSkipPrefix);
        return;
    }

    // Note: InitState() without a features2 chain, so no FSR feature is enabled.
    ASSERT_NO_FATAL_FAILURE(InitState());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkPipelineFragmentShadingRateStateCreateInfoKHR fsr_ci = LvlInitStruct<VkPipelineFragmentShadingRateStateCreateInfoKHR>();
    fsr_ci.fragmentSize.width = 1;
    fsr_ci.fragmentSize.height = 1;

    auto set_fsr_ci = [&](CreatePipelineHelper &helper) { helper.gp_ci_.pNext = &fsr_ci; };

    // fragmentSize != 1x1 requires pipelineFragmentShadingRate.
    fsr_ci.fragmentSize.width = 2;
    CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit,
                                      "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500");
    fsr_ci.fragmentSize.width = 1;

    fsr_ci.fragmentSize.height = 2;
    CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit,
                                      "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04500");
    fsr_ci.fragmentSize.height = 1;

    // combinerOps[0] != KEEP requires primitiveFragmentShadingRate.
    fsr_ci.combinerOps[0] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR;
    CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit,
                                      "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04501");
    fsr_ci.combinerOps[0] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR;

    // combinerOps[1] != KEEP requires attachmentFragmentShadingRate.
    fsr_ci.combinerOps[1] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_REPLACE_KHR;
    CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit,
                                      "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-04502");
    fsr_ci.combinerOps[1] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR;
}

// Checks that non-trivial combiner ops are rejected when the device does not
// report fragmentShadingRateNonTrivialCombinerOps.
TEST_F(VkLayerTest, InvalidFragmentShadingRatePipelineCombinerOpsLimit) {
    TEST_DESCRIPTION("Specify invalid use of combiner ops when non trivial ops aren't supported");

    // Enable KHR_fragment_shading_rate and all of its required extensions
    bool fsr_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    if (fsr_extensions) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
    if (fsr_extensions) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
    } else {
        printf("%s requires VK_KHR_fragment_shading_rate.\n", kSkipPrefix);
        return;
    }

    PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR =
        (PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr);
    VkPhysicalDeviceFragmentShadingRatePropertiesKHR fsr_properties =
        LvlInitStruct<VkPhysicalDeviceFragmentShadingRatePropertiesKHR>();
    VkPhysicalDeviceProperties2KHR properties2 = LvlInitStruct<VkPhysicalDeviceProperties2KHR>(&fsr_properties);
    vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2);

    // The VUID only applies when the limit is NOT supported, so skip otherwise.
    if
(fsr_properties.fragmentShadingRateNonTrivialCombinerOps) {
        printf("%s requires fragmentShadingRateNonTrivialCombinerOps to be unsupported.\n", kSkipPrefix);
        return;
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
    VkPhysicalDeviceFragmentShadingRateFeaturesKHR fsr_features = LvlInitStruct<VkPhysicalDeviceFragmentShadingRateFeaturesKHR>();
    VkPhysicalDeviceFeatures2KHR features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&fsr_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
    if (!fsr_features.primitiveFragmentShadingRate && !fsr_features.attachmentFragmentShadingRate) {
        printf("%s requires primitiveFragmentShadingRate or attachmentFragmentShadingRate to be supported.\n", kSkipPrefix);
        return;
    }

    // Enable all the FSR features so only the combiner-op limit can fail.
    fsr_features.pipelineFragmentShadingRate = VK_TRUE;
    fsr_features.primitiveFragmentShadingRate = VK_TRUE;
    fsr_features.attachmentFragmentShadingRate = VK_TRUE;

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    VkPipelineFragmentShadingRateStateCreateInfoKHR fsr_ci = LvlInitStruct<VkPipelineFragmentShadingRateStateCreateInfoKHR>();
    fsr_ci.fragmentSize.width = 1;
    fsr_ci.fragmentSize.height = 1;

    auto set_fsr_ci = [&](CreatePipelineHelper &helper) { helper.gp_ci_.pNext = &fsr_ci; };

    // MUL is a "non-trivial" combiner op (anything but KEEP/REPLACE).
    if (fsr_features.primitiveFragmentShadingRate) {
        fsr_ci.combinerOps[0] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR;
        CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit,
                                          "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506");
        fsr_ci.combinerOps[0] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR;
    }

    if (fsr_features.attachmentFragmentShadingRate) {
        fsr_ci.combinerOps[1] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MUL_KHR;
        CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit,
                                          "VUID-VkGraphicsPipelineCreateInfo-fragmentShadingRateNonTrivialCombinerOps-04506");
        fsr_ci.combinerOps[1] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR;
    }
}

// Static (pipeline-creation-time) checks for writing gl_PrimitiveShadingRateEXT
// together with multiple viewports or viewport-index writes, on devices that do
// not support primitiveFragmentShadingRateWithMultipleViewports.
TEST_F(VkLayerTest, InvalidPrimitiveFragmentShadingRateWriteMultiViewportLimit) {
    TEST_DESCRIPTION("Test static validation of the primitiveFragmentShadingRateWithMultipleViewports limit");

    // Enable KHR_fragment_shading_rate and all of its required extensions
    bool fsr_extensions = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    if (fsr_extensions) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    }
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_MULTIVIEW_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
    fsr_extensions = fsr_extensions && DeviceExtensionSupported(gpu(), nullptr, VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
    if (fsr_extensions) {
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_1_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MAINTENANCE_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_MULTIVIEW_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_CREATE_RENDERPASS_2_EXTENSION_NAME);
        m_device_extension_names.push_back(VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME);
    } else {
        printf("%s requires VK_KHR_fragment_shading_rate.\n", kSkipPrefix);
        return;
    }

    // Optional extensions enabling viewport-index writes from VS (EXT) and
    // gl_ViewportMask (NV); each unlocks additional sub-cases below.
    bool vil_extension = DeviceExtensionSupported(gpu(), nullptr, VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME);
    if (vil_extension) {
        m_device_extension_names.push_back(VK_EXT_SHADER_VIEWPORT_INDEX_LAYER_EXTENSION_NAME);
    }

    bool
va2_extension = DeviceExtensionSupported(gpu(), nullptr, VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME);
    if (va2_extension) {
        m_device_extension_names.push_back(VK_NV_VIEWPORT_ARRAY_2_EXTENSION_NAME);
    }

    PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR =
        (PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr);
    VkPhysicalDeviceFragmentShadingRatePropertiesKHR fsr_properties =
        LvlInitStruct<VkPhysicalDeviceFragmentShadingRatePropertiesKHR>();
    VkPhysicalDeviceProperties2KHR properties2 = LvlInitStruct<VkPhysicalDeviceProperties2KHR>(&fsr_properties);
    vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2);

    // The VUIDs only apply when the limit is NOT supported, so skip otherwise.
    if (fsr_properties.primitiveFragmentShadingRateWithMultipleViewports) {
        printf("%s requires primitiveFragmentShadingRateWithMultipleViewports to be unsupported.\n", kSkipPrefix);
        return;
    }

    PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR =
        (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR");
    ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr);
    VkPhysicalDeviceFragmentShadingRateFeaturesKHR fsr_features = LvlInitStruct<VkPhysicalDeviceFragmentShadingRateFeaturesKHR>();
    VkPhysicalDeviceFeatures2KHR features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&fsr_features);
    vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2);
    if (!fsr_features.primitiveFragmentShadingRate) {
        printf("%s requires primitiveFragmentShadingRate to be supported.\n", kSkipPrefix);
        return;
    }
    if (!features2.features.multiViewport) {
        printf("%s requires multiViewport to be supported.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    // Test PrimitiveShadingRate writes with multiple viewports
    {
        char const *vsSource = R"glsl(
            #version 450
            #extension GL_EXT_fragment_shading_rate : enable
            void main() {
                gl_PrimitiveShadingRateEXT = gl_ShadingRateFlag4VerticalPixelsEXT | gl_ShadingRateFlag4HorizontalPixelsEXT;
            }
        )glsl";
        VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);

        VkViewport viewports[2] = {{0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f}, {0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f}};
        VkRect2D scissors[2] = {};

        auto info_override = [&](CreatePipelineHelper &info) {
            info.shader_stages_ = {vs.GetStageCreateInfo()};
            info.vp_state_ci_.viewportCount = 2;
            info.vp_state_ci_.pViewports = viewports;
            info.vp_state_ci_.scissorCount = 2;
            info.vp_state_ci_.pScissors = scissors;
        };

        CreatePipelineHelper::OneshotTest(
            *this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT,
            "VUID-VkGraphicsPipelineCreateInfo-primitiveFragmentShadingRateWithMultipleViewports-04503");
    }

    // Test PrimitiveShadingRate writes with ViewportIndex writes in a geometry shader
    if (features2.features.geometryShader) {
        char const *vsSource = R"glsl(
            #version 450
            void main() {}
        )glsl";
        static char const *gsSource = R"glsl(
            #version 450
            #extension GL_EXT_fragment_shading_rate : enable
            layout (points) in;
            layout (points) out;
            layout (max_vertices = 1) out;
            void main() {
                gl_PrimitiveShadingRateEXT = gl_ShadingRateFlag4VerticalPixelsEXT | gl_ShadingRateFlag4HorizontalPixelsEXT;
                gl_Position = vec4(1.0, 0.5, 0.5, 0.0);
                gl_ViewportIndex = 0;
                gl_PointSize = 1.0f;
                EmitVertex();
            }
        )glsl";

        VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);
        VkShaderObj gs(this, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT);

        auto info_override = [&](CreatePipelineHelper &info) {
            info.shader_stages_ = {vs.GetStageCreateInfo(), gs.GetStageCreateInfo()};
        };

        CreatePipelineHelper::OneshotTest(
            *this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT,
            "VUID-VkGraphicsPipelineCreateInfo-primitiveFragmentShadingRateWithMultipleViewports-04504");
    }

    // Test PrimitiveShadingRate writes with ViewportIndex writes in a vertex shader
    if (vil_extension) {
        char const *vsSource = R"glsl(
            #version 450
            #extension GL_EXT_fragment_shading_rate : enable
            #extension GL_ARB_shader_viewport_layer_array : enable
            void main() {
                gl_PrimitiveShadingRateEXT = gl_ShadingRateFlag4VerticalPixelsEXT | gl_ShadingRateFlag4HorizontalPixelsEXT;
                gl_ViewportIndex = 0;
            }
        )glsl";
        VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);

        auto info_override = [&](CreatePipelineHelper &info) { info.shader_stages_ = {vs.GetStageCreateInfo()}; };

        CreatePipelineHelper::OneshotTest(
            *this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT,
            "VUID-VkGraphicsPipelineCreateInfo-primitiveFragmentShadingRateWithMultipleViewports-04504");
    }

    if (va2_extension) {
        // Test PrimitiveShadingRate writes with ViewportIndex writes in a geometry shader
        if (features2.features.geometryShader) {
            char const *vsSource = R"glsl(
                #version 450
                void main() {}
            )glsl";
            static char const *gsSource = R"glsl(
                #version 450
                #extension GL_EXT_fragment_shading_rate : enable
                #extension GL_NV_viewport_array2 : enable
                layout (points) in;
                layout (points) out;
                layout (max_vertices = 1) out;
                void main() {
                    gl_PrimitiveShadingRateEXT = gl_ShadingRateFlag4VerticalPixelsEXT | gl_ShadingRateFlag4HorizontalPixelsEXT;
                    gl_ViewportMask[0] = 0;
                    gl_Position = vec4(1.0, 0.5, 0.5, 0.0);
                    gl_PointSize = 1.0f;
                    EmitVertex();
                }
            )glsl";

            VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);
            VkShaderObj gs(this, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT);

            auto info_override = [&](CreatePipelineHelper &info) {
                info.shader_stages_ = {vs.GetStageCreateInfo(), gs.GetStageCreateInfo()};
            };

            CreatePipelineHelper::OneshotTest(
                *this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                "VUID-VkGraphicsPipelineCreateInfo-primitiveFragmentShadingRateWithMultipleViewports-04505");
        }

        // Test PrimitiveShadingRate writes with ViewportIndex writes in a vertex shader
        if (vil_extension) {
            char const *vsSource = R"glsl(
                #version 450
                #extension GL_EXT_fragment_shading_rate : enable
                #extension GL_NV_viewport_array2 : enable
                void main() {
                    gl_PrimitiveShadingRateEXT = gl_ShadingRateFlag4VerticalPixelsEXT | gl_ShadingRateFlag4HorizontalPixelsEXT;
                    gl_ViewportMask[0] = 0;
                }
            )glsl";
            VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT);

            auto info_override = [&](CreatePipelineHelper &info) { info.shader_stages_ = {vs.GetStageCreateInfo()}; };

            CreatePipelineHelper::OneshotTest(
                *this, info_override, VK_DEBUG_REPORT_ERROR_BIT_EXT,
                "VUID-VkGraphicsPipelineCreateInfo-primitiveFragmentShadingRateWithMultipleViewports-04505");
        }
    }
}

// Removes SAMPLED_IMAGE_FILTER_LINEAR support from R8G8B8A8_UNORM via the
// device-profile layer, then checks that draws sampling a view of that format
// with a linear-filter sampler are flagged — for combined image-samplers,
// separate image+sampler descriptors, and sampling reached through a function
// call — while a pipeline that never samples passes.
TEST_F(VkLayerTest, SampledInvalidImageViews) {
    TEST_DESCRIPTION("Test if an VkImageView is sampled at draw/dispatch that the format has valid format features enabled");
    VkResult err;

    if (!EnableDeviceProfileLayer()) {
        printf("%s Couldn't enable device profile layer.\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());
    ASSERT_NO_FATAL_FAILURE(InitViewport());

    PFN_vkSetPhysicalDeviceFormatPropertiesEXT fpvkSetPhysicalDeviceFormatPropertiesEXT = nullptr;
    PFN_vkGetOriginalPhysicalDeviceFormatPropertiesEXT fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT = nullptr;

    // Load required functions
    if (!LoadDeviceProfileLayer(fpvkSetPhysicalDeviceFormatPropertiesEXT, fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT)) {
        printf("%s Required extensions are not present.\n", kSkipPrefix);
        return;
    }

    const VkFormat sampled_format = VK_FORMAT_R8G8B8A8_UNORM;

    // Remove format features want to test if missing
    VkFormatProperties formatProps;
    fpvkGetOriginalPhysicalDeviceFormatPropertiesEXT(gpu(), sampled_format, &formatProps);
    formatProps.optimalTilingFeatures = (formatProps.optimalTilingFeatures & ~VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT);
    fpvkSetPhysicalDeviceFormatPropertiesEXT(gpu(), sampled_format, formatProps);

    VkImageObj image(m_device);
    image.Init(128, 128, 1, sampled_format, VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());
    VkImageView imageView = image.targetView(sampled_format);

    // maps to VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER
    char const
*fs_source_combined = R"glsl( #version 450 layout (set=0, binding=0) uniform sampler2D samplerColor; layout(location=0) out vec4 color; void main() { color = texture(samplerColor, gl_FragCoord.xy); color += texture(samplerColor, gl_FragCoord.wz); } )glsl"; VkShaderObj fs_combined(this, fs_source_combined, VK_SHADER_STAGE_FRAGMENT_BIT); // maps to VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE and VK_DESCRIPTOR_TYPE_SAMPLER char const *fs_source_seperate = R"glsl( #version 450 layout (set=0, binding=0) uniform texture2D textureColor; layout (set=0, binding=1) uniform sampler samplers; layout(location=0) out vec4 color; void main() { color = texture(sampler2D(textureColor, samplers), gl_FragCoord.xy); } )glsl"; VkShaderObj fs_seperate(this, fs_source_seperate, VK_SHADER_STAGE_FRAGMENT_BIT); // maps to an unused image sampler that should not trigger validation as it is never sampled char const *fs_source_unused = R"glsl( #version 450 layout (set=0, binding=0) uniform sampler2D samplerColor; layout(location=0) out vec4 color; void main() { color = vec4(gl_FragCoord.xyz, 1.0); } )glsl"; VkShaderObj fs_unused(this, fs_source_unused, VK_SHADER_STAGE_FRAGMENT_BIT); // maps to VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER but makes sure it walks function tree to find sampling char const *fs_source_function = R"glsl( #version 450 layout (set=0, binding=0) uniform sampler2D samplerColor; layout(location=0) out vec4 color; vec4 foo() { return texture(samplerColor, gl_FragCoord.xy); } vec4 bar(float x) { return (x > 0.5) ? 
foo() : vec4(1.0,1.0,1.0,1.0); } void main() { color = bar(gl_FragCoord.x); } )glsl"; VkShaderObj fs_function(this, fs_source_function, VK_SHADER_STAGE_FRAGMENT_BIT); VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT); VkRenderpassObj render_pass(m_device); VkPipelineObj pipeline_combined(m_device); pipeline_combined.AddDefaultColorAttachment(); pipeline_combined.SetViewport(m_viewports); pipeline_combined.SetScissor(m_scissors); pipeline_combined.AddShader(&vs); VkPipelineObj pipeline_seperate(m_device); pipeline_seperate.AddDefaultColorAttachment(); pipeline_seperate.SetViewport(m_viewports); pipeline_seperate.SetScissor(m_scissors); pipeline_seperate.AddShader(&vs); VkPipelineObj pipeline_unused(m_device); pipeline_unused.AddDefaultColorAttachment(); pipeline_unused.SetViewport(m_viewports); pipeline_unused.SetScissor(m_scissors); pipeline_unused.AddShader(&vs); VkPipelineObj pipeline_function(m_device); pipeline_function.AddDefaultColorAttachment(); pipeline_function.SetViewport(m_viewports); pipeline_function.SetScissor(m_scissors); pipeline_function.AddShader(&vs); // 4 different pipelines for 4 different shaders // 3 are invalid and 1 (pipeline_unused) is valid pipeline_combined.AddShader(&fs_combined); pipeline_seperate.AddShader(&fs_seperate); pipeline_unused.AddShader(&fs_unused); pipeline_function.AddShader(&fs_function); OneOffDescriptorSet::Bindings combined_bindings = { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}}; OneOffDescriptorSet::Bindings seperate_bindings = { {0, VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, {1, VK_DESCRIPTOR_TYPE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}}; OneOffDescriptorSet combined_descriptor_set(m_device, combined_bindings); OneOffDescriptorSet seperate_descriptor_set(m_device, seperate_bindings); const VkPipelineLayoutObj combined_pipeline_layout(m_device, {&combined_descriptor_set.layout_}); const 
VkPipelineLayoutObj seperate_pipeline_layout(m_device, {&seperate_descriptor_set.layout_}); pipeline_combined.CreateVKPipeline(combined_pipeline_layout.handle(), render_pass.handle()); pipeline_seperate.CreateVKPipeline(seperate_pipeline_layout.handle(), render_pass.handle()); pipeline_unused.CreateVKPipeline(combined_pipeline_layout.handle(), render_pass.handle()); pipeline_function.CreateVKPipeline(combined_pipeline_layout.handle(), render_pass.handle()); VkSamplerCreateInfo sampler_ci = SafeSaneSamplerCreateInfo(); sampler_ci.minFilter = VK_FILTER_LINEAR; // turned off feature bit for test sampler_ci.compareEnable = VK_FALSE; VkSampler sampler; err = vk::CreateSampler(device(), &sampler_ci, NULL, &sampler); ASSERT_VK_SUCCESS(err); VkDescriptorImageInfo combined_sampler_info = {sampler, imageView, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL}; VkDescriptorImageInfo seperate_sampled_image_info = {VK_NULL_HANDLE, imageView, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL}; VkDescriptorImageInfo seperate_sampler_info = {sampler, VK_NULL_HANDLE, VK_IMAGE_LAYOUT_UNDEFINED}; // first item is combined, second/third item are seperate VkWriteDescriptorSet descriptor_writes[3] = {}; descriptor_writes[0] = LvlInitStruct<VkWriteDescriptorSet>(); descriptor_writes[0].dstSet = combined_descriptor_set.set_; descriptor_writes[0].dstBinding = 0; descriptor_writes[0].descriptorCount = 1; descriptor_writes[0].descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER; descriptor_writes[0].pImageInfo = &combined_sampler_info; descriptor_writes[1] = LvlInitStruct<VkWriteDescriptorSet>(); descriptor_writes[1].dstSet = seperate_descriptor_set.set_; descriptor_writes[1].dstBinding = 0; descriptor_writes[1].descriptorCount = 1; descriptor_writes[1].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE; descriptor_writes[1].pImageInfo = &seperate_sampled_image_info; descriptor_writes[2] = LvlInitStruct<VkWriteDescriptorSet>(); descriptor_writes[2].dstSet = seperate_descriptor_set.set_; 
descriptor_writes[2].dstBinding = 1; descriptor_writes[2].descriptorCount = 1; descriptor_writes[2].descriptorType = VK_DESCRIPTOR_TYPE_SAMPLER; descriptor_writes[2].pImageInfo = &seperate_sampler_info; vk::UpdateDescriptorSets(m_device->device(), 3, descriptor_writes, 0, nullptr); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); // Unused is a valid version of the combined pipeline/descriptors vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_unused.handle()); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, combined_pipeline_layout.handle(), 0, 1, &combined_descriptor_set.set_, 0, nullptr); m_errorMonitor->ExpectSuccess(); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyNotFound(); // Same descriptor set as combined test vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_function.handle()); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-magFilter-04553"); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); // Draw with invalid combined image sampler vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_combined.handle()); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-magFilter-04553"); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); // Same error, but not with seperate descriptors vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_seperate.handle()); vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, seperate_pipeline_layout.handle(), 0, 1, &seperate_descriptor_set.set_, 0, nullptr); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-magFilter-04553"); m_commandBuffer->Draw(1, 0, 0, 0); m_errorMonitor->VerifyFound(); // cleanup vk::DestroySampler(device(), sampler, nullptr); } TEST_F(VkLayerTest, 
ShaderDrawParametersNotEnabled10) { TEST_DESCRIPTION("Validation using DrawParameters for Vulkan 1.0 without the shaderDrawParameters feature enabled."); SetTargetApiVersion(VK_API_VERSION_1_0); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (DeviceValidationVersion() > VK_API_VERSION_1_0) { printf("%s Tests requires Vulkan 1.0 only, skipping test\n", kSkipPrefix); return; } char const *vsSource = R"glsl( #version 460 void main(){ gl_Position = vec4(float(gl_BaseVertex)); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); if (VK_SUCCESS == vs.InitFromGLSLTry(vsSource)) { const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", // Extension not enabled "VUID-VkShaderModuleCreateInfo-pCode-01091"}); // The capability not valid } } TEST_F(VkLayerTest, ShaderDrawParametersNotEnabled11) { TEST_DESCRIPTION("Validation using DrawParameters for Vulkan 1.1 without the shaderDrawParameters feature enabled."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (DeviceValidationVersion() < VK_API_VERSION_1_1) { printf("%s Tests requires Vulkan 1.1+, skipping test\n", kSkipPrefix); return; } char const *vsSource = R"glsl( #version 460 void main(){ gl_Position = vec4(float(gl_BaseVertex)); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_1, SPV_SOURCE_GLSL_TRY); // make sure using SPIR-V 1.3 as extension is core and not needed in Vulkan then if (VK_SUCCESS == vs.InitFromGLSLTry(vsSource, false, SPV_ENV_VULKAN_1_1)) { const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; }; 
CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkShaderModuleCreateInfo-pCode-01091"); } } TEST_F(VkLayerTest, ShaderFloatControl) { TEST_DESCRIPTION("Test VK_KHR_shader_float_controls"); // Need 1.1 to get SPIR-V 1.3 since OpExecutionModeId was added in SPIR-V 1.2 SetTargetApiVersion(VK_API_VERSION_1_1); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceValidationVersion() < VK_API_VERSION_1_1) { printf("%s test requires Vulkan 1.1+, skipping test\n", kSkipPrefix); return; } // The issue with revision 4 of this extension should not be an issue with the tests if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr); auto shader_float_control = LvlInitStruct<VkPhysicalDeviceFloatControlsProperties>(); auto properties2 = LvlInitStruct<VkPhysicalDeviceProperties2KHR>(&shader_float_control); vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2); // Check for support of 32-bit properties, but only will test if they are not supported // in case all 16/32/64 version are not supported will set SetUnexpectedError for capability check bool 
signed_zero_inf_nan_preserve = (shader_float_control.shaderSignedZeroInfNanPreserveFloat32 == VK_TRUE); bool denorm_preserve = (shader_float_control.shaderDenormPreserveFloat32 == VK_TRUE); bool denorm_flush_to_zero = (shader_float_control.shaderDenormFlushToZeroFloat32 == VK_TRUE); bool rounding_mode_rte = (shader_float_control.shaderRoundingModeRTEFloat32 == VK_TRUE); bool rounding_mode_rtz = (shader_float_control.shaderRoundingModeRTZFloat32 == VK_TRUE); // same body for each shader, only the start is different // this is just "float a = 1.0 + 2.0;" in SPIR-V const std::string source_body = R"( OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 450 OpName %main "main" %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %pFunction = OpTypePointer Function %float %float_3 = OpConstant %float 3 %main = OpFunction %void None %3 %5 = OpLabel %6 = OpVariable %pFunction Function OpStore %6 %float_3 OpReturn OpFunctionEnd )"; if (!signed_zero_inf_nan_preserve) { const std::string spv_source = R"( OpCapability Shader OpCapability SignedZeroInfNanPreserve OpExtension "SPV_KHR_float_controls" %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main SignedZeroInfNanPreserve 32 )" + source_body; const auto set_info = [&](CreateComputePipelineHelper &helper) { helper.cs_.reset(new VkShaderObj(this, spv_source, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_1, SPV_SOURCE_ASM)); }; m_errorMonitor->SetUnexpectedError("VUID-VkShaderModuleCreateInfo-pCode-01091"); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-shaderSignedZeroInfNanPreserveFloat32-06294"); } if (!denorm_preserve) { const std::string spv_source = R"( OpCapability Shader OpCapability DenormPreserve OpExtension "SPV_KHR_float_controls" %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main DenormPreserve 32 )" + source_body; 
const auto set_info = [&](CreateComputePipelineHelper &helper) { helper.cs_.reset(new VkShaderObj(this, spv_source, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_1, SPV_SOURCE_ASM)); }; m_errorMonitor->SetUnexpectedError("VUID-VkShaderModuleCreateInfo-pCode-01091"); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-shaderDenormPreserveFloat32-06297"); } if (!denorm_flush_to_zero) { const std::string spv_source = R"( OpCapability Shader OpCapability DenormFlushToZero OpExtension "SPV_KHR_float_controls" %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main DenormFlushToZero 32 )" + source_body; const auto set_info = [&](CreateComputePipelineHelper &helper) { helper.cs_.reset(new VkShaderObj(this, spv_source, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_1, SPV_SOURCE_ASM)); }; m_errorMonitor->SetUnexpectedError("VUID-VkShaderModuleCreateInfo-pCode-01091"); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-shaderDenormFlushToZeroFloat32-06300"); } if (!rounding_mode_rte) { const std::string spv_source = R"( OpCapability Shader OpCapability RoundingModeRTE OpExtension "SPV_KHR_float_controls" %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main RoundingModeRTE 32 )" + source_body; const auto set_info = [&](CreateComputePipelineHelper &helper) { helper.cs_.reset(new VkShaderObj(this, spv_source, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_1, SPV_SOURCE_ASM)); }; m_errorMonitor->SetUnexpectedError("VUID-VkShaderModuleCreateInfo-pCode-01091"); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-shaderRoundingModeRTEFloat32-06303"); } if (!rounding_mode_rtz) { const std::string spv_source = R"( OpCapability Shader OpCapability RoundingModeRTZ OpExtension "SPV_KHR_float_controls" %1 = OpExtInstImport "GLSL.std.450" 
OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main RoundingModeRTZ 32 )" + source_body; const auto set_info = [&](CreateComputePipelineHelper &helper) { helper.cs_.reset(new VkShaderObj(this, spv_source, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_1, SPV_SOURCE_ASM)); }; m_errorMonitor->SetUnexpectedError("VUID-VkShaderModuleCreateInfo-pCode-01091"); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-shaderRoundingModeRTZFloat32-06306"); } } TEST_F(VkLayerTest, Storage8and16bit) { TEST_DESCRIPTION("Test VK_KHR_8bit_storage and VK_KHR_16bit_storage"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); bool support_8_bit = DeviceExtensionSupported(gpu(), nullptr, VK_KHR_8BIT_STORAGE_EXTENSION_NAME); bool support_16_bit = DeviceExtensionSupported(gpu(), nullptr, VK_KHR_16BIT_STORAGE_EXTENSION_NAME); if ((support_8_bit == false) && (support_16_bit == false)) { printf("%s Extension %s and %s are not supported.\n", kSkipPrefix, VK_KHR_8BIT_STORAGE_EXTENSION_NAME, VK_KHR_16BIT_STORAGE_EXTENSION_NAME); return; } else if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME) == false) { // need for all shaders, but not guaranteed from driver to have support printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME); return; } else { m_device_extension_names.push_back(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_STORAGE_BUFFER_STORAGE_CLASS_EXTENSION_NAME); if (support_8_bit == true) { m_device_extension_names.push_back(VK_KHR_8BIT_STORAGE_EXTENSION_NAME); } if 
(support_16_bit == true) { m_device_extension_names.push_back(VK_KHR_16BIT_STORAGE_EXTENSION_NAME); } } // Need to explicitly turn off shaderInt16 as test will try to add and easier if all test have off VkPhysicalDeviceFeatures features = {}; features.shaderInt16 = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(&features)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // storageBuffer8BitAccess { char const *vsSource = R"glsl( #version 450 #extension GL_EXT_shader_8bit_storage: enable #extension GL_EXT_shader_explicit_arithmetic_types_int8: enable layout(set = 0, binding = 0) buffer SSBO { int8_t x; } data; void main(){ int8_t a = data.x + data.x; gl_Position = vec4(float(a) * 0.0); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv); if (VK_SUCCESS == vs.InitFromGLSLTry(vsSource)) { const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; }; CreatePipelineHelper::OneshotTest( *this, set_info, kErrorBit, vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", // Int8 "VUID-VkShaderModuleCreateInfo-pCode-01091"}); // StorageBuffer8BitAccess } } // uniformAndStorageBuffer8BitAccess { char const *vsSource = R"glsl( #version 450 #extension GL_EXT_shader_8bit_storage: enable #extension GL_EXT_shader_explicit_arithmetic_types_int8: enable layout(set = 0, binding = 0) uniform UBO { int8_t x; } data; void main(){ int8_t a = data.x + data.x; gl_Position = vec4(float(a) * 0.0); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv); if (VK_SUCCESS == vs.InitFromGLSLTry(vsSource)) { const auto set_info = [&](CreatePipelineHelper &helper) { 
helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; }; CreatePipelineHelper::OneshotTest( *this, set_info, kErrorBit, vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", // Int8 "VUID-VkShaderModuleCreateInfo-pCode-01091"}); // UniformAndStorageBuffer8BitAccess } } // storagePushConstant8 { char const *vsSource = R"glsl( #version 450 #extension GL_EXT_shader_8bit_storage: enable #extension GL_EXT_shader_explicit_arithmetic_types_int8: enable layout(push_constant) uniform PushConstant { int8_t x; } data; void main(){ int8_t a = data.x + data.x; gl_Position = vec4(float(a) * 0.0); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv); if (VK_SUCCESS == vs.InitFromGLSLTry(vsSource)) { VkPushConstantRange push_constant_range = {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}; VkPipelineLayoutCreateInfo pipeline_layout_info{ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, nullptr, 0, 0, nullptr, 1, &push_constant_range}; const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; helper.pipeline_layout_ci_ = pipeline_layout_info; }; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", // Int8 "VUID-VkShaderModuleCreateInfo-pCode-01091"}); // StoragePushConstant8 } } // storageBuffer16BitAccess - Float { char const *vsSource = R"glsl( #version 450 #extension GL_EXT_shader_16bit_storage: enable #extension GL_EXT_shader_explicit_arithmetic_types_float16: enable layout(set = 0, binding = 0) buffer SSBO { float16_t x; } data; void main(){ float16_t a = data.x + data.x; gl_Position = vec4(float(a) * 0.0); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, 
SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv); if (VK_SUCCESS == vs.InitFromGLSLTry(vsSource)) { const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; }; CreatePipelineHelper::OneshotTest( *this, set_info, kErrorBit, vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", // Float16 "VUID-VkShaderModuleCreateInfo-pCode-01091"}); // StorageBuffer16BitAccess } } // uniformAndStorageBuffer16BitAccess - Float { char const *vsSource = R"glsl( #version 450 #extension GL_EXT_shader_16bit_storage: enable #extension GL_EXT_shader_explicit_arithmetic_types_float16: enable layout(set = 0, binding = 0) uniform UBO { float16_t x; } data; void main(){ float16_t a = data.x + data.x; gl_Position = vec4(float(a) * 0.0); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv); if (VK_SUCCESS == vs.InitFromGLSLTry(vsSource)) { const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; }; CreatePipelineHelper::OneshotTest( *this, set_info, kErrorBit, vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", // Float16 "VUID-VkShaderModuleCreateInfo-pCode-01091"}); // UniformAndStorageBuffer16BitAccess } } // storagePushConstant16 - Float { char const *vsSource = R"glsl( #version 450 #extension GL_EXT_shader_16bit_storage: enable #extension GL_EXT_shader_explicit_arithmetic_types_float16: enable layout(push_constant) uniform PushConstant { float16_t x; } data; void main(){ float16_t a = data.x + data.x; gl_Position = 
vec4(float(a) * 0.0); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv); if (VK_SUCCESS == vs.InitFromGLSLTry(vsSource)) { VkPushConstantRange push_constant_range = {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}; VkPipelineLayoutCreateInfo pipeline_layout_info{ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, nullptr, 0, 0, nullptr, 1, &push_constant_range}; const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; helper.pipeline_layout_ci_ = pipeline_layout_info; }; CreatePipelineHelper::OneshotTest( *this, set_info, kErrorBit, vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", // Float16 "VUID-VkShaderModuleCreateInfo-pCode-01091"}); // StoragePushConstant16 } } // storageInputOutput16 - Float { char const *vsSource = R"glsl( #version 450 #extension GL_EXT_shader_16bit_storage: enable #extension GL_EXT_shader_explicit_arithmetic_types_float16: enable layout(location = 0) out float16_t outData; void main(){ outData = float16_t(1); gl_Position = vec4(0.0); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); // Need to match in/out char const *fsSource = R"glsl( #version 450 #extension GL_EXT_shader_16bit_storage: enable #extension GL_EXT_shader_explicit_arithmetic_types_float16: enable layout(location = 0) in float16_t x; layout(location = 0) out vec4 uFragColor; void main(){ uFragColor = vec4(0,1,0,1); } )glsl"; VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv); if ((VK_SUCCESS == vs.InitFromGLSLTry(vsSource)) && (VK_SUCCESS == fs.InitFromGLSLTry(fsSource))) { const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), 
fs.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest( *this, set_info, kErrorBit, vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", // Float16 vert "VUID-VkShaderModuleCreateInfo-pCode-01091", // StorageInputOutput16 vert "VUID-VkShaderModuleCreateInfo-pCode-01091"}); // StorageInputOutput16 frag } } // storageBuffer16BitAccess - Int { char const *vsSource = R"glsl( #version 450 #extension GL_EXT_shader_16bit_storage: enable #extension GL_EXT_shader_explicit_arithmetic_types_int16: enable layout(set = 0, binding = 0) buffer SSBO { int16_t x; } data; void main(){ int16_t a = data.x + data.x; gl_Position = vec4(float(a) * 0.0); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv); if (VK_SUCCESS == vs.InitFromGLSLTry(vsSource)) { const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; }; CreatePipelineHelper::OneshotTest( *this, set_info, kErrorBit, vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", // Int16 "VUID-VkShaderModuleCreateInfo-pCode-01091"}); // StorageBuffer16BitAccess } } // uniformAndStorageBuffer16BitAccess - Int { char const *vsSource = R"glsl( #version 450 #extension GL_EXT_shader_16bit_storage: enable #extension GL_EXT_shader_explicit_arithmetic_types_int16: enable layout(set = 0, binding = 0) uniform UBO { int16_t x; } data; void main(){ int16_t a = data.x + data.x; gl_Position = vec4(float(a) * 0.0); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv); if (VK_SUCCESS == vs.InitFromGLSLTry(vsSource)) { const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ 
= {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; }; CreatePipelineHelper::OneshotTest( *this, set_info, kErrorBit, vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", // Int16 "VUID-VkShaderModuleCreateInfo-pCode-01091"}); // UniformAndStorageBuffer16BitAccess } } // storagePushConstant16 - Int { char const *vsSource = R"glsl( #version 450 #extension GL_EXT_shader_16bit_storage: enable #extension GL_EXT_shader_explicit_arithmetic_types_int16: enable layout(push_constant) uniform PushConstant { int16_t x; } data; void main(){ int16_t a = data.x + data.x; gl_Position = vec4(float(a) * 0.0); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv); if (VK_SUCCESS == vs.InitFromGLSLTry(vsSource)) { VkPushConstantRange push_constant_range = {VK_SHADER_STAGE_VERTEX_BIT, 0, 4}; VkPipelineLayoutCreateInfo pipeline_layout_info{ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO, nullptr, 0, 0, nullptr, 1, &push_constant_range}; const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; helper.pipeline_layout_ci_ = pipeline_layout_info; }; CreatePipelineHelper::OneshotTest( *this, set_info, kErrorBit, vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", // Int16 "VUID-VkShaderModuleCreateInfo-pCode-01091"}); // StoragePushConstant16 } } // storageInputOutput16 - Int { char const *vsSource = R"glsl( #version 450 #extension GL_EXT_shader_16bit_storage: enable #extension GL_EXT_shader_explicit_arithmetic_types_int16: enable layout(location = 0) out int16_t outData; void main(){ outData = int16_t(1); gl_Position = vec4(0.0); } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); // Need to match 
in/out char const *fsSource = R"glsl( #version 450 #extension GL_EXT_shader_16bit_storage: enable #extension GL_EXT_shader_explicit_arithmetic_types_int16: enable layout(location = 0) flat in int16_t x; layout(location = 0) out vec4 uFragColor; void main(){ uFragColor = vec4(0,1,0,1); } )glsl"; VkShaderObj fs(this, vsSource, VK_SHADER_STAGE_FRAGMENT_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL_TRY); m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv); if ((VK_SUCCESS == vs.InitFromGLSLTry(vsSource)) && (VK_SUCCESS == fs.InitFromGLSLTry(fsSource))) { const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest( *this, set_info, kErrorBit, vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", // Int16 vert "VUID-VkShaderModuleCreateInfo-pCode-01091", // StorageInputOutput16 vert "VUID-VkShaderModuleCreateInfo-pCode-01091"}); // StorageInputOutput16 frag } } } TEST_F(VkLayerTest, WorkgroupMemoryExplicitLayout) { TEST_DESCRIPTION("Test VK_KHR_workgroup_memory_explicit_layout"); SetTargetApiVersion(VK_API_VERSION_1_2); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceValidationVersion() < VK_API_VERSION_1_2) { printf("%s Test requires Vulkan >= 1.2.\n", kSkipPrefix); return; } auto float16int8_features = LvlInitStruct<VkPhysicalDeviceShaderFloat16Int8Features>(); auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&float16int8_features); vk::GetPhysicalDeviceFeatures2(gpu(), &features2); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); const bool support_8_bit = (float16int8_features.shaderInt8 == VK_TRUE); const bool support_16_bit = (float16int8_features.shaderFloat16 == VK_TRUE) && (features2.features.shaderInt16 == VK_TRUE); // WorkgroupMemoryExplicitLayoutKHR { const std::string spv_source = R"( OpCapability Shader OpCapability WorkgroupMemoryExplicitLayoutKHR OpExtension 
"SPV_KHR_workgroup_memory_explicit_layout" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" %_ OpExecutionMode %main LocalSize 8 1 1 OpMemberDecorate %first 0 Offset 0 OpDecorate %first Block %void = OpTypeVoid %3 = OpTypeFunction %void %int = OpTypeInt 32 1 %first = OpTypeStruct %int %_ptr_Workgroup_first = OpTypePointer Workgroup %first %_ = OpVariable %_ptr_Workgroup_first Workgroup %int_0 = OpConstant %int 0 %int_2 = OpConstant %int 2 %_ptr_Workgroup_int = OpTypePointer Workgroup %int %main = OpFunction %void None %3 %5 = OpLabel %13 = OpAccessChain %_ptr_Workgroup_int %_ %int_0 OpStore %13 %int_2 OpReturn OpFunctionEnd )"; const auto set_info = [&](CreateComputePipelineHelper &helper) { helper.cs_.reset(new VkShaderObj(this, spv_source, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_2, SPV_SOURCE_ASM)); }; // Both missing enabling the extension and capability feature CreateComputePipelineHelper::OneshotTest( *this, set_info, kErrorBit, std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-VkShaderModuleCreateInfo-pCode-04147"}); } // WorkgroupMemoryExplicitLayout8BitAccessKHR if (support_8_bit) { const std::string spv_source = R"( OpCapability Shader OpCapability Int8 OpCapability WorkgroupMemoryExplicitLayout8BitAccessKHR OpExtension "SPV_KHR_workgroup_memory_explicit_layout" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" %_ OpExecutionMode %main LocalSize 2 1 1 OpMemberDecorate %first 0 Offset 0 OpDecorate %first Block %void = OpTypeVoid %3 = OpTypeFunction %void %char = OpTypeInt 8 1 %first = OpTypeStruct %char %_ptr_Workgroup_first = OpTypePointer Workgroup %first %_ = OpVariable %_ptr_Workgroup_first Workgroup %int = OpTypeInt 32 1 %int_0 = OpConstant %int 0 %char_2 = OpConstant %char 2 %_ptr_Workgroup_char = OpTypePointer Workgroup %char %main = OpFunction %void None %3 %5 = OpLabel %14 = OpAccessChain %_ptr_Workgroup_char %_ %int_0 OpStore %14 %char_2 OpReturn OpFunctionEnd )"; const auto set_info = 
[&](CreateComputePipelineHelper &helper) { helper.cs_.reset(new VkShaderObj(this, spv_source, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_2, SPV_SOURCE_ASM)); }; // Both missing enabling the extension and capability feature CreateComputePipelineHelper::OneshotTest( *this, set_info, kErrorBit, std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-VkShaderModuleCreateInfo-pCode-04147"}); } // WorkgroupMemoryExplicitLayout16BitAccessKHR if (support_16_bit) { const std::string spv_source = R"( OpCapability Shader OpCapability Float16 OpCapability Int16 OpCapability WorkgroupMemoryExplicitLayout16BitAccessKHR OpExtension "SPV_KHR_workgroup_memory_explicit_layout" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" %_ OpExecutionMode %main LocalSize 2 1 1 OpMemberDecorate %first 0 Offset 0 OpMemberDecorate %first 1 Offset 2 OpDecorate %first Block %void = OpTypeVoid %3 = OpTypeFunction %void %short = OpTypeInt 16 1 %half = OpTypeFloat 16 %first = OpTypeStruct %short %half %_ptr_Workgroup_first = OpTypePointer Workgroup %first %_ = OpVariable %_ptr_Workgroup_first Workgroup %int = OpTypeInt 32 1 %int_0 = OpConstant %int 0 %short_3 = OpConstant %short 3 %_ptr_Workgroup_short = OpTypePointer Workgroup %short %int_1 = OpConstant %int 1 %half_0x1_898p_3 = OpConstant %half 0x1.898p+3 %_ptr_Workgroup_half = OpTypePointer Workgroup %half %main = OpFunction %void None %3 %5 = OpLabel %15 = OpAccessChain %_ptr_Workgroup_short %_ %int_0 OpStore %15 %short_3 %19 = OpAccessChain %_ptr_Workgroup_half %_ %int_1 OpStore %19 %half_0x1_898p_3 OpReturn OpFunctionEnd )"; const auto set_info = [&](CreateComputePipelineHelper &helper) { helper.cs_.reset(new VkShaderObj(this, spv_source, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_2, SPV_SOURCE_ASM)); }; // Both missing enabling the extension and capability feature CreateComputePipelineHelper::OneshotTest( *this, set_info, kErrorBit, std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", 
"VUID-VkShaderModuleCreateInfo-pCode-04147"}); } // workgroupMemoryExplicitLayoutScalarBlockLayout feature // will fail from not passing --workgroup-scalar-block-layout in spirv-val { const std::string spv_source = R"( OpCapability Shader OpCapability WorkgroupMemoryExplicitLayoutKHR OpExtension "SPV_KHR_workgroup_memory_explicit_layout" OpMemoryModel Logical GLSL450 OpEntryPoint Vertex %main "main" %B OpSource GLSL 450 OpMemberDecorate %S 0 Offset 0 OpMemberDecorate %S 1 Offset 4 OpMemberDecorate %S 2 Offset 16 OpMemberDecorate %S 3 Offset 28 OpDecorate %S Block OpDecorate %B Aliased %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %v3float = OpTypeVector %float 3 %S = OpTypeStruct %float %v3float %v3float %v3float %_ptr_Workgroup_S = OpTypePointer Workgroup %S %B = OpVariable %_ptr_Workgroup_S Workgroup %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-Shader-InconsistentSpirv"); VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, spv_source, "main", nullptr, SPV_ENV_VULKAN_1_2); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, ReadShaderClock) { TEST_DESCRIPTION("Test VK_KHR_shader_clock"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_SHADER_CLOCK_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_SHADER_CLOCK_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_SHADER_CLOCK_EXTENSION_NAME); return; } // Don't enable either feature bit on purpose ASSERT_NO_FATAL_FAILURE(InitState()); 
ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); // Device scope using GL_EXT_shader_realtime_clock char const *vsSourceDevice = R"glsl( #version 450 #extension GL_EXT_shader_realtime_clock: enable void main(){ uvec2 a = clockRealtime2x32EXT(); gl_Position = vec4(float(a.x) * 0.0); } )glsl"; VkShaderObj vs_device(this, vsSourceDevice, VK_SHADER_STAGE_VERTEX_BIT); // Subgroup scope using ARB_shader_clock char const *vsSourceScope = R"glsl( #version 450 #extension GL_ARB_shader_clock: enable void main(){ uvec2 a = clock2x32ARB(); gl_Position = vec4(float(a.x) * 0.0); } )glsl"; VkShaderObj vs_subgroup(this, vsSourceScope, VK_SHADER_STAGE_VERTEX_BIT); const auto set_info_device = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs_device.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, set_info_device, kErrorBit, "VUID-RuntimeSpirv-shaderDeviceClock-06268"); const auto set_info_subgroup = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs_subgroup.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, set_info_subgroup, kErrorBit, "VUID-RuntimeSpirv-shaderSubgroupClock-06267"); } TEST_F(VkLayerTest, GraphicsPipelineInvalidFlags) { TEST_DESCRIPTION("Create a graphics pipeline with invalid VkPipelineCreateFlags."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineCreateFlags flags; const auto set_info = [&](CreatePipelineHelper &helper) { helper.gp_ci_.flags = flags; }; flags = VK_PIPELINE_CREATE_DISPATCH_BASE; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-00764"); flags = VK_PIPELINE_CREATE_LIBRARY_BIT_KHR; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-03371"); flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, 
"VUID-VkGraphicsPipelineCreateInfo-flags-03372"); flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-03373"); flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-03374"); flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-03375"); flags = VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-03376"); flags = VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-03377"); flags = VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-03577"); flags = VK_PIPELINE_CREATE_RAY_TRACING_ALLOW_MOTION_BIT_NV; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-04947"); } TEST_F(VkLayerTest, ComputePipelineInvalidFlags) { TEST_DESCRIPTION("Create a compute pipeline with invalid VkPipelineCreateFlags."); ASSERT_NO_FATAL_FAILURE(Init()); VkPipelineCreateFlags flags; const auto set_info = [&](CreateComputePipelineHelper &helper) { helper.cp_ci_.flags = flags; }; flags = VK_PIPELINE_CREATE_LIBRARY_BIT_KHR; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkComputePipelineCreateInfo-flags-03364"); flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkComputePipelineCreateInfo-flags-03365"); flags = 
VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkComputePipelineCreateInfo-flags-03366"); flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkComputePipelineCreateInfo-flags-03367"); flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkComputePipelineCreateInfo-flags-03368"); flags = VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkComputePipelineCreateInfo-flags-03369"); flags = VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkComputePipelineCreateInfo-flags-03370"); flags = VK_PIPELINE_CREATE_RAY_TRACING_SHADER_GROUP_HANDLE_CAPTURE_REPLAY_BIT_KHR; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkComputePipelineCreateInfo-flags-03576"); flags = VK_PIPELINE_CREATE_RAY_TRACING_ALLOW_MOTION_BIT_NV; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkComputePipelineCreateInfo-flags-04945"); flags = VK_PIPELINE_CREATE_INDIRECT_BINDABLE_BIT_NV; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkComputePipelineCreateInfo-flags-02874"); } TEST_F(VkLayerTest, UsingProvokingVertexModeLastVertexExtWithoutEnabled) { TEST_DESCRIPTION("Test using VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT but it doesn't enable provokingVertexLast."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); CreatePipelineHelper pipe(*this); pipe.InitInfo(); auto provoking_vertex_state_ci = LvlInitStruct<VkPipelineRasterizationProvokingVertexStateCreateInfoEXT>(); provoking_vertex_state_ci.provokingVertexMode = VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT; 
pipe.rs_state_ci_.pNext = &provoking_vertex_state_ci; pipe.InitState(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineRasterizationProvokingVertexStateCreateInfoEXT-provokingVertexMode-04883"); pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, NotSupportProvokingVertexModePerPipeline) { TEST_DESCRIPTION( "Test using different VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT but it doesn't support provokingVertexModePerPipeline."); bool inst_ext = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); if (inst_ext) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s %s not supported, skipping tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceExtensionSupported(gpu(), nullptr, VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME); } else { printf("%s Extension %s is not supported, skipping tests\n", kSkipPrefix, VK_EXT_PROVOKING_VERTEX_EXTENSION_NAME); return; } auto provoking_vertex_properties = LvlInitStruct<VkPhysicalDeviceProvokingVertexPropertiesEXT>(); auto properties2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&provoking_vertex_properties); vk::GetPhysicalDeviceProperties2(gpu(), &properties2); if (provoking_vertex_properties.provokingVertexModePerPipeline == VK_TRUE) { printf("%s provokingVertexModePerPipeline is VK_TRUE, skipping tests\n", kSkipPrefix); return; } auto provoking_vertex_features = LvlInitStruct<VkPhysicalDeviceProvokingVertexFeaturesEXT>(); provoking_vertex_features.provokingVertexLast = VK_TRUE; auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&provoking_vertex_features); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); CreatePipelineHelper pipe1(*this); 
pipe1.InitInfo(); auto provoking_vertex_state_ci = LvlInitStruct<VkPipelineRasterizationProvokingVertexStateCreateInfoEXT>(); provoking_vertex_state_ci.provokingVertexMode = VK_PROVOKING_VERTEX_MODE_FIRST_VERTEX_EXT; pipe1.rs_state_ci_.pNext = &provoking_vertex_state_ci; pipe1.InitState(); pipe1.CreateGraphicsPipeline(); CreatePipelineHelper pipe2(*this); pipe2.InitInfo(); provoking_vertex_state_ci.provokingVertexMode = VK_PROVOKING_VERTEX_MODE_LAST_VERTEX_EXT; pipe2.rs_state_ci_.pNext = &provoking_vertex_state_ci; pipe2.InitState(); pipe2.CreateGraphicsPipeline(); CreatePipelineHelper pipe3(*this); pipe3.InitInfo(); pipe3.InitState(); pipe3.CreateGraphicsPipeline(); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe1.pipeline_); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindPipeline-pipelineBindPoint-04881"); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe2.pipeline_); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe1.pipeline_); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-vkCmdBindPipeline-pipelineBindPoint-04881"); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe3.pipeline_); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, SpecializationInvalidSizeZero) { TEST_DESCRIPTION("Make sure an error is logged when a specialization map entry's size is 0"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); const char *cs_src = R"glsl( #version 450 layout (constant_id = 0) const int c = 3; layout (local_size_x = 1) in; void main() { if (gl_GlobalInvocationID.x >= c) { return; 
} } )glsl"; // Set the specialization constant size to 0 (anything other than 1, 2, 4, or 8 will produce the expected error). VkSpecializationMapEntry entry = { 0, // id 0, // offset 0, // size }; int32_t data = 0; const VkSpecializationInfo specialization_info = { 1, &entry, 1 * sizeof(decltype(data)), &data, }; CreateComputePipelineHelper pipe(*this); pipe.InitInfo(); pipe.cs_ = layer_data::make_unique<VkShaderObj>(this, cs_src, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL, &specialization_info); pipe.InitState(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); pipe.CreateComputePipeline(); m_errorMonitor->VerifyFound(); entry.size = sizeof(decltype(data)); pipe.cs_ = layer_data::make_unique<VkShaderObj>(this, cs_src, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL, &specialization_info); pipe.InitState(); m_errorMonitor->ExpectSuccess(); pipe.CreateComputePipeline(); m_errorMonitor->VerifyNotFound(); } TEST_F(VkLayerTest, MergePipelineCachesInvalidDst) { TEST_DESCRIPTION("Test mergeing pipeline caches with dst cache in src list"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); CreatePipelineHelper pipe(*this); pipe.InitInfo(); pipe.InitState(); pipe.CreateGraphicsPipeline(); CreatePipelineHelper other_pipe(*this); other_pipe.InitInfo(); other_pipe.InitState(); other_pipe.CreateGraphicsPipeline(); VkPipelineCache dstCache = pipe.pipeline_cache_; VkPipelineCache srcCaches[2] = {other_pipe.pipeline_cache_, pipe.pipeline_cache_}; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkMergePipelineCaches-dstCache-00770"); vk::MergePipelineCaches(m_device->device(), dstCache, 2, srcCaches); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ValidateGeometryShaderEnabled) { TEST_DESCRIPTION("Validate geometry shader feature is enabled if geometry shader stage is used"); VkPhysicalDeviceFeatures deviceFeatures = {}; deviceFeatures.geometryShader = 
VK_FALSE; ASSERT_NO_FATAL_FAILURE(Init(&deviceFeatures)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT); VkShaderObj gs(this, bindStateGeomShaderText, VK_SHADER_STAGE_GEOMETRY_BIT); auto set_info = [&](CreatePipelineHelper &helper) { helper.ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP; helper.shader_stages_ = {vs.GetStageCreateInfo(), gs.GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest( *this, set_info, kErrorBit, std::vector<string>{"VUID-VkPipelineShaderStageCreateInfo-stage-00704", "VUID-VkShaderModuleCreateInfo-pCode-01091"}); } TEST_F(VkLayerTest, ValidateTessellationShaderEnabled) { TEST_DESCRIPTION( "Validate tessellation shader feature is enabled if tessellation control or tessellation evaluation shader stage is used"); VkPhysicalDeviceFeatures deviceFeatures = {}; deviceFeatures.tessellationShader = VK_FALSE; ASSERT_NO_FATAL_FAILURE(Init(&deviceFeatures)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *tcsSource = R"glsl( #version 450 layout(location=0) out int x[]; layout(vertices=3) out; void main(){ gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1; gl_TessLevelInner[0] = 1; x[gl_InvocationID] = gl_InvocationID; } )glsl"; char const *tesSource = R"glsl( #version 450 layout(triangles, equal_spacing, cw) in; layout(location=0) patch in int x; void main(){ gl_Position.xyz = gl_TessCoord; gl_Position.w = x; } )glsl"; VkShaderObj tcs(this, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT); VkShaderObj tes(this, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT); VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0, VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE}; VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3}; auto set_info = [&](CreatePipelineHelper &helper) { 
helper.ia_ci_.topology = VK_PRIMITIVE_TOPOLOGY_PATCH_LIST; helper.gp_ci_.pTessellationState = &tsci; helper.gp_ci_.pInputAssemblyState = &iasci; helper.shader_stages_.emplace_back(tcs.GetStageCreateInfo()); helper.shader_stages_.emplace_back(tes.GetStageCreateInfo()); }; CreatePipelineHelper::OneshotTest( *this, set_info, kErrorBit, std::vector<string>{"VUID-VkPipelineShaderStageCreateInfo-stage-00705", "VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-VkPipelineInputAssemblyStateCreateInfo-topology-00430"}); } TEST_F(VkLayerTest, CreateComputesPipelineWithBadBasePointer) { TEST_DESCRIPTION("Create Compute Pipeline with bad base pointer"); ASSERT_NO_FATAL_FAILURE(Init()); char const *csSource = R"glsl( #version 450 layout(local_size_x=2, local_size_y=4) in; void main(){ } )glsl"; VkShaderObj cs(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT); std::vector<VkDescriptorSetLayoutBinding> bindings(0); const VkDescriptorSetLayoutObj pipeline_dsl(m_device, bindings); const VkPipelineLayoutObj pipeline_layout(m_device, {&pipeline_dsl}); VkComputePipelineCreateInfo compute_create_info = LvlInitStruct<VkComputePipelineCreateInfo>(); compute_create_info.flags = VK_PIPELINE_CREATE_DERIVATIVE_BIT; compute_create_info.stage = cs.GetStageCreateInfo(); compute_create_info.layout = pipeline_layout.handle(); VkPipeline test_pipeline; vk::CreateComputePipelines(device(), VK_NULL_HANDLE, 1, &compute_create_info, nullptr, &test_pipeline); { compute_create_info.basePipelineHandle = VK_NULL_HANDLE; compute_create_info.basePipelineIndex = 1; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkComputePipelineCreateInfo-flags-00698"); VkPipeline pipeline; vk::CreateComputePipelines(device(), VK_NULL_HANDLE, 1, &compute_create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } if (test_pipeline != VK_NULL_HANDLE) { compute_create_info.basePipelineHandle = test_pipeline; compute_create_info.basePipelineIndex = 1; 
m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkComputePipelineCreateInfo-flags-00699"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkComputePipelineCreateInfo-flags-00700"); VkPipeline pipeline; vk::CreateComputePipelines(device(), VK_NULL_HANDLE, 1, &compute_create_info, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, CreatePipelineWithDuplicatedSpecializationConstantID) { TEST_DESCRIPTION("Create a pipeline with non unique constantID in specialization pMapEntries."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); char const *fsSource = R"glsl( #version 450 layout (constant_id = 0) const float r = 0.0f; layout(location = 0) out vec4 uFragColor; void main(){ uFragColor = vec4(r,1,0,1); } )glsl"; VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT); VkSpecializationMapEntry entries[2]; entries[0].constantID = 0; entries[0].offset = 0; entries[0].size = sizeof(uint32_t); entries[1].constantID = 0; entries[1].offset = 0; entries[1].size = sizeof(uint32_t); uint32_t data = 1; VkSpecializationInfo specialization_info; specialization_info.mapEntryCount = 2; specialization_info.pMapEntries = entries; specialization_info.dataSize = sizeof(uint32_t); specialization_info.pData = &data; const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()}; helper.shader_stages_[1].pSpecializationInfo = &specialization_info; }; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationInfo-constantID-04911"); } TEST_F(VkLayerTest, PipelineSubgroupSizeControl) { TEST_DESCRIPTION("Test Subgroub Size Control"); SetTargetApiVersion(VK_API_VERSION_1_2); AddRequiredExtensions(VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceValidationVersion() < VK_API_VERSION_1_2) { printf("%s Test requires Vulkan >= 1.1\n", kSkipPrefix); return; } if 
(!AreRequestedExtensionsEnabled()) { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME); return; } VkPhysicalDeviceSubgroupSizeControlFeaturesEXT sscf = LvlInitStruct<VkPhysicalDeviceSubgroupSizeControlFeaturesEXT>(); sscf.subgroupSizeControl = VK_TRUE; sscf.computeFullSubgroups = VK_TRUE; VkPhysicalDeviceFeatures2 pd_features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&sscf); vk::GetPhysicalDeviceFeatures2(gpu(), &pd_features2); if (sscf.subgroupSizeControl == VK_FALSE || sscf.computeFullSubgroups == VK_FALSE) { printf("%s Required features are not supported, skipping test.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2)); auto subgroup_properties = LvlInitStruct<VkPhysicalDeviceSubgroupSizeControlPropertiesEXT>(); auto props = LvlInitStruct<VkPhysicalDeviceProperties2>(&subgroup_properties); vk::GetPhysicalDeviceProperties2(gpu(), &props); auto subgroup_size_control = LvlInitStruct<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT>(); subgroup_size_control.requiredSubgroupSize = subgroup_properties.minSubgroupSize; VkPhysicalDeviceVulkan11Properties props11 = LvlInitStruct<VkPhysicalDeviceVulkan11Properties>(); VkPhysicalDeviceProperties2 pd_props2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&props11); vk::GetPhysicalDeviceProperties2(gpu(), &pd_props2); { CreateComputePipelineHelper cs_pipeline(*this); cs_pipeline.InitInfo(); cs_pipeline.InitState(); cs_pipeline.LateBindPipelineInfo(); cs_pipeline.cp_ci_.stage.pNext = &subgroup_size_control; cs_pipeline.cp_ci_.stage.flags = VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineShaderStageCreateInfo-pNext-02754"); cs_pipeline.CreateComputePipeline(true, false); // need false to prevent late binding m_errorMonitor->VerifyFound(); } if (subgroup_properties.maxSubgroupSize > 1) { std::stringstream csSource; csSource << R"glsl( #version 450 
layout(local_size_x = )glsl"; csSource << subgroup_properties.maxSubgroupSize + 1; csSource << R"glsl() in; void main() {} )glsl"; CreateComputePipelineHelper cs_pipeline(*this); cs_pipeline.InitInfo(); cs_pipeline.cs_.reset(new VkShaderObj(this, csSource.str(), VK_SHADER_STAGE_COMPUTE_BIT)); cs_pipeline.InitState(); cs_pipeline.LateBindPipelineInfo(); cs_pipeline.cp_ci_.stage.flags = VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT | VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineShaderStageCreateInfo-flags-02758"); cs_pipeline.CreateComputePipeline(true, false); // need false to prevent late binding m_errorMonitor->VerifyFound(); } if (props11.subgroupSize > 1) { std::stringstream csSource; csSource << R"glsl( #version 450 layout(local_size_x = )glsl"; csSource << props11.subgroupSize + 1; csSource << R"glsl() in; void main() {} )glsl"; CreateComputePipelineHelper cs_pipeline(*this); cs_pipeline.InitInfo(); cs_pipeline.cs_.reset(new VkShaderObj(this, csSource.str(), VK_SHADER_STAGE_COMPUTE_BIT)); cs_pipeline.InitState(); cs_pipeline.LateBindPipelineInfo(); cs_pipeline.cp_ci_.stage.flags = VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineShaderStageCreateInfo-flags-02759"); cs_pipeline.CreateComputePipeline(true, false); // need false to prevent late binding m_errorMonitor->VerifyFound(); } } TEST_F(VkLayerTest, SubgroupSizeControlFeaturesNotEnabled) { TEST_DESCRIPTION("Use subgroup size control features when they are not enabled"); SetTargetApiVersion(VK_API_VERSION_1_1); AddRequiredExtensions(VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceValidationVersion() < VK_API_VERSION_1_1) { printf("%s At least Vulkan version 1.1 is required, skipping test.\n", kSkipPrefix); return; } if (!AreRequestedExtensionsEnabled()) { 
printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME); return; } VkPhysicalDeviceSubgroupSizeControlFeaturesEXT sscf = LvlInitStruct<VkPhysicalDeviceSubgroupSizeControlFeaturesEXT>(); sscf.subgroupSizeControl = VK_FALSE; sscf.computeFullSubgroups = VK_FALSE; VkPhysicalDeviceFeatures2 pd_features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&sscf); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &pd_features2)); VkPhysicalDeviceVulkan11Properties props11 = LvlInitStruct<VkPhysicalDeviceVulkan11Properties>(); VkPhysicalDeviceProperties2 pd_props2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&props11); vk::GetPhysicalDeviceProperties2(gpu(), &pd_props2); if (props11.subgroupSize == 0) { printf("%s subgroupSize is 0, skipping test.\n", kSkipPrefix); return; } std::stringstream csSource; // Make sure compute pipeline has a compute shader stage set csSource << R"( #version 450 layout(local_size_x = )"; csSource << props11.subgroupSize; csSource << R"() in; void main(){ } )"; CreateComputePipelineHelper pipe(*this); pipe.InitInfo(); pipe.cs_.reset(new VkShaderObj(this, csSource.str(), VK_SHADER_STAGE_COMPUTE_BIT)); pipe.InitState(); pipe.LateBindPipelineInfo(); pipe.cp_ci_.stage.flags = VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT_EXT; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineShaderStageCreateInfo-flags-02784"); pipe.CreateComputePipeline(true, false); m_errorMonitor->VerifyFound(); pipe.cp_ci_.stage.flags = VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT_EXT; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineShaderStageCreateInfo-flags-02785"); pipe.CreateComputePipeline(true, false); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ShaderAtomicInt64) { TEST_DESCRIPTION("Test VK_KHR_shader_atomic_int64."); SetTargetApiVersion(VK_API_VERSION_1_1); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { 
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } // Create device without VK_KHR_shader_atomic_int64 extension or features enabled ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); VkPhysicalDeviceFeatures available_features = {}; ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&available_features)); if (!available_features.shaderInt64) { printf("%s VkPhysicalDeviceFeatures::shaderInt64 is not supported, skipping tests\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); if (m_device->props.apiVersion < VK_API_VERSION_1_1) { printf("%s At least Vulkan version 1.1 is required for SPIR-V 1.3, skipping test.\n", kSkipPrefix); return; } // For sanity check without GL_EXT_shader_atomic_int64 std::string cs_positive = R"glsl( #version 450 #extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable #extension GL_KHR_memory_scope_semantics : enable shared uint64_t x; layout(set = 0, binding = 0) buffer ssbo { uint64_t y; }; void main() { y = x + 1; } )glsl"; std::string cs_base = R"glsl( #version 450 #extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable #extension GL_EXT_shader_atomic_int64 : enable #extension GL_KHR_memory_scope_semantics : enable shared uint64_t x; layout(set = 0, binding = 0) buffer ssbo { uint64_t y; }; void main() { )glsl"; // clang-format off // StorageBuffer storage class std::string cs_storage_buffer = cs_base + R"glsl( atomicAdd(y, 1); } )glsl"; // StorageBuffer storage class using AtomicStore // atomicStore is slightly different than other atomics, so good edge case std::string cs_store = cs_base + R"glsl( atomicStore(y, 1ul, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed); } )glsl"; // Workgroup storage class std::string cs_workgroup = cs_base + R"glsl( atomicAdd(x, 1); barrier(); y = x + 1; 
} )glsl"; // clang-format on const char *current_shader = nullptr; const auto set_info = [&](CreateComputePipelineHelper &helper) { // Requires SPIR-V 1.3 for SPV_KHR_storage_buffer_storage_class helper.cs_.reset(new VkShaderObj(this, current_shader, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_1)); helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}}; }; current_shader = cs_positive.c_str(); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "", true); // shaderBufferInt64Atomics current_shader = cs_storage_buffer.c_str(); CreateComputePipelineHelper::OneshotTest( *this, set_info, kErrorBit, std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-RuntimeSpirv-None-06278"}); current_shader = cs_store.c_str(); CreateComputePipelineHelper::OneshotTest( *this, set_info, kErrorBit, std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-RuntimeSpirv-None-06278"}); // shaderSharedInt64Atomics current_shader = cs_workgroup.c_str(); CreateComputePipelineHelper::OneshotTest( *this, set_info, kErrorBit, std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-RuntimeSpirv-None-06279"}); } TEST_F(VkLayerTest, PipelineInvalidAdvancedBlend) { TEST_DESCRIPTION("Create a graphics pipeline with advanced blend when its disabled"); if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT blend_operation_advanced = LvlInitStruct<VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT>(); VkPhysicalDeviceProperties2 pd_props2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&blend_operation_advanced); 
vk::GetPhysicalDeviceProperties2(gpu(), &pd_props2); if (blend_operation_advanced.advancedBlendAllOperations == VK_TRUE) { printf("%s blend_operation_advanced.advancedBlendAllOperations is VK_TRUE.\n", kSkipPrefix); return; } CreatePipelineHelper pipe(*this); pipe.InitInfo(); VkPipelineColorBlendAttachmentState attachment_state = {}; attachment_state.blendEnable = VK_TRUE; attachment_state.colorBlendOp = VK_BLEND_OP_XOR_EXT; VkPipelineColorBlendStateCreateInfo color_blend_state = LvlInitStruct<VkPipelineColorBlendStateCreateInfo>(); color_blend_state.attachmentCount = 1; color_blend_state.pAttachments = &attachment_state; pipe.gp_ci_.pColorBlendState = &color_blend_state; pipe.InitState(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineColorBlendAttachmentState-advancedBlendAllOperations-01409"); pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvlidPipelineDiscardRectangle) { TEST_DESCRIPTION("Create a graphics pipeline invalid VkPipelineDiscardRectangleStateCreateInfoEXT"); bool inst_ext = InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); if (inst_ext) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s %s not supported, skipping tests\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework()); if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME)) { printf("%s %s not supported, skipping test\n", kSkipPrefix, VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_EXT_DISCARD_RECTANGLES_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPhysicalDeviceDiscardRectanglePropertiesEXT discard_rectangle_properties = LvlInitStruct<VkPhysicalDeviceDiscardRectanglePropertiesEXT>(); auto phys_dev_props_2 = 
LvlInitStruct<VkPhysicalDeviceProperties2>();
    phys_dev_props_2.pNext = &discard_rectangle_properties;
    vk::GetPhysicalDeviceProperties2(gpu(), &phys_dev_props_2);

    // One rectangle more than the implementation's limit, to trigger the VUID below.
    uint32_t count = discard_rectangle_properties.maxDiscardRectangles + 1;
    std::vector<VkRect2D> discard_rectangles(count);

    VkPipelineDiscardRectangleStateCreateInfoEXT discard_rectangle_state =
        LvlInitStruct<VkPipelineDiscardRectangleStateCreateInfoEXT>();
    discard_rectangle_state.discardRectangleCount = count;
    discard_rectangle_state.pDiscardRectangles = discard_rectangles.data();

    CreatePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.gp_ci_.pNext = &discard_rectangle_state;
    pipe.InitState();

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit,
                                         "VUID-VkPipelineDiscardRectangleStateCreateInfoEXT-discardRectangleCount-00582");
    pipe.CreateGraphicsPipeline();
    m_errorMonitor->VerifyFound();
}

// Negative test: using 64-bit image atomics without enabling the
// VK_EXT_shader_image_atomic_int64 extension/features must produce the expected
// shader-module capability errors (01091/04147) and the runtime SPIR-V error 06288.
TEST_F(VkLayerTest, ShaderImageAtomicInt64) {
    TEST_DESCRIPTION("Test VK_EXT_shader_image_atomic_int64.");
    SetTargetApiVersion(VK_API_VERSION_1_1);

    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }

    // Create device without VK_EXT_shader_image_atomic_int64 extension or features enabled
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    VkPhysicalDeviceFeatures available_features = {};
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&available_features));
    // shaderInt64 is needed just so the GLSL below compiles; the test is about image atomics.
    if (!available_features.shaderInt64) {
        printf("%s VkPhysicalDeviceFeatures::shaderInt64 is not supported, skipping tests\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    if (m_device->props.apiVersion < VK_API_VERSION_1_1) {
        printf("%s At least Vulkan version 1.1 is required for SPIR-V 1.3, skipping test.\n", kSkipPrefix);
        return;
    }

    // clang-format off
    std::string cs_image_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_explicit_arithmetic_types_int64 : enable
        #extension GL_EXT_shader_image_int64 : enable
        #extension GL_KHR_memory_scope_semantics : enable
        layout(set = 0, binding = 0) buffer ssbo { uint64_t y; };
        layout(set = 0, binding = 1, r64ui) uniform u64image2D z;
        void main() {
    )glsl";

    std::string cs_image_load = cs_image_base + R"glsl(
        y = imageAtomicLoad(z, ivec2(1, 1), gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_image_store = cs_image_base + R"glsl(
        imageAtomicStore(z, ivec2(1, 1), y, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_image_exchange = cs_image_base + R"glsl(
        imageAtomicExchange(z, ivec2(1, 1), y, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_image_add = cs_image_base + R"glsl(
        y = imageAtomicAdd(z, ivec2(1, 1), y);
    }
    )glsl";
    // clang-format on

    std::unique_ptr<VkShaderObj> current_shader;
    const auto set_info = [&current_shader](CreateComputePipelineHelper &helper) {
        // Requires SPIR-V 1.3 for SPV_KHR_storage_buffer_storage_class
        helper.cs_ = std::move(current_shader);
        helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr},
                                {1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr}};
    };

    // shaderImageInt64Atomics
    // Need 01091 VUID check for both Int64ImageEXT and Int64Atomics.. test could be rewritten to be more complex in order to set
    // capability requirements with other features, but this is simpler
    current_shader = layer_data::make_unique<VkShaderObj>(this, cs_image_load, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_1,
                                                          SPV_SOURCE_GLSL_TRY);
    m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv);
    if (VK_SUCCESS == current_shader->InitFromGLSLTry(cs_image_load.c_str(), false, SPV_ENV_VULKAN_1_1)) {
        // 01091 listed twice: once per disabled capability (Int64ImageEXT and Int64Atomics).
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-VkShaderModuleCreateInfo-pCode-01091",
                                "VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06288"});
    }

    // glslang doesn't omit Int64Atomics for store currently
    current_shader = layer_data::make_unique<VkShaderObj>(this, cs_image_store, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_1,
                                                          SPV_SOURCE_GLSL_TRY);
    m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv);
    if (VK_SUCCESS == current_shader->InitFromGLSLTry(cs_image_store.c_str(), false, SPV_ENV_VULKAN_1_1)) {
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-VkShaderModuleCreateInfo-pCode-04147",
                                "VUID-RuntimeSpirv-None-06288"});
    }

    current_shader = layer_data::make_unique<VkShaderObj>(this, cs_image_exchange, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_1,
                                                          SPV_SOURCE_GLSL_TRY);
    m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv);
    if (VK_SUCCESS == current_shader->InitFromGLSLTry(cs_image_exchange.c_str(), false, SPV_ENV_VULKAN_1_1)) {
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-VkShaderModuleCreateInfo-pCode-01091",
                                "VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06288"});
    }

    current_shader = layer_data::make_unique<VkShaderObj>(this, cs_image_add, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_1,
                                                          SPV_SOURCE_GLSL_TRY);
    m_errorMonitor->SetUnexpectedError(kVUID_Core_Shader_InconsistentSpirv);
    if (VK_SUCCESS == current_shader->InitFromGLSLTry(cs_image_add.c_str(), false, SPV_ENV_VULKAN_1_1)) {
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-VkShaderModuleCreateInfo-pCode-01091",
                                "VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06288"});
    }
}

// Negative test: float atomic usage without enabling VK_EXT_shader_atomic_float must fail
// with 06280 (buffer), 06281 (workgroup/shared) or 06282 (image) runtime SPIR-V errors.
TEST_F(VkLayerTest, ShaderAtomicFloat) {
    TEST_DESCRIPTION("Test VK_EXT_shader_atomic_float.");
    SetTargetApiVersion(VK_API_VERSION_1_1);

    if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) {
        m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
    } else {
        printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix,
               VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME);
        return;
    }

    // Create device without VK_EXT_shader_atomic_float extension or features enabled
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    VkPhysicalDeviceFeatures available_features = {};
    ASSERT_NO_FATAL_FAILURE(GetPhysicalDeviceFeatures(&available_features));
    ASSERT_NO_FATAL_FAILURE(InitState());

    if (m_device->props.apiVersion < VK_API_VERSION_1_1) {
        printf("%s At least Vulkan version 1.1 is required for SPIR-V 1.3, skipping test.\n", kSkipPrefix);
        return;
    }

    // clang-format off
    std::string cs_32_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float : enable
        #extension GL_KHR_memory_scope_semantics : enable
        #extension GL_EXT_shader_explicit_arithmetic_types_float32 : enable
        shared float32_t x;
        layout(set = 0, binding = 0) buffer ssbo { float32_t y; };
        void main() {
    )glsl";

    std::string cs_buffer_float_32_add = cs_32_base + R"glsl(
        atomicAdd(y, 1);
    }
    )glsl";

    std::string cs_buffer_float_32_load = cs_32_base + R"glsl(
        y = 1 + atomicLoad(y, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_buffer_float_32_store = cs_32_base + R"glsl(
        float32_t a = 1;
        atomicStore(y, a, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_buffer_float_32_exchange = cs_32_base + R"glsl(
        float32_t a = 1;
        atomicExchange(y, a);
    }
    )glsl";

    std::string cs_shared_float_32_add = cs_32_base + R"glsl(
        y = atomicAdd(x, 1);
    }
    )glsl";

    std::string cs_shared_float_32_load = cs_32_base + R"glsl(
        y = 1 + atomicLoad(x, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_shared_float_32_store = cs_32_base + R"glsl(
        atomicStore(x, y, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_shared_float_32_exchange = cs_32_base + R"glsl(
        float32_t a = 1;
        atomicExchange(x, y);
    }
    )glsl";

    std::string cs_64_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float : enable
        #extension GL_KHR_memory_scope_semantics : enable
        #extension GL_EXT_shader_explicit_arithmetic_types_float64 : enable
        shared float64_t x;
        layout(set = 0, binding = 0) buffer ssbo { float64_t y; };
        void main() {
    )glsl";

    std::string cs_buffer_float_64_add = cs_64_base + R"glsl(
        atomicAdd(y, 1);
    }
    )glsl";

    std::string cs_buffer_float_64_load = cs_64_base + R"glsl(
        y = 1 + atomicLoad(y, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_buffer_float_64_store = cs_64_base + R"glsl(
        float64_t a = 1;
        atomicStore(y, a, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_buffer_float_64_exchange = cs_64_base + R"glsl(
        float64_t a = 1;
        atomicExchange(y, a);
    }
    )glsl";

    std::string cs_shared_float_64_add = cs_64_base + R"glsl(
        y = atomicAdd(x, 1);
    }
    )glsl";

    std::string cs_shared_float_64_load = cs_64_base + R"glsl(
        y = 1 + atomicLoad(x, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_shared_float_64_store = cs_64_base + R"glsl(
        atomicStore(x, y, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_shared_float_64_exchange = cs_64_base + R"glsl(
        float64_t a = 1;
        atomicExchange(x, y);
    }
    )glsl";

    std::string cs_image_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float : enable
        #extension GL_KHR_memory_scope_semantics : enable
        layout(set = 0, binding = 0) buffer ssbo { float y; };
        layout(set = 0, binding = 1, r32f) uniform image2D z;
        void main() {
    )glsl";

    std::string cs_image_load = cs_image_base + R"glsl(
        y = imageAtomicLoad(z, ivec2(1, 1), gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_image_store = cs_image_base + R"glsl(
        imageAtomicStore(z, ivec2(1, 1), y, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_image_exchange = cs_image_base + R"glsl(
        imageAtomicExchange(z, ivec2(1, 1), y, gl_ScopeDevice, gl_StorageSemanticsImage, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_image_add = cs_image_base + R"glsl(
        y = imageAtomicAdd(z, ivec2(1, 1), y);
    }
    )glsl";
    // clang-format on

    const char *current_shader = nullptr;
    // set binding for buffer tests
    std::vector<VkDescriptorSetLayoutBinding> current_bindings = {
        {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};

    const auto set_info = [&](CreateComputePipelineHelper &helper) {
        helper.cs_ = layer_data::make_unique<VkShaderObj>(this, "", VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_1,
                                                          SPV_SOURCE_GLSL_TRY);
        // Requires SPIR-V 1.3 for SPV_KHR_storage_buffer_storage_class
        if (VK_SUCCESS != helper.cs_.get()->InitFromGLSLTry(current_shader, false, SPV_ENV_VULKAN_1_1)) {
            // Shader did not compile (e.g. unsupported types on this machine); skip this case.
            helper.override_skip_ = true;
        }
        helper.dsl_bindings_ = current_bindings;
    };

    // shaderBufferFloat32Atomics
    current_shader = cs_buffer_float_32_load.c_str();
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06280");

    current_shader = cs_buffer_float_32_store.c_str();
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06280");

    current_shader = cs_buffer_float_32_exchange.c_str();
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06280");

    // shaderBufferFloat32AtomicAdd
    current_shader = cs_buffer_float_32_add.c_str();
    CreateComputePipelineHelper::OneshotTest(
        *this, set_info, kErrorBit,
        std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-VkShaderModuleCreateInfo-pCode-04147",
                            "VUID-RuntimeSpirv-None-06280"});

    // shaderSharedFloat32Atomics
    current_shader = cs_shared_float_32_load.c_str();
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06281");

    current_shader = cs_shared_float_32_store.c_str();
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06281");

    current_shader = cs_shared_float_32_exchange.c_str();
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06281");

    // shaderSharedFloat32AtomicAdd
    current_shader = cs_shared_float_32_add.c_str();
    CreateComputePipelineHelper::OneshotTest(
        *this, set_info, kErrorBit,
        std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-VkShaderModuleCreateInfo-pCode-04147",
                            "VUID-RuntimeSpirv-None-06281"});

    // shaderBufferFloat64Atomics
    if (available_features.shaderFloat64) {
        current_shader = cs_buffer_float_64_load.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06280");

        current_shader = cs_buffer_float_64_store.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06280");

        current_shader = cs_buffer_float_64_exchange.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06280");

        // shaderBufferFloat64AtomicAdd
        current_shader = cs_buffer_float_64_add.c_str();
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-VkShaderModuleCreateInfo-pCode-04147",
                                "VUID-RuntimeSpirv-None-06280"});

        // shaderSharedFloat64Atomics
        current_shader = cs_shared_float_64_load.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06281");

        current_shader = cs_shared_float_64_store.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06281");

        current_shader = cs_shared_float_64_exchange.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06281");

        // shaderSharedFloat64AtomicAdd
        current_shader = cs_shared_float_64_add.c_str();
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-VkShaderModuleCreateInfo-pCode-04147",
                                "VUID-RuntimeSpirv-None-06281"});
    } else {
        printf("Skipping 64-bit float tests\n");
    }

    // Add binding for images
    current_bindings.push_back({1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr});

    // shaderImageFloat32Atomics
    current_shader = cs_image_load.c_str();
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06282");

    current_shader = cs_image_store.c_str();
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06282");

    current_shader = cs_image_exchange.c_str();
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06282");

    // shaderImageFloat32AtomicAdd
    current_shader = cs_image_add.c_str();
    CreateComputePipelineHelper::OneshotTest(
        *this, set_info, kErrorBit,
        std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-01091", "VUID-VkShaderModuleCreateInfo-pCode-04147",
                            "VUID-RuntimeSpirv-None-06282"});
}

// Negative test: float16/32/64 min/max/add atomics without enabling
// VK_EXT_shader_atomic_float2 must fail with 06280 (buffer), 06281 (shared)
// or 06282 (image) runtime SPIR-V errors plus the 04147 extension check.
TEST_F(VkLayerTest, ShaderAtomicFloat2) {
    TEST_DESCRIPTION("Test VK_EXT_shader_atomic_float2.");
    SetTargetApiVersion(VK_API_VERSION_1_2);

    // Create device without VK_EXT_shader_atomic_float2 extension or features enabled
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s Test requires Vulkan >= 1.2.\n", kSkipPrefix);
        return;
    }

    // Still check for proper 16-bit storage/float support for most tests
    auto float16int8_features = LvlInitStruct<VkPhysicalDeviceShaderFloat16Int8Features>();
    auto storage_16_bit_features = LvlInitStruct<VkPhysicalDevice16BitStorageFeatures>(&float16int8_features);
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&storage_16_bit_features);
    vk::GetPhysicalDeviceFeatures2(gpu(), &features2);
    const bool support_16_bit =
        (float16int8_features.shaderFloat16 == VK_TRUE) && (storage_16_bit_features.storageBuffer16BitAccess == VK_TRUE);

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));

    // clang-format off
    std::string cs_16_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float2 : enable
        #extension GL_EXT_shader_explicit_arithmetic_types_float16 : enable
        #extension GL_EXT_shader_16bit_storage: enable
        #extension GL_KHR_memory_scope_semantics : enable
        shared float16_t x;
        layout(set = 0, binding = 0) buffer ssbo { float16_t y; };
        void main() {
    )glsl";

    std::string cs_buffer_float_16_add = cs_16_base + R"glsl(
        atomicAdd(y, float16_t(1.0));
    }
    )glsl";

    std::string cs_buffer_float_16_load = cs_16_base + R"glsl(
        y = float16_t(1.0) + atomicLoad(y, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_buffer_float_16_store = cs_16_base + R"glsl(
        float16_t a = float16_t(1.0);
        atomicStore(y, a, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_buffer_float_16_exchange = cs_16_base + R"glsl(
        float16_t a = float16_t(1.0);
        atomicExchange(y, a);
    }
    )glsl";

    std::string cs_buffer_float_16_min = cs_16_base + R"glsl(
        atomicMin(y, float16_t(1.0));
    }
    )glsl";

    std::string cs_buffer_float_16_max = cs_16_base + R"glsl(
        atomicMax(y, float16_t(1.0));
    }
    )glsl";

    std::string cs_shared_float_16_add = cs_16_base + R"glsl(
        y = atomicAdd(x, float16_t(1.0));
    }
    )glsl";

    std::string cs_shared_float_16_load = cs_16_base + R"glsl(
        y = float16_t(1.0) + atomicLoad(x, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_shared_float_16_store = cs_16_base + R"glsl(
        atomicStore(x, y, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
    }
    )glsl";

    std::string cs_shared_float_16_exchange = cs_16_base + R"glsl(
        float16_t a = float16_t(1.0);
        atomicExchange(x, y);
    }
    )glsl";

    std::string cs_shared_float_16_min = cs_16_base + R"glsl(
        y = atomicMin(x, float16_t(1.0));
    }
    )glsl";

    std::string cs_shared_float_16_max = cs_16_base + R"glsl(
        y = atomicMax(x, float16_t(1.0));
    }
    )glsl";

    std::string cs_32_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float2 : enable
        #extension GL_EXT_shader_explicit_arithmetic_types_float32 : enable
        shared float32_t x;
        layout(set = 0, binding = 0) buffer ssbo { float32_t y; };
        void main() {
    )glsl";

    std::string cs_buffer_float_32_min = cs_32_base + R"glsl(
        atomicMin(y, 1);
    }
    )glsl";

    std::string cs_buffer_float_32_max = cs_32_base + R"glsl(
        atomicMax(y, 1);
    }
    )glsl";

    std::string cs_shared_float_32_min = cs_32_base + R"glsl(
        y = atomicMin(x, 1);
    }
    )glsl";

    std::string cs_shared_float_32_max = cs_32_base + R"glsl(
        y = atomicMax(x, 1);
    }
    )glsl";

    std::string cs_64_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float2 : enable
        #extension GL_EXT_shader_explicit_arithmetic_types_float64 : enable
        shared float64_t x;
        layout(set = 0, binding = 0) buffer ssbo { float64_t y; };
        void main() {
    )glsl";

    std::string cs_buffer_float_64_min = cs_64_base + R"glsl(
        atomicMin(y, 1);
    }
    )glsl";

    std::string cs_buffer_float_64_max = cs_64_base + R"glsl(
        atomicMax(y, 1);
    }
    )glsl";

    std::string cs_shared_float_64_min = cs_64_base + R"glsl(
        y = atomicMin(x, 1);
    }
    )glsl";

    std::string cs_shared_float_64_max = cs_64_base + R"glsl(
        y = atomicMax(x, 1);
    }
    )glsl";

    std::string cs_image_32_base = R"glsl(
        #version 450
        #extension GL_EXT_shader_atomic_float2 : enable
        layout(set = 0, binding = 0) buffer ssbo { float y; };
        layout(set = 0, binding = 1, r32f) uniform image2D z;
        void main() {
    )glsl";

    std::string cs_image_32_min = cs_image_32_base + R"glsl(
        y = imageAtomicMin(z, ivec2(1, 1), y);
    }
    )glsl";

    std::string cs_image_32_max = cs_image_32_base + R"glsl(
        y = imageAtomicMax(z, ivec2(1, 1), y);
    }
    )glsl";
    // clang-format on

    const char *current_shader = nullptr;
    // set binding for buffer tests
    std::vector<VkDescriptorSetLayoutBinding> current_bindings = {
        {0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};

    const auto set_info = [this, &current_shader, &current_bindings](CreateComputePipelineHelper &helper) {
        // Requires SPIR-V 1.3 for SPV_KHR_storage_buffer_storage_class
        m_errorMonitor->SetUnexpectedError("VUID-VkShaderModuleCreateInfo-pCode-01091");
        helper.cs_ = VkShaderObj::CreateFromGLSL(*this, VK_SHADER_STAGE_COMPUTE_BIT, current_shader, "main", nullptr,
                                                 SPV_ENV_VULKAN_1_1);
        // Skip the test if shader failed to compile
        helper.override_skip_ = !static_cast<bool>(helper.cs_);
        helper.dsl_bindings_ = current_bindings;
    };

    if (support_16_bit) {
        // shaderBufferFloat16Atomics
        current_shader = cs_buffer_float_16_load.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06280");

        current_shader = cs_buffer_float_16_store.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06280");

        current_shader = cs_buffer_float_16_exchange.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06280");

        // shaderBufferFloat16AtomicAdd
        current_shader = cs_buffer_float_16_add.c_str();
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-VkShaderModuleCreateInfo-pCode-04147",
                                "VUID-RuntimeSpirv-None-06280"});

        // shaderBufferFloat16AtomicMinMax
        current_shader = cs_buffer_float_16_min.c_str();
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06280"});

        current_shader = cs_buffer_float_16_max.c_str();
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06280"});

        // shaderSharedFloat16Atomics
        current_shader = cs_shared_float_16_load.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06281");

        current_shader = cs_shared_float_16_store.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06281");

        current_shader = cs_shared_float_16_exchange.c_str();
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-None-06281");

        // shaderSharedFloat16AtomicAdd
        current_shader = cs_shared_float_16_add.c_str();
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-VkShaderModuleCreateInfo-pCode-04147",
                                "VUID-RuntimeSpirv-None-06281"});

        // shaderSharedFloat16AtomicMinMax
        current_shader = cs_shared_float_16_min.c_str();
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06281"});

        current_shader = cs_shared_float_16_max.c_str();
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06281"});
    } else {
        printf("Skipping 16-bit tests\n");
    }

    // shaderBufferFloat32AtomicMinMax
    current_shader = cs_buffer_float_32_min.c_str();
    CreateComputePipelineHelper::OneshotTest(
        *this, set_info, kErrorBit,
        std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06280"});

    current_shader = cs_buffer_float_32_max.c_str();
    CreateComputePipelineHelper::OneshotTest(
        *this, set_info, kErrorBit,
        std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06280"});

    // shaderSharedFloat32AtomicMinMax
    current_shader = cs_shared_float_32_min.c_str();
    CreateComputePipelineHelper::OneshotTest(
        *this, set_info, kErrorBit,
        std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06281"});

    current_shader = cs_shared_float_32_max.c_str();
    CreateComputePipelineHelper::OneshotTest(
        *this, set_info, kErrorBit,
        std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06281"});

    if (features2.features.shaderFloat64 == VK_TRUE) {
        // shaderBufferFloat64AtomicMinMax
        current_shader = cs_buffer_float_64_min.c_str();
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06280"});

        current_shader = cs_buffer_float_64_max.c_str();
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06280"});

        // shaderSharedFloat64AtomicMinMax
        current_shader = cs_shared_float_64_min.c_str();
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06281"});

        current_shader = cs_shared_float_64_max.c_str();
        CreateComputePipelineHelper::OneshotTest(
            *this, set_info, kErrorBit,
            std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06281"});
    } else {
        printf("Skipping 64-bit float tests\n");
    }

    // Add binding for images
    current_bindings.push_back({1, VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1, VK_SHADER_STAGE_ALL, nullptr});

    // shaderImageFloat32AtomicMinMax
    current_shader = cs_image_32_min.c_str();
CreateComputePipelineHelper::OneshotTest( *this, set_info, kErrorBit, std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06282"}); current_shader = cs_image_32_min.c_str(); CreateComputePipelineHelper::OneshotTest( *this, set_info, kErrorBit, std::vector<string>{"VUID-VkShaderModuleCreateInfo-pCode-04147", "VUID-RuntimeSpirv-None-06282"}); } TEST_F(VkLayerTest, BindLibraryPipeline) { TEST_DESCRIPTION("Test binding a pipeline that was created with library flag"); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (!DeviceExtensionSupported(gpu(), nullptr, VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME)) { printf("%s test requires %s extension. Skipping.\n", kSkipPrefix, VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); CreateComputePipelineHelper cs_pipeline(*this); cs_pipeline.InitInfo(); cs_pipeline.InitState(); cs_pipeline.LateBindPipelineInfo(); cs_pipeline.cp_ci_.flags = VK_PIPELINE_CREATE_LIBRARY_BIT_KHR; cs_pipeline.CreateComputePipeline(true, false); // need false to prevent late binding m_commandBuffer->begin(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindPipeline-pipeline-03382"); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, cs_pipeline.pipeline_); m_errorMonitor->VerifyFound(); m_commandBuffer->end(); } TEST_F(VkLayerTest, TestPipelineColorWriteCreateInfoEXT) { TEST_DESCRIPTION("Test VkPipelineColorWriteCreateInfoEXT in color blend state pNext"); if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s test requires %s extension. 
Skipping.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_COLOR_WRITE_ENABLE_EXTENSION_NAME)) { printf("%s test requires %s extension. Skipping.\n", kSkipPrefix, VK_EXT_COLOR_WRITE_ENABLE_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_EXT_COLOR_WRITE_ENABLE_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPipelineColorWriteCreateInfoEXT color_write = LvlInitStruct<VkPipelineColorWriteCreateInfoEXT>(); CreatePipelineHelper pipe(*this); pipe.InitInfo(); pipe.InitState(); pipe.cb_ci_.pNext = &color_write; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineColorWriteCreateInfoEXT-attachmentCount-04802"); pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); std::vector<VkBool32> max_enabled(m_device->props.limits.maxColorAttachments + 1, VK_TRUE); color_write.attachmentCount = m_device->props.limits.maxColorAttachments + 1; color_write.pColorWriteEnables = max_enabled.data(); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineColorWriteCreateInfoEXT-attachmentCount-06655"); pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); VkBool32 enabled = VK_FALSE; color_write.attachmentCount = 1; color_write.pColorWriteEnables = &enabled; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineColorWriteCreateInfoEXT-pAttachments-04801"); pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, ColorBlendAdvanced) { TEST_DESCRIPTION("Test VkPipelineColorBlendAdvancedStateCreateInfoEXT with unsupported properties"); if (!InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { printf("%s Did not find required instance extension %s; skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } 
m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (!DeviceExtensionSupported(gpu(), nullptr, VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME)) { printf("%s test requires %s extension. Skipping.\n", kSkipPrefix, VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT blend_operation_advanced_props = LvlInitStruct<VkPhysicalDeviceBlendOperationAdvancedPropertiesEXT>(); VkPhysicalDeviceProperties2 pd_props2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&blend_operation_advanced_props); vk::GetPhysicalDeviceProperties2(gpu(), &pd_props2); VkPipelineColorBlendAdvancedStateCreateInfoEXT color_blend_advanced = LvlInitStruct<VkPipelineColorBlendAdvancedStateCreateInfoEXT>(); color_blend_advanced.blendOverlap = VK_BLEND_OVERLAP_DISJOINT_EXT; color_blend_advanced.dstPremultiplied = VK_FALSE; color_blend_advanced.srcPremultiplied = VK_FALSE; CreatePipelineHelper pipe(*this); pipe.InitInfo(); pipe.InitState(); pipe.cb_ci_.pNext = &color_blend_advanced; if (!blend_operation_advanced_props.advancedBlendCorrelatedOverlap) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineColorBlendAdvancedStateCreateInfoEXT-blendOverlap-01426"); } if (!blend_operation_advanced_props.advancedBlendNonPremultipliedDstColor) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineColorBlendAdvancedStateCreateInfoEXT-dstPremultiplied-01425"); } if (!blend_operation_advanced_props.advancedBlendNonPremultipliedSrcColor) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineColorBlendAdvancedStateCreateInfoEXT-srcPremultiplied-01424"); } pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, 
ValidateVariableSampleLocations) { TEST_DESCRIPTION("Validate using VkPhysicalDeviceSampleLocationsPropertiesEXT"); if (!AddRequiredExtensions(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME)) { printf("%s Did not find required instance extension(s); skipped.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework()); if (!AreRequestedExtensionsEnabled()) { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); VkPhysicalDeviceSampleLocationsPropertiesEXT sample_locations = LvlInitStruct<VkPhysicalDeviceSampleLocationsPropertiesEXT>(); VkPhysicalDeviceProperties2 phys_props = LvlInitStruct<VkPhysicalDeviceProperties2>(&sample_locations); vk::GetPhysicalDeviceProperties2(gpu(), &phys_props); if (sample_locations.variableSampleLocations) { printf("%s VkPhysicalDeviceSampleLocationsPropertiesEXT::variableSampleLocations is supported, skipping.\n", kSkipPrefix); return; } PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT vkGetPhysicalDeviceMultisamplePropertiesEXT = (PFN_vkGetPhysicalDeviceMultisamplePropertiesEXT)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceMultisamplePropertiesEXT"); assert(vkGetPhysicalDeviceMultisamplePropertiesEXT != nullptr); VkAttachmentReference attach = {}; attach.layout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpass = {}; subpass.pColorAttachments = &attach; subpass.colorAttachmentCount = 1; VkAttachmentDescription attach_desc = {}; attach_desc.format = VK_FORMAT_R8G8B8A8_UNORM; attach_desc.samples = VK_SAMPLE_COUNT_1_BIT; attach_desc.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attach_desc.finalLayout = VK_IMAGE_LAYOUT_GENERAL; VkSubpassDescription subpasses[2] = {subpass, subpass}; VkRenderPassCreateInfo rpci = LvlInitStruct<VkRenderPassCreateInfo>(); rpci.subpassCount = 2; rpci.pSubpasses = subpasses; rpci.attachmentCount = 1; rpci.pAttachments = &attach_desc; VkRenderPass render_pass; vk::CreateRenderPass(device(), &rpci, NULL, 
&render_pass); VkImageObj image(m_device); image.Init(32, 32, 1, VK_FORMAT_R8G8B8A8_UNORM, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT, VK_IMAGE_TILING_OPTIMAL, 0); VkImageView image_view = image.targetView(VK_FORMAT_R8G8B8A8_UNORM); VkFramebufferCreateInfo framebuffer_info = LvlInitStruct<VkFramebufferCreateInfo>(); framebuffer_info.renderPass = render_pass; framebuffer_info.attachmentCount = 1; framebuffer_info.pAttachments = &image_view; framebuffer_info.width = 32; framebuffer_info.height = 32; framebuffer_info.layers = 1; VkFramebuffer framebuffer; vk::CreateFramebuffer(m_device->handle(), &framebuffer_info, nullptr, &framebuffer); VkMultisamplePropertiesEXT multisample_prop = {}; vkGetPhysicalDeviceMultisamplePropertiesEXT(gpu(), VK_SAMPLE_COUNT_1_BIT, &multisample_prop); const uint32_t valid_count = multisample_prop.maxSampleLocationGridSize.width * multisample_prop.maxSampleLocationGridSize.height; if (valid_count == 0) { printf("%s multisample properties are not supported, skipping.\n", kSkipPrefix); return; } std::vector<VkSampleLocationEXT> sample_location(valid_count, {0.5, 0.5}); VkSampleLocationsInfoEXT sample_locations_info = LvlInitStruct<VkSampleLocationsInfoEXT>(); sample_locations_info.sampleLocationsPerPixel = VK_SAMPLE_COUNT_1_BIT; sample_locations_info.sampleLocationGridSize = multisample_prop.maxSampleLocationGridSize; sample_locations_info.sampleLocationsCount = valid_count; sample_locations_info.pSampleLocations = sample_location.data(); VkPipelineSampleLocationsStateCreateInfoEXT sample_locations_state = LvlInitStruct<VkPipelineSampleLocationsStateCreateInfoEXT>(); sample_locations_state.sampleLocationsEnable = VK_TRUE; sample_locations_state.sampleLocationsInfo = sample_locations_info; CreatePipelineHelper pipe(*this); pipe.InitInfo(); pipe.InitState(); pipe.gp_ci_.pNext = &sample_locations_state; pipe.gp_ci_.renderPass = render_pass; pipe.CreateGraphicsPipeline(); VkClearValue clear_value; clear_value.color.float32[0] = 0.25f; 
clear_value.color.float32[1] = 0.25f; clear_value.color.float32[2] = 0.25f; clear_value.color.float32[3] = 0.0f; VkAttachmentSampleLocationsEXT attachment_sample_locations; attachment_sample_locations.attachmentIndex = 0; attachment_sample_locations.sampleLocationsInfo = sample_locations_info; VkSubpassSampleLocationsEXT subpass_sample_locations; subpass_sample_locations.subpassIndex = 0; subpass_sample_locations.sampleLocationsInfo = sample_locations_info; VkRenderPassSampleLocationsBeginInfoEXT render_pass_sample_locations = LvlInitStruct<VkRenderPassSampleLocationsBeginInfoEXT>(); render_pass_sample_locations.attachmentInitialSampleLocationsCount = 1; render_pass_sample_locations.pAttachmentInitialSampleLocations = &attachment_sample_locations; render_pass_sample_locations.postSubpassSampleLocationsCount = 1; render_pass_sample_locations.pPostSubpassSampleLocations = &subpass_sample_locations; sample_location[0].x = 0.0f; // Invalid, VkRenderPassSampleLocationsBeginInfoEXT wont match VkPipelineSampleLocationsStateCreateInfoEXT VkRenderPassBeginInfo begin_info = LvlInitStruct<VkRenderPassBeginInfo>(&render_pass_sample_locations); begin_info.renderPass = render_pass; begin_info.framebuffer = framebuffer; begin_info.renderArea.extent.width = 32; begin_info.renderArea.extent.height = 32; begin_info.renderArea.offset.x = 0; begin_info.renderArea.offset.y = 0; begin_info.clearValueCount = 1; begin_info.pClearValues = &clear_value; m_commandBuffer->begin(); vk::CmdBeginRenderPass(m_commandBuffer->handle(), &begin_info, VK_SUBPASS_CONTENTS_INLINE); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindPipeline-variableSampleLocations-01525"); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_); m_errorMonitor->VerifyFound(); vk::CmdNextSubpass(m_commandBuffer->handle(), VK_SUBPASS_CONTENTS_INLINE); sample_location[0].x = 0.5f; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, 
"VUID-vkCmdBindPipeline-variableSampleLocations-01525");
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
    m_errorMonitor->VerifyFound();
    vk::CmdEndRenderPass(m_commandBuffer->handle());

    // Re-begin the render pass WITHOUT VkRenderPassSampleLocationsBeginInfoEXT chained in;
    // binding the sample-locations pipeline must then also trigger 01525.
    begin_info.pNext = nullptr;  // Invalid, missing VkRenderPassSampleLocationsBeginInfoEXT
    vk::CmdBeginRenderPass(m_commandBuffer->handle(), &begin_info, VK_SUBPASS_CONTENTS_INLINE);
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdBindPipeline-variableSampleLocations-01525");
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_);
    m_errorMonitor->VerifyFound();
    vk::CmdEndRenderPass(m_commandBuffer->handle());
    m_commandBuffer->end();
}

// Declaring a plain GLSL `shared` array larger than maxComputeSharedMemorySize must
// fail compute pipeline creation with VUID-RuntimeSpirv-Workgroup-06530.
TEST_F(VkLayerTest, ComputeSharedMemoryOverLimit) {
    TEST_DESCRIPTION("Validate compute shader shared memory does not exceed maxComputeSharedMemorySize");

    ASSERT_NO_FATAL_FAILURE(Init());

    const uint32_t max_shared_memory_size = m_device->phy().properties().limits.maxComputeSharedMemorySize;
    // Shared memory is declared as 32-bit ints below, so divide the byte limit by 4.
    const uint32_t max_shared_ints = max_shared_memory_size / 4;

    std::stringstream csSource;
    // Make sure compute pipeline has a compute shader stage set
    csSource << R"glsl( #version 450 shared int a[)glsl";
    csSource << (max_shared_ints + 16);  // 16 ints past the device limit
    csSource << R"glsl(]; void main(){ } )glsl";

    CreateComputePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.cs_.reset(new VkShaderObj(this, csSource.str(), VK_SHADER_STAGE_COMPUTE_BIT));
    pipe.InitState();

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-Workgroup-06530");
    pipe.CreateComputePipeline();
    m_errorMonitor->VerifyFound();
}

// Same shared-memory limit check, but the shared memory comes from shared blocks
// provided by VK_KHR_workgroup_memory_explicit_layout (GL_EXT_shared_memory_block).
TEST_F(VkLayerTest, ComputeSharedMemoryOverLimitWorkgroupMemoryExplicitLayout) {
    TEST_DESCRIPTION(
        "Validate compute shader shared memory does not exceed maxComputeSharedMemorySize when using "
        "VK_KHR_workgroup_memory_explicit_layout");
    SetTargetApiVersion(VK_API_VERSION_1_2);
    AddRequiredExtensions(VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_EXTENSION_NAME);
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));

    // need at least SPIR-V 1.4 for SPV_KHR_workgroup_memory_explicit_layout
    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s Test requires Vulkan >= 1.2.\n", kSkipPrefix);
        return;
    }
    if (!AreRequestedExtensionsEnabled()) {
        printf("%s Extension %s is not supported, skipping test.\n", kSkipPrefix,
               VK_KHR_WORKGROUP_MEMORY_EXPLICIT_LAYOUT_EXTENSION_NAME);
        return;
    }

    // Query the workgroupMemoryExplicitLayout feature; the test needs it enabled.
    auto explicit_layout_features = LvlInitStruct<VkPhysicalDeviceWorkgroupMemoryExplicitLayoutFeaturesKHR>();
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&explicit_layout_features);
    vk::GetPhysicalDeviceFeatures2(gpu(), &features2);
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &explicit_layout_features));
    if (!explicit_layout_features.workgroupMemoryExplicitLayout) {
        printf("%s workgroupMemoryExplicitLayout feature not supported.\n", kSkipPrefix);
        return;
    }

    const uint32_t max_shared_memory_size = m_device->phy().properties().limits.maxComputeSharedMemorySize;
    // Shared memory is sized in 32-bit ints below, so divide the byte limit by 4.
    const uint32_t max_shared_ints = max_shared_memory_size / 4;

    // Two shared blocks; block Y's array is sized 16 ints past the device limit.
    std::stringstream csSource;
    csSource << R"glsl( #version 450 #extension GL_EXT_shared_memory_block : enable shared X { int x; }; shared Y { int y1[)glsl";
    csSource << (max_shared_ints + 16);
    csSource << R"glsl(]; int y2; }; void main() { x = 0; // prevent dead-code elimination y2 = 0; } )glsl";

    CreateComputePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.cs_.reset(new VkShaderObj(this, csSource.str(), VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_2));
    pipe.InitState();

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-Workgroup-06530");
    pipe.CreateComputePipeline();
    m_errorMonitor->VerifyFound();
}

// Vertex/fragment interface matching: the stages use the same location but
// different per-component layouts, which must be flagged at pipeline creation.
TEST_F(VkLayerTest, TestInvalidShaderInputAndOutputComponents) {
    TEST_DESCRIPTION("Test invalid shader layout in and out with different components.");

    ASSERT_NO_FATAL_FAILURE(Init());
    ASSERT_NO_FATAL_FAILURE(InitRenderTarget());

    {
        char const *vsSource = R"glsl( #version 450 layout(location = 0, component
= 0) out float r; layout(location = 0, component = 2) out float b; void main() { r = 0.25f; b = 0.75f; } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT); char const *fsSource = R"glsl( #version 450 layout(location = 0, component = 0) in vec3 rgb; layout (location = 0) out vec4 color; void main() { color = vec4(rgb, 1.0f); } )glsl"; VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT); const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, set_info, kPerformanceWarningBit | kErrorBit, "UNASSIGNED-CoreValidation-Shader-InputNotProduced"); } { char const *vsSource = R"glsl( #version 450 layout(location = 0, component = 0) out vec3 v; void main() { } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT); char const *fsSource = R"glsl( #version 450 layout(location = 0, component = 0) in float a; layout(location = 0, component = 2) in float b; layout (location = 0) out vec4 color; void main() { color = vec4(1.0f); } )glsl"; VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT); const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs.GetStageCreateInfo(), fs.GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, set_info, kPerformanceWarningBit | kErrorBit, "UNASSIGNED-CoreValidation-Shader-OutputNotConsumed"); } } TEST_F(VkLayerTest, SpecializationInvalidSizeMismatch) { TEST_DESCRIPTION("Make sure an error is logged when a specialization map entry's size is not correct with type"); SetTargetApiVersion(VK_API_VERSION_1_2); bool int8_support = false; bool float64_support = false; // require to make enable logic simpler if (InstanceExtensionSupported(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { m_instance_extension_names.push_back(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); } else { printf("%s Did not find required instance extension %s; 
skipped.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework()); if (DeviceValidationVersion() < VK_API_VERSION_1_2) { printf("%s test requires Vulkan 1.2+, skipping test\n", kSkipPrefix); return; } auto features12 = LvlInitStruct<VkPhysicalDeviceVulkan12Features>(); features12.shaderInt8 = VK_TRUE; auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&features12); vk::GetPhysicalDeviceFeatures2(gpu(), &features2); if (features12.shaderInt8 == VK_TRUE) { int8_support = true; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (m_device->phy().features().shaderFloat64) { float64_support = true; } // layout (constant_id = 0) const int a = 3; // layout (constant_id = 1) const uint b = 3; // layout (constant_id = 2) const float c = 3.0f; // layout (constant_id = 3) const bool d = true; // layout (constant_id = 4) const bool f = false; std::string cs_src = R"( OpCapability Shader OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 450 OpDecorate %a SpecId 0 OpDecorate %b SpecId 1 OpDecorate %c SpecId 2 OpDecorate %d SpecId 3 OpDecorate %f SpecId 4 %void = OpTypeVoid %func = OpTypeFunction %void %int = OpTypeInt 32 1 %uint = OpTypeInt 32 0 %float = OpTypeFloat 32 %bool = OpTypeBool %a = OpSpecConstant %int 3 %b = OpSpecConstant %uint 3 %c = OpSpecConstant %float 3 %d = OpSpecConstantTrue %bool %f = OpSpecConstantFalse %bool %main = OpFunction %void None %func %label = OpLabel OpReturn OpFunctionEnd )"; // use same offset to keep simple since unused data being read VkSpecializationMapEntry entries[5] = { {0, 0, 4}, // OpTypeInt 32 {1, 0, 4}, // OpTypeInt 32 {2, 0, 4}, // OpTypeFloat 32 {3, 0, sizeof(VkBool32)}, // OpTypeBool {4, 0, sizeof(VkBool32)} // OpTypeBool }; std::array<int32_t, 4> data; // enough garbage data to grab from VkSpecializationInfo specialization_info = { 5, 
entries, data.size() * sizeof(decltype(data)::value_type), data.data(), }; std::unique_ptr<VkShaderObj> cs; const auto set_info = [&cs](CreateComputePipelineHelper &helper) { helper.cs_ = std::move(cs); }; // Sanity check cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); if (cs) { CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit | kWarningBit, "", true); // signed int mismatch entries[0].size = 0; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[0].size = 2; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[0].size = 8; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[0].size = 4; // reset // unsigned int mismatch entries[1].size = 1; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[1].size = 8; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[1].size = 3; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); 
entries[1].size = 4; // reset // float mismatch entries[2].size = 0; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[2].size = 8; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[2].size = 7; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[2].size = 4; // reset // bool mismatch entries[3].size = sizeof(VkBool32) / 2; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[3].size = sizeof(VkBool32) + 1; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); } if (int8_support == true) { // #extension GL_EXT_shader_explicit_arithmetic_types_int8 : enable // layout (constant_id = 0) const int8_t a = int8_t(3); // layout (constant_id = 1) const uint8_t b = uint8_t(3); cs_src = R"( OpCapability Shader OpCapability Int8 OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 450 OpSourceExtension "GL_EXT_shader_explicit_arithmetic_types_int8" OpDecorate %a SpecId 0 OpDecorate %b SpecId 1 %void = OpTypeVoid %func = OpTypeFunction %void %char = OpTypeInt 8 1 %uchar = OpTypeInt 8 0 %a = OpSpecConstant %char 3 %b = 
OpSpecConstant %uchar 3 %main = OpFunction %void None %func %label = OpLabel OpReturn OpFunctionEnd )"; specialization_info.mapEntryCount = 2; entries[0] = {0, 0, 1}; // OpTypeInt 8 entries[1] = {1, 0, 1}; // OpTypeInt 8 cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); if (cs) { // Sanity check CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit | kWarningBit, "", true); // signed int 8 mismatch entries[0].size = 0; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[0].size = 2; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[0].size = 4; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[0].size = 1; // reset // unsigned int 8 mismatch entries[1].size = 0; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[1].size = 2; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[1].size = 4; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, 
"VUID-VkSpecializationMapEntry-constantID-00776"); } } if (float64_support == true) { // #extension GL_EXT_shader_explicit_arithmetic_types_float64 : enable // layout (constant_id = 0) const float64_t a = 3.0f; cs_src = R"( OpCapability Shader OpCapability Float64 OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 450 OpSourceExtension "GL_EXT_shader_explicit_arithmetic_types_float64" OpDecorate %a SpecId 0 %void = OpTypeVoid %func = OpTypeFunction %void %double = OpTypeFloat 64 %a = OpSpecConstant %double 3 %main = OpFunction %void None %func %label = OpLabel OpReturn OpFunctionEnd )"; specialization_info.mapEntryCount = 1; entries[0] = {0, 0, 8}; // OpTypeFloat 64 cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); if (cs) { // Sanity check CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit | kWarningBit, "", true); // float 64 mismatch entries[0].size = 1; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[0].size = 2; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[0].size = 4; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); entries[0].size = 16; cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, cs_src, "main", &specialization_info); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-VkSpecializationMapEntry-constantID-00776"); } } } 
// Work-group dimensions supplied through specialization constants must still respect
// VkPhysicalDeviceLimits::maxComputeWorkGroupSize and maxComputeWorkGroupInvocations.
TEST_F(VkLayerTest, ComputeWorkGroupSizeSpecConstant) {
    // Fixed: description was a copy-paste from the shared-memory test.
    TEST_DESCRIPTION("Validate compute work group size does not exceed maxComputeWorkGroupSize");

    ASSERT_NO_FATAL_FAILURE(Init());

    const VkPhysicalDeviceLimits limits = m_device->phy().properties().limits;

    // Make sure compute pipeline has a compute shader stage set
    // local_size_x / local_size_y come from spec constants with ids 3 and 4.
    const std::string cs_source = R"glsl( #version 450 layout(local_size_x_id = 3, local_size_y_id = 4) in; void main(){} )glsl";

    // Two 32-bit entries packed back to back: [0] -> x dimension, [1] -> y dimension.
    VkSpecializationMapEntry entries[2];
    entries[0].constantID = 3;
    entries[0].offset = 0;
    entries[0].size = sizeof(uint32_t);
    entries[1].constantID = 4;
    entries[1].offset = sizeof(uint32_t);
    entries[1].size = sizeof(uint32_t);

    uint32_t data[2] = {
        1,
        limits.maxComputeWorkGroupSize[1] + 1,  // Invalid
    };

    VkSpecializationInfo specialization_info = {};
    specialization_info.mapEntryCount = 2;
    specialization_info.pMapEntries = entries;
    specialization_info.dataSize = sizeof(uint32_t) * 2;
    specialization_info.pData = data;

    // set_info captures `data` by reference through specialization_info, so each
    // OneshotTest below picks up the current dimension values.
    const auto set_info = [&](CreateComputePipelineHelper &helper) {
        helper.cs_.reset(new VkShaderObj(this, cs_source, VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_GLSL,
                                         &specialization_info));
    };

    // y dimension over its limit.
    m_errorMonitor->SetUnexpectedError("VUID-RuntimeSpirv-x-06432");
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-y-06430");

    // x dimension over its limit.
    data[0] = limits.maxComputeWorkGroupSize[0] + 1;  // Invalid
    data[1] = 1;
    m_errorMonitor->SetUnexpectedError("VUID-RuntimeSpirv-x-06432");
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-x-06429");

    // Each dimension at its limit: the total invocation count is the PRODUCT of the
    // dimensions (x * y * z), not their sum, so compare the product against
    // maxComputeWorkGroupInvocations. 64-bit math avoids uint32_t overflow for large
    // per-dimension limits. (Previously this compared data[0] + data[1], which could
    // skip this sub-test on devices where the product exceeds the limit but the sum
    // does not.)
    data[0] = limits.maxComputeWorkGroupSize[0];
    data[1] = limits.maxComputeWorkGroupSize[1];
    if ((static_cast<uint64_t>(data[0]) * data[1]) > limits.maxComputeWorkGroupInvocations) {
        CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-x-06432");
    }
}

TEST_F(VkLayerTest, ComputeWorkGroupSizeConstantDefault) {
    TEST_DESCRIPTION("Make sure constant are applied for maxComputeWorkGroupSize using WorkgroupSize");
ASSERT_NO_FATAL_FAILURE(Init()); uint32_t x_size_limit = m_device->props.limits.maxComputeWorkGroupSize[0]; std::stringstream spv_source; spv_source << R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 450 OpDecorate %gl_WorkGroupSize BuiltIn WorkgroupSize %void = OpTypeVoid %3 = OpTypeFunction %void %uint = OpTypeInt 32 0 %limit = OpConstant %uint )"; spv_source << std::to_string(x_size_limit + 1); spv_source << R"( %uint_1 = OpConstant %uint 1 %v3uint = OpTypeVector %uint 3 %gl_WorkGroupSize = OpConstantComposite %v3uint %limit %uint_1 %uint_1 %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )"; const auto set_info = [&](CreateComputePipelineHelper &helper) { helper.cs_.reset(new VkShaderObj(this, spv_source.str(), VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM)); }; m_errorMonitor->SetUnexpectedError("VUID-RuntimeSpirv-x-06432"); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-x-06429"); } TEST_F(VkLayerTest, ComputeWorkGroupSizeSpecConstantDefault) { TEST_DESCRIPTION("Make sure spec constant are applied for maxComputeWorkGroupSize using WorkgroupSize"); ASSERT_NO_FATAL_FAILURE(Init()); uint32_t x_size_limit = m_device->props.limits.maxComputeWorkGroupSize[0]; std::stringstream spv_source; spv_source << R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 450 OpDecorate %limit SpecId 0 OpDecorate %gl_WorkGroupSize BuiltIn WorkgroupSize %void = OpTypeVoid %3 = OpTypeFunction %void %uint = OpTypeInt 32 0 %limit = OpSpecConstant %uint )"; spv_source << std::to_string(x_size_limit + 1); spv_source << R"( %uint_1 = OpConstant %uint 1 %v3uint = OpTypeVector %uint 3 %gl_WorkGroupSize = OpSpecConstantComposite %v3uint %limit %uint_1 %uint_1 %main 
= OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )";

    const auto set_info = [&](CreateComputePipelineHelper &helper) {
        helper.cs_.reset(new VkShaderObj(this, spv_source.str(), VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM));
    };

    // The spec constant's DEFAULT value (limit + 1) exceeds maxComputeWorkGroupSize[0].
    m_errorMonitor->SetUnexpectedError("VUID-RuntimeSpirv-x-06432");
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-x-06429");
}

// Vulkan 1.3 (maintenance4) allows the LocalSizeId execution mode instead of
// LocalSize; the maxComputeWorkGroupSize limit must still be enforced through it.
TEST_F(VkLayerTest, ComputeWorkGroupSizeLocalSizeId) {
    TEST_DESCRIPTION("Validate LocalSizeId also triggers maxComputeWorkGroupSize limit");

    SetTargetApiVersion(VK_API_VERSION_1_3);
    ASSERT_NO_FATAL_FAILURE(InitFramework());
    if (DeviceValidationVersion() < VK_API_VERSION_1_3) {
        printf("%s test requires Vulkan 1.3+, skipping test\n", kSkipPrefix);
        return;
    }
    auto features13 = LvlInitStruct<VkPhysicalDeviceVulkan13Features>();
    features13.maintenance4 = VK_TRUE;  // required to be supported in 1.3
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features13));

    uint32_t x_size_limit = m_device->props.limits.maxComputeWorkGroupSize[0];

    // Hand-written SPIR-V: OpExecutionModeId LocalSizeId with x = x_size_limit + 1.
    std::stringstream spv_source;
    spv_source << R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionModeId %main LocalSizeId %limit %uint_1 %uint_1 OpSource GLSL 450 %void = OpTypeVoid %3 = OpTypeFunction %void %uint = OpTypeInt 32 0 %limit = OpConstant %uint )";
    spv_source << std::to_string(x_size_limit + 1);
    spv_source << R"( %uint_1 = OpConstant %uint 1 %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )";

    const auto set_info = [&](CreateComputePipelineHelper &helper) {
        helper.cs_.reset(new VkShaderObj(this, spv_source.str(), VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_3, SPV_SOURCE_ASM));
    };
    m_errorMonitor->SetUnexpectedError("VUID-RuntimeSpirv-x-06432");
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-x-06429");
}

TEST_F(VkLayerTest, ComputeWorkGroupSizeLocalSizeIdSpecConstantDefault) {
TEST_DESCRIPTION("Validate LocalSizeId also triggers maxComputeWorkGroupSize limit with spec constants default"); SetTargetApiVersion(VK_API_VERSION_1_3); ASSERT_NO_FATAL_FAILURE(InitFramework()); if (DeviceValidationVersion() < VK_API_VERSION_1_3) { printf("%s test requires Vulkan 1.3+, skipping test\n", kSkipPrefix); return; } auto features13 = LvlInitStruct<VkPhysicalDeviceVulkan13Features>(); features13.maintenance4 = VK_TRUE; // required to be supported in 1.3 ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features13)); uint32_t x_size_limit = m_device->props.limits.maxComputeWorkGroupSize[0]; // layout(local_size_x_id = 18, local_size_z_id = 19) in; // layout(local_size_x = 32) in; std::stringstream spv_source; spv_source << R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionModeId %main LocalSizeId %spec_x %uint_1 %spec_z OpSource GLSL 450 OpDecorate %spec_x SpecId 18 OpDecorate %spec_z SpecId 19 %void = OpTypeVoid %3 = OpTypeFunction %void %uint = OpTypeInt 32 0 %spec_x = OpSpecConstant %uint )"; spv_source << std::to_string(x_size_limit + 1); spv_source << R"( %uint_1 = OpConstant %uint 1 %spec_z = OpSpecConstant %uint 1 %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )"; const auto set_info = [&](CreateComputePipelineHelper &helper) { helper.cs_.reset(new VkShaderObj(this, spv_source.str(), VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_3, SPV_SOURCE_ASM)); }; m_errorMonitor->SetUnexpectedError("VUID-RuntimeSpirv-x-06432"); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-x-06429"); } TEST_F(VkLayerTest, ComputeWorkGroupSizeLocalSizeIdSpecConstantSet) { TEST_DESCRIPTION("Validate LocalSizeId also triggers maxComputeWorkGroupSize limit with spec constants"); SetTargetApiVersion(VK_API_VERSION_1_3); ASSERT_NO_FATAL_FAILURE(InitFramework()); if (DeviceValidationVersion() < VK_API_VERSION_1_3) { printf("%s test requires 
Vulkan 1.3+, skipping test\n", kSkipPrefix); return; } auto features13 = LvlInitStruct<VkPhysicalDeviceVulkan13Features>(); features13.maintenance4 = VK_TRUE; // required to be supported in 1.3 ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features13)); uint32_t x_size_limit = m_device->props.limits.maxComputeWorkGroupSize[0]; // layout(local_size_x_id = 18, local_size_z_id = 19) in; // layout(local_size_x = 32) in; std::stringstream spv_source; spv_source << R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionModeId %main LocalSizeId %spec_x %uint_1 %spec_z OpSource GLSL 450 OpDecorate %spec_x SpecId 18 OpDecorate %spec_z SpecId 19 %void = OpTypeVoid %3 = OpTypeFunction %void %uint = OpTypeInt 32 0 %spec_x = OpSpecConstant %uint 32 %uint_1 = OpConstant %uint 1 %spec_z = OpSpecConstant %uint 1 %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )"; uint32_t data = x_size_limit + 1; VkSpecializationMapEntry entry; entry.constantID = 18; entry.offset = 0; entry.size = sizeof(uint32_t); VkSpecializationInfo specialization_info = {}; specialization_info.mapEntryCount = 1; specialization_info.pMapEntries = &entry; specialization_info.dataSize = sizeof(uint32_t); specialization_info.pData = &data; const auto set_info = [&](CreateComputePipelineHelper &helper) { helper.cs_.reset(new VkShaderObj(this, spv_source.str(), VK_SHADER_STAGE_COMPUTE_BIT, SPV_ENV_VULKAN_1_3, SPV_SOURCE_ASM, &specialization_info)); }; m_errorMonitor->SetUnexpectedError("VUID-RuntimeSpirv-x-06432"); CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-x-06429"); } TEST_F(VkLayerTest, UsingRasterizationStateStreamExtWithoutEnabled) { TEST_DESCRIPTION("Test using TestRasterizationStateStreamCreateInfoEXT but it doesn't enable geometryStreams."); if (!AddRequiredExtensions(VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME)) { printf("%s Required instance extension(s) not 
supported.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (!AreRequestedExtensionsEnabled()) { printf("%s test requires %s extension. Skipping.\n", kSkipPrefix, VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME); return; } VkPhysicalDeviceTransformFeedbackFeaturesEXT transform_feedback_features = LvlInitStruct<VkPhysicalDeviceTransformFeedbackFeaturesEXT>(); transform_feedback_features.geometryStreams = VK_FALSE; // Invalid // Extension enabled via VK_EXT_transform_feedback dependency auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&transform_feedback_features); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); CreatePipelineHelper pipe(*this); pipe.InitInfo(); auto rasterization_state_stream_ci = LvlInitStruct<VkPipelineRasterizationStateStreamCreateInfoEXT>(); pipe.rs_state_ci_.pNext = &rasterization_state_stream_ci; pipe.InitState(); m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineRasterizationStateStreamCreateInfoEXT-geometryStreams-02324"); pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, TestPipelineRasterizationStateStreamCreateInfoEXT) { TEST_DESCRIPTION("Test using TestRasterizationStateStreamCreateInfoEXT with invalid rasterizationStream."); if (!AddRequiredExtensions(VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME)) { printf("%s Instance extension(s) not supported.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (!AreRequestedExtensionsEnabled()) { printf("%s test requires %s extension. 
Skipping.\n", kSkipPrefix, VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME); return; } VkPhysicalDeviceTransformFeedbackFeaturesEXT transform_feedback_features = LvlInitStruct<VkPhysicalDeviceTransformFeedbackFeaturesEXT>(); transform_feedback_features.geometryStreams = VK_TRUE; // Extension enabled via dependencies VkPhysicalDeviceFeatures2KHR features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&transform_feedback_features); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkPhysicalDeviceTransformFeedbackPropertiesEXT transfer_feedback_props = LvlInitStruct<VkPhysicalDeviceTransformFeedbackPropertiesEXT>(); VkPhysicalDeviceProperties2 pd_props2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&transfer_feedback_props); vk::GetPhysicalDeviceProperties2(gpu(), &pd_props2); if (!transfer_feedback_props.transformFeedbackRasterizationStreamSelect && transfer_feedback_props.maxTransformFeedbackStreams == 0) { printf("%s VkPhysicalDeviceTransformFeedbackPropertiesEXT::transformFeedbackRasterizationStreamSelect is 0; skipped.\n", kSkipPrefix); return; } CreatePipelineHelper pipe(*this); pipe.InitInfo(); auto rasterization_state_stream_ci = LvlInitStruct<VkPipelineRasterizationStateStreamCreateInfoEXT>(); rasterization_state_stream_ci.rasterizationStream = transfer_feedback_props.maxTransformFeedbackStreams; pipe.rs_state_ci_.pNext = &rasterization_state_stream_ci; pipe.InitState(); if (transfer_feedback_props.transformFeedbackRasterizationStreamSelect) { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineRasterizationStateStreamCreateInfoEXT-rasterizationStream-02325"); } else { m_errorMonitor->SetDesiredFailureMsg(VK_DEBUG_REPORT_ERROR_BIT_EXT, "VUID-VkPipelineRasterizationStateStreamCreateInfoEXT-rasterizationStream-02326"); } pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, NoUniformBufferStandardLayout10) { TEST_DESCRIPTION("Don't enable 
uniformBufferStandardLayout in Vulkan 1.0 and have spirv-val catch invalid shader"); SetTargetApiVersion(VK_API_VERSION_1_0); ASSERT_NO_FATAL_FAILURE(Init()); if (DeviceValidationVersion() > VK_API_VERSION_1_0) { printf("%s Tests requires Vulkan 1.0 only, skipping test\n", kSkipPrefix); return; } // layout(std430, set = 0, binding = 0) uniform ubo430 { // float floatArray430[8]; // }; const std::string spv_source = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 450 OpDecorate %_arr_float_uint_8 ArrayStride 4 OpMemberDecorate %ubo430 0 Offset 0 OpDecorate %ubo430 Block OpDecorate %_ DescriptorSet 0 OpDecorate %_ Binding 0 %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %uint = OpTypeInt 32 0 %uint_8 = OpConstant %uint 8 %_arr_float_uint_8 = OpTypeArray %float %uint_8 %ubo430 = OpTypeStruct %_arr_float_uint_8 %_ptr_Uniform_ubo430 = OpTypePointer Uniform %ubo430 %_ = OpVariable %_ptr_Uniform_ubo430 Uniform %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-Shader-InconsistentSpirv"); VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, spv_source, "main", nullptr, SPV_ENV_VULKAN_1_0); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, NoUniformBufferStandardLayout12) { TEST_DESCRIPTION( "Don't enable uniformBufferStandardLayout in Vulkan1.2 when VK_KHR_uniform_buffer_standard_layout was promoted"); SetTargetApiVersion(VK_API_VERSION_1_2); ASSERT_NO_FATAL_FAILURE(Init()); if (DeviceValidationVersion() < VK_API_VERSION_1_2) { printf("%s Tests requires Vulkan 1.2+ only, skipping test\n", kSkipPrefix); return; } // layout(std430, set = 0, binding = 0) uniform ubo430 { // float floatArray430[8]; // }; const std::string spv_source = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical 
GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 450 OpDecorate %_arr_float_uint_8 ArrayStride 4 OpMemberDecorate %ubo430 0 Offset 0 OpDecorate %ubo430 Block OpDecorate %_ DescriptorSet 0 OpDecorate %_ Binding 0 %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %uint = OpTypeInt 32 0 %uint_8 = OpConstant %uint 8 %_arr_float_uint_8 = OpTypeArray %float %uint_8 %ubo430 = OpTypeStruct %_arr_float_uint_8 %_ptr_Uniform_ubo430 = OpTypePointer Uniform %ubo430 %_ = OpVariable %_ptr_Uniform_ubo430 Uniform %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-Shader-InconsistentSpirv"); VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, spv_source, "main", nullptr, SPV_ENV_VULKAN_1_2); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, NoScalarBlockLayout10) { TEST_DESCRIPTION("Don't enable scalarBlockLayout in Vulkan 1.0 and have spirv-val catch invalid shader"); SetTargetApiVersion(VK_API_VERSION_1_0); ASSERT_NO_FATAL_FAILURE(Init()); if (DeviceValidationVersion() > VK_API_VERSION_1_0) { printf("%s Tests requires Vulkan 1.0 only, skipping test\n", kSkipPrefix); return; } // layout (scalar, set = 0, binding = 0) buffer ssbo { // layout(offset = 4) vec3 x; // }; // // Note: using BufferBlock for Vulkan 1.0 // Note: Relaxed Block Layout would also make this valid if enabled const std::string spv_source = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 450 OpMemberDecorate %ssbo 0 Offset 4 OpDecorate %ssbo BufferBlock OpDecorate %_ DescriptorSet 0 OpDecorate %_ Binding 0 %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %v3float = OpTypeVector %float 3 %ssbo = OpTypeStruct %v3float %_ptr_Uniform_ssbo = OpTypePointer Uniform %ssbo %_ = OpVariable 
%_ptr_Uniform_ssbo Uniform %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-Shader-InconsistentSpirv"); VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, spv_source, "main", nullptr, SPV_ENV_VULKAN_1_0); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, NoScalarBlockLayout12) { TEST_DESCRIPTION("Don't enable scalarBlockLayout in Vulkan1.2 when VK_EXT_scalar_block_layout was promoted"); SetTargetApiVersion(VK_API_VERSION_1_2); ASSERT_NO_FATAL_FAILURE(Init()); if (DeviceValidationVersion() < VK_API_VERSION_1_2) { printf("%s Tests requires Vulkan 1.2+ only, skipping test\n", kSkipPrefix); return; } // layout (scalar, set = 0, binding = 0) buffer ssbo { // layout(offset = 0) vec3 a; // layout(offset = 12) vec2 b; // }; const std::string spv_source = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" %_ OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 450 OpMemberDecorate %ssbo 0 Offset 0 OpMemberDecorate %ssbo 1 Offset 12 OpDecorate %ssbo Block OpDecorate %_ DescriptorSet 0 OpDecorate %_ Binding 0 %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %v3float = OpTypeVector %float 3 %v2float = OpTypeVector %float 2 %ssbo = OpTypeStruct %v3float %v2float %_ptr_StorageBuffer_ssbo = OpTypePointer StorageBuffer %ssbo %_ = OpVariable %_ptr_StorageBuffer_ssbo StorageBuffer %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )"; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "UNASSIGNED-CoreValidation-Shader-InconsistentSpirv"); VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, spv_source, "main", nullptr, SPV_ENV_VULKAN_1_2); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, TestWrongPipelineType) { TEST_DESCRIPTION("Use a compute pipeline in GetRayTracingShaderGroupStackSizeKHR"); SetTargetApiVersion(VK_API_VERSION_1_1); 
ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceValidationVersion() < VK_API_VERSION_1_1) { printf("%s At least Vulkan version 1.1 is required, skipping test.\n", kSkipPrefix); return; } if (DeviceExtensionSupported(gpu(), nullptr, VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME)) { m_device_extension_names.push_back(VK_KHR_SHADER_FLOAT_CONTROLS_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_SPIRV_1_4_EXTENSION_NAME); m_device_extension_names.push_back(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME); m_device_extension_names.push_back(VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitState()); CreateComputePipelineHelper pipe(*this); pipe.InitInfo(); pipe.InitState(); pipe.CreateComputePipeline(); PFN_vkGetRayTracingShaderGroupStackSizeKHR vkGetRayTracingShaderGroupStackSizeKHR = (PFN_vkGetRayTracingShaderGroupStackSizeKHR)vk::GetInstanceProcAddr(instance(), "vkGetRayTracingShaderGroupStackSizeKHR"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkGetRayTracingShaderGroupStackSizeKHR-pipeline-04622"); vkGetRayTracingShaderGroupStackSizeKHR(device(), pipe.pipeline_, 0, VK_SHADER_GROUP_SHADER_GENERAL_KHR); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, TestPipelineRasterizationConservativeStateCreateInfo) { TEST_DESCRIPTION("Test PipelineRasterizationConservativeStateCreateInfo."); AddRequiredExtensions(VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (!AreRequestedExtensionsEnabled()) { printf("%s %s is not supported; skipping\n", kSkipPrefix, 
VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME); return; } m_device_extension_names.push_back(VK_EXT_CONSERVATIVE_RASTERIZATION_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr); VkPhysicalDeviceConservativeRasterizationPropertiesEXT conservative_rasterization_props = LvlInitStruct<VkPhysicalDeviceConservativeRasterizationPropertiesEXT>(); VkPhysicalDeviceProperties2KHR properties2 = LvlInitStruct<VkPhysicalDeviceProperties2KHR>(&conservative_rasterization_props); vkGetPhysicalDeviceProperties2KHR(gpu(), &properties2); VkPipelineRasterizationConservativeStateCreateInfoEXT conservative_state = LvlInitStruct<VkPipelineRasterizationConservativeStateCreateInfoEXT>(); conservative_state.extraPrimitiveOverestimationSize = -1.0f; CreatePipelineHelper pipe(*this); pipe.InitInfo(); pipe.rs_state_ci_.pNext = &conservative_state; pipe.InitState(); m_errorMonitor->SetDesiredFailureMsg( kErrorBit, "VUID-VkPipelineRasterizationConservativeStateCreateInfoEXT-extraPrimitiveOverestimationSize-01769"); pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); conservative_state.extraPrimitiveOverestimationSize = conservative_rasterization_props.maxExtraPrimitiveOverestimationSize + 0.1f; m_errorMonitor->SetDesiredFailureMsg( kErrorBit, "VUID-VkPipelineRasterizationConservativeStateCreateInfoEXT-extraPrimitiveOverestimationSize-01769"); pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, TestShaderZeroInitializeWorkgroupMemory) { TEST_DESCRIPTION("Test initializing workgroup memory in compute shader"); AddRequiredExtensions(VK_KHR_ZERO_INITIALIZE_WORKGROUP_MEMORY_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); bool 
zero_initialize_workgroup_memory = AreRequestedExtensionsEnabled(); auto zero_initialize_work_group_memory_features = LvlInitStruct<VkPhysicalDeviceZeroInitializeWorkgroupMemoryFeaturesKHR>(); auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&zero_initialize_work_group_memory_features); if (zero_initialize_workgroup_memory) { features2.pNext = &zero_initialize_work_group_memory_features; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); const std::string spv_source = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 450 OpName %main "main" OpName %counter "counter" %void = OpTypeVoid %3 = OpTypeFunction %void %uint = OpTypeInt 32 0 %_ptr_Workgroup_uint = OpTypePointer Workgroup %uint %zero_uint = OpConstantNull %uint %counter = OpVariable %_ptr_Workgroup_uint Workgroup %zero_uint %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )"; auto cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, spv_source, "main", nullptr); const auto set_info = [&cs](CreateComputePipelineHelper &helper) { helper.cs_ = std::move(cs); }; if (cs) { const char *vuid = zero_initialize_workgroup_memory ? 
"VUID-RuntimeSpirv-shaderZeroInitializeWorkgroupMemory-06372" : "VUID-RuntimeSpirv-OpVariable-06373"; CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, vuid); } } TEST_F(VkLayerTest, TestRuntimeSpirvTransformFeedback) { TEST_DESCRIPTION("Test runtime spirv transform feedback."); SetTargetApiVersion(VK_API_VERSION_1_2); AddRequiredExtensions(VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceValidationVersion() < VK_API_VERSION_1_2) { printf("%s Vulkan 1.2 not supported, skipping test.\n", kSkipPrefix); return; } if (!AreRequestedExtensionsEnabled()) { printf("%s Extension %s not supported, skipping test.\n", kSkipPrefix, VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME); return; } // Test currently crashes with valid SPIR-V // Using EmitStreamVertex() with transfer_feedback_props.maxTransformFeedbackStreams if (IsDriver(VK_DRIVER_ID_AMD_PROPRIETARY)) { printf("%s Test does not run on AMD proprietary driver, skipping tests\n", kSkipPrefix); return; } VkPhysicalDeviceTransformFeedbackFeaturesEXT transform_feedback_features = LvlInitStruct<VkPhysicalDeviceTransformFeedbackFeaturesEXT>(); transform_feedback_features.transformFeedback = VK_TRUE; transform_feedback_features.geometryStreams = VK_TRUE; VkPhysicalDeviceVulkan12Features features12 = LvlInitStruct<VkPhysicalDeviceVulkan12Features>(&transform_feedback_features); auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&features12); vk::GetPhysicalDeviceFeatures2(gpu(), &features2); if (features2.features.geometryShader == VK_FALSE) { printf("%s Device does not support the required geometry shader features; skipped.\n", kSkipPrefix); return; } if (!transform_feedback_features.transformFeedback || !transform_feedback_features.geometryStreams) { printf("%s transformFeedback or geometryStreams feature is not supported, skipping test.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); 
ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); PFN_vkGetPhysicalDeviceProperties2KHR vkGetPhysicalDeviceProperties2KHR = (PFN_vkGetPhysicalDeviceProperties2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceProperties2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceProperties2KHR != nullptr); VkPhysicalDeviceTransformFeedbackPropertiesEXT transform_feedback_props = LvlInitStruct<VkPhysicalDeviceTransformFeedbackPropertiesEXT>(); VkPhysicalDeviceProperties2 pd_props2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&transform_feedback_props); vkGetPhysicalDeviceProperties2KHR(gpu(), &pd_props2); // seen sometimes when using devsim and will crash if (transform_feedback_props.maxTransformFeedbackStreams == 0) { printf("%s maxTransformFeedbackStreams is zero, skipping test.\n", kSkipPrefix); return; } { std::stringstream vsSource; vsSource << R"asm( OpCapability Shader OpCapability TransformFeedback %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Vertex %main "main" %tf OpExecutionMode %main Xfb ; Debug Information OpSource GLSL 450 OpName %main "main" ; id %4 OpName %tf "tf" ; id %8 ; Annotations OpDecorate %tf Location 0 OpDecorate %tf XfbBuffer 0 OpDecorate %tf XfbStride )asm"; vsSource << transform_feedback_props.maxTransformFeedbackBufferDataStride + 4; vsSource << R"asm( ; Types, variables and constants %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %_ptr_Output_float = OpTypePointer Output %float %tf = OpVariable %_ptr_Output_float Output ; Function main %main = OpFunction %void None %3 %5 = OpLabel OpReturn OpFunctionEnd )asm"; auto vs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_VERTEX_BIT, vsSource.str().c_str(), "main", nullptr); const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {vs->GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-XfbStride-06313"); } { std::stringstream gsSource; 
gsSource << R"asm( OpCapability Geometry OpCapability TransformFeedback OpCapability GeometryStreams %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Geometry %main "main" %tf OpExecutionMode %main Xfb OpExecutionMode %main Triangles OpExecutionMode %main Invocations 1 OpExecutionMode %main OutputTriangleStrip OpExecutionMode %main OutputVertices 1 ; Debug Information OpSource GLSL 450 OpName %main "main" ; id %4 OpName %tf "tf" ; id %10 ; Annotations OpDecorate %tf Location 0 OpDecorate %tf Stream 0 OpDecorate %tf XfbBuffer 0 OpDecorate %tf XfbStride 0 ; Types, variables and constants %void = OpTypeVoid %3 = OpTypeFunction %void %int = OpTypeInt 32 1 %int_17 = OpConstant %int )asm"; gsSource << transform_feedback_props.maxTransformFeedbackStreams; gsSource << R"asm( %float = OpTypeFloat 32 %_ptr_Output_float = OpTypePointer Output %float %tf = OpVariable %_ptr_Output_float Output ; Function main %main = OpFunction %void None %3 %5 = OpLabel OpEmitStreamVertex %int_17 OpReturn OpFunctionEnd )asm"; auto gs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_GEOMETRY_BIT, gsSource.str().c_str(), "main", nullptr); const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), gs->GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-OpEmitStreamVertex-06310"); } if (transform_feedback_props.transformFeedbackStreamsLinesTriangles == VK_FALSE) { const char *gsSource = R"asm( OpCapability Geometry OpCapability TransformFeedback OpCapability GeometryStreams %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Geometry %main "main" %a %b OpExecutionMode %main Xfb OpExecutionMode %main Triangles OpExecutionMode %main Invocations 1 OpExecutionMode %main OutputLineStrip OpExecutionMode %main OutputVertices 6 ; Debug Information OpSource GLSL 450 OpName %main "main" ; id %4 OpName %a 
"a" ; id %11 OpName %b "b" ; id %12 ; Annotations OpDecorate %a Location 0 OpDecorate %a Stream 0 OpDecorate %a XfbBuffer 0 OpDecorate %a XfbStride 4 OpDecorate %a Offset 0 OpDecorate %b Location 1 OpDecorate %b Stream 0 OpDecorate %b XfbBuffer 1 OpDecorate %b XfbStride 4 OpDecorate %b Offset 0 ; Types, variables and constants %void = OpTypeVoid %3 = OpTypeFunction %void %int = OpTypeInt 32 1 %int_0 = OpConstant %int 0 %int_1 = OpConstant %int 1 %float = OpTypeFloat 32 %_ptr_Output_float = OpTypePointer Output %float %a = OpVariable %_ptr_Output_float Output %b = OpVariable %_ptr_Output_float Output ; Function main %main = OpFunction %void None %3 %5 = OpLabel OpEmitStreamVertex %int_0 OpEmitStreamVertex %int_1 OpReturn OpFunctionEnd )asm"; auto gs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_GEOMETRY_BIT, gsSource, "main", nullptr); const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), gs->GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-transformFeedbackStreamsLinesTriangles-06311"); } { std::stringstream gsSource; gsSource << R"asm( OpCapability Geometry OpCapability TransformFeedback OpCapability GeometryStreams %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Geometry %main "main" %a OpExecutionMode %main Xfb OpExecutionMode %main Triangles OpExecutionMode %main Invocations 1 OpExecutionMode %main OutputLineStrip OpExecutionMode %main OutputVertices 6 ; Debug Information OpSource GLSL 450 OpName %main "main" ; id %4 OpName %a "a" ; id %10 ; Annotations OpDecorate %a Location 0 OpDecorate %a Stream 0 OpDecorate %a XfbBuffer 0 OpDecorate %a XfbStride 20 OpDecorate %a Offset )asm"; gsSource << transform_feedback_props.maxTransformFeedbackBufferDataSize; gsSource << R"asm( ; Types, variables and constants %void = OpTypeVoid %3 = OpTypeFunction %void %int = OpTypeInt 32 1 
%int_0 = OpConstant %int 0 %float = OpTypeFloat 32 %_ptr_Output_float = OpTypePointer Output %float %a = OpVariable %_ptr_Output_float Output ; Function main %main = OpFunction %void None %3 %5 = OpLabel OpEmitStreamVertex %int_0 OpReturn OpFunctionEnd )asm"; auto gs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_GEOMETRY_BIT, gsSource.str().c_str(), "main", nullptr); const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), gs->GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; }; std::vector<std::string> vuids = {"VUID-RuntimeSpirv-Offset-06308"}; if (transform_feedback_props.maxTransformFeedbackBufferDataSize + 4 >= transform_feedback_props.maxTransformFeedbackStreamDataSize) { vuids.push_back("VUID-RuntimeSpirv-XfbBuffer-06309"); } CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, vuids); } { std::stringstream gsSource; gsSource << R"asm( OpCapability Geometry OpCapability TransformFeedback OpCapability GeometryStreams %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Geometry %main "main" %a OpExecutionMode %main Xfb OpExecutionMode %main Triangles OpExecutionMode %main Invocations 1 OpExecutionMode %main OutputLineStrip OpExecutionMode %main OutputVertices 6 ; Debug Information OpSource GLSL 450 OpName %main "main" ; id %4 OpName %a "a" ; id %10 ; Annotations OpDecorate %a Location 0 OpDecorate %a Stream )asm"; gsSource << transform_feedback_props.maxTransformFeedbackStreams; gsSource << R"asm( OpDecorate %a XfbBuffer 0 OpDecorate %a XfbStride 4 OpDecorate %a Offset 0 ; Types, variables and constants %void = OpTypeVoid %3 = OpTypeFunction %void %int = OpTypeInt 32 1 %int_0 = OpConstant %int 0 %float = OpTypeFloat 32 %_ptr_Output_float = OpTypePointer Output %float %a = OpVariable %_ptr_Output_float Output ; Function main %main = OpFunction %void None %3 %5 = OpLabel OpEmitStreamVertex %int_0 OpReturn OpFunctionEnd )asm"; auto gs = 
VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_GEOMETRY_BIT, gsSource.str().c_str(), "main", nullptr); const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), gs->GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-Stream-06312"); } { uint32_t offset = transform_feedback_props.maxTransformFeedbackBufferDataSize / 2; uint32_t count = transform_feedback_props.maxTransformFeedbackStreamDataSize / offset + 1; // Limit to 25, because we are dynamically adding variables using letters as names if (count < 25) { std::stringstream gsSource; gsSource << R"asm( OpCapability Geometry OpCapability TransformFeedback OpCapability GeometryStreams %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Geometry %main "main" OpExecutionMode %main Xfb OpExecutionMode %main Triangles OpExecutionMode %main Invocations 1 OpExecutionMode %main OutputLineStrip OpExecutionMode %main OutputVertices 6 ; Debug Information OpSource GLSL 450 OpName %main "main" ; id %4)asm"; for (uint32_t i = 0; i < count; ++i) { char v = 'a' + i; gsSource << "\nOpName %var" << v << " \"" << v << "\""; } gsSource << "\n; Annotations\n"; for (uint32_t i = 0; i < count; ++i) { char v = 'a' + i; gsSource << "OpDecorate %var" << v << " Location " << i << "\n"; gsSource << "OpDecorate %var" << v << " Stream 0\n"; gsSource << "OpDecorate %var" << v << " XfbBuffer " << i << "\n"; gsSource << "OpDecorate %var" << v << " XfbStride 20\n"; gsSource << "OpDecorate %var" << v << " Offset " << offset << "\n"; } gsSource << R"asm( ; Types, variables and constants %void = OpTypeVoid %3 = OpTypeFunction %void %int = OpTypeInt 32 1 %int_0 = OpConstant %int 0 %float = OpTypeFloat 32 %_ptr_Output_float = OpTypePointer Output %float)asm"; gsSource << "\n"; for (uint32_t i = 0; i < count; ++i) { char v = 'a' + i; gsSource << "%var" << v << " = OpVariable 
%_ptr_Output_float Output\n"; } gsSource << R"asm( ; Function main %main = OpFunction %void None %3 %5 = OpLabel OpEmitStreamVertex %int_0 OpReturn OpFunctionEnd )asm"; auto gs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_GEOMETRY_BIT, gsSource.str().c_str(), "main", nullptr); const auto set_info = [&](CreatePipelineHelper &helper) { helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), gs->GetStageCreateInfo(), helper.fs_->GetStageCreateInfo()}; }; CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-XfbBuffer-06309"); } } } TEST_F(VkLayerTest, TestMinAndMaxTexelGatherOffset) { TEST_DESCRIPTION("Test shader with offset less than minTexelGatherOffset and greather than maxTexelGatherOffset"); ASSERT_NO_FATAL_FAILURE(Init()); if (m_device->phy().properties().limits.minTexelGatherOffset <= -100 || m_device->phy().properties().limits.maxTexelGatherOffset >= 100) { printf("%s test needs minTexelGatherOffset greater than -100 and maxTexelGatherOffset less than 100. 
Skipping.\n", kSkipPrefix); return; } const std::string spv_source = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint GLCompute %main "main" OpExecutionMode %main LocalSize 1 1 1 OpSource GLSL 450 ; Annotations OpDecorate %samp DescriptorSet 0 OpDecorate %samp Binding 0 ; Types, variables and constants %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %v4float = OpTypeVector %float 4 %_ptr_Function_v4float = OpTypePointer Function %v4float %10 = OpTypeImage %float 2D 0 0 0 1 Unknown %11 = OpTypeSampledImage %10 %_ptr_UniformConstant_11 = OpTypePointer UniformConstant %11 %samp = OpVariable %_ptr_UniformConstant_11 UniformConstant %v2float = OpTypeVector %float 2 %float_0_5 = OpConstant %float 0.5 %17 = OpConstantComposite %v2float %float_0_5 %float_0_5 ; set up composite to be validated %uint = OpTypeInt 32 0 %int = OpTypeInt 32 1 %v2int = OpTypeVector %int 2 %int_n100 = OpConstant %int -100 %uint_n100 = OpConstant %uint 4294967196 %int_100 = OpConstant %int 100 %int_0 = OpConstant %int 0 %offset_100 = OpConstantComposite %v2int %int_n100 %int_100 %offset_n100 = OpConstantComposite %v2int %int_0 %uint_n100 ; Function main %main = OpFunction %void None %3 %5 = OpLabel %color = OpVariable %_ptr_Function_v4float Function %14 = OpLoad %11 %samp ; Should trigger min and max %24 = OpImageGather %v4float %14 %17 %int_0 ConstOffset %offset_100 ; Should only trigger max since uint %25 = OpImageGather %v4float %14 %17 %int_0 ConstOffset %offset_n100 OpStore %color %24 OpReturn OpFunctionEnd )"; OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_COMPUTE_BIT, nullptr}, }); auto cs = VkShaderObj::CreateFromASM(*this, VK_SHADER_STAGE_COMPUTE_BIT, spv_source, "main", nullptr); CreateComputePipelineHelper cs_pipeline(*this); cs_pipeline.InitInfo(); cs_pipeline.cs_ = std::move(cs); cs_pipeline.InitState(); cs_pipeline.pipeline_layout_ = 
VkPipelineLayoutObj(m_device, {&descriptor_set.layout_}); cs_pipeline.LateBindPipelineInfo(); // as commented in SPIR-V should trigger the limits as following m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-OpImage-06376"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-OpImage-06377"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-OpImage-06377"); cs_pipeline.CreateComputePipeline(true, false); // need false to prevent late binding m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, TestMinAndMaxTexelOffset) { TEST_DESCRIPTION("Test shader with offset less than minTexelOffset and greather than maxTexelOffset"); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); if (m_device->phy().properties().limits.minTexelOffset <= -100 || m_device->phy().properties().limits.maxTexelOffset >= 100) { printf("%s test needs minTexelOffset greater than -100 and maxTexelOffset less than 100. Skipping.\n", kSkipPrefix); return; } const std::string spv_source = R"( OpCapability Shader %1 = OpExtInstImport "GLSL.std.450" OpMemoryModel Logical GLSL450 OpEntryPoint Fragment %main "main" OpExecutionMode %main OriginUpperLeft OpSource GLSL 450 OpDecorate %textureSampler DescriptorSet 0 OpDecorate %textureSampler Binding 0 %void = OpTypeVoid %3 = OpTypeFunction %void %float = OpTypeFloat 32 %v4float = OpTypeVector %float 4 %_ptr_Function_v4float = OpTypePointer Function %v4float %10 = OpTypeImage %float 2D 0 0 0 1 Unknown %11 = OpTypeSampledImage %10 %_ptr_UniformConstant_11 = OpTypePointer UniformConstant %11 %textureSampler = OpVariable %_ptr_UniformConstant_11 UniformConstant %v2float = OpTypeVector %float 2 %float_0 = OpConstant %float 0 %17 = OpConstantComposite %v2float %float_0 %float_0 ; set up composite to be validated %uint = OpTypeInt 32 0 %int = OpTypeInt 32 1 %v2int = OpTypeVector %int 2 %int_0 = OpConstant %int 0 %int_n100 = OpConstant %int -100 %uint_n100 = OpConstant %uint 4294967196 
%int_100 = OpConstant %int 100 %offset_100 = OpConstantComposite %v2int %int_n100 %int_100 %offset_n100 = OpConstantComposite %v2int %int_0 %uint_n100 %24 = OpConstantComposite %v2int %int_0 %int_0 %main = OpFunction %void None %3 %label = OpLabel %14 = OpLoad %11 %textureSampler %26 = OpImage %10 %14 ; Should trigger min and max %result0 = OpImageSampleImplicitLod %v4float %14 %17 ConstOffset %offset_100 %result1 = OpImageFetch %v4float %26 %24 ConstOffset %offset_100 ; Should only trigger max since uint %result2 = OpImageSampleImplicitLod %v4float %14 %17 ConstOffset %offset_n100 %result3 = OpImageFetch %v4float %26 %24 ConstOffset %offset_n100 OpReturn OpFunctionEnd )"; OneOffDescriptorSet descriptor_set(m_device, { {0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}, }); VkShaderObj const fs(this, spv_source, VK_SHADER_STAGE_FRAGMENT_BIT, SPV_ENV_VULKAN_1_0, SPV_SOURCE_ASM); CreatePipelineHelper pipe(*this); pipe.InitInfo(); pipe.shader_stages_ = {pipe.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()}; pipe.InitState(); pipe.pipeline_layout_ = VkPipelineLayoutObj(m_device, {&descriptor_set.layout_}); // as commented in SPIR-V should trigger the limits as following // // OpImageSampleImplicitLod m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-OpImageSample-06435"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-OpImageSample-06436"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-OpImageSample-06436"); // // OpImageFetch m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-OpImageSample-06435"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-OpImageSample-06436"); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-OpImageSample-06436"); pipe.CreateGraphicsPipeline(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, RayTracingLibraryFlags) { TEST_DESCRIPTION("Validate ray tracing pipeline flags match library 
flags."); SetTargetApiVersion(VK_API_VERSION_1_1); if (!AddRequiredInstanceExtensions(VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME)) { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (AddRequiredDeviceExtensions(VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME) && AddRequiredDeviceExtensions(VK_KHR_RAY_QUERY_EXTENSION_NAME) && AddRequiredDeviceExtensions(VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME)) { AddRequiredDeviceExtensions(VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME); AddRequiredDeviceExtensions(VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME); AddRequiredDeviceExtensions(VK_KHR_BUFFER_DEVICE_ADDRESS_EXTENSION_NAME); AddRequiredDeviceExtensions(VK_KHR_SPIRV_1_4_EXTENSION_NAME); AddRequiredDeviceExtensions(VK_KHR_PIPELINE_LIBRARY_EXTENSION_NAME); AddRequiredDeviceExtensions(VK_KHR_DEFERRED_HOST_OPERATIONS_EXTENSION_NAME); AddRequiredDeviceExtensions(VK_KHR_RAY_TRACING_PIPELINE_EXTENSION_NAME); } else { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_KHR_ACCELERATION_STRUCTURE_EXTENSION_NAME); return; } PFN_vkGetPhysicalDeviceFeatures2KHR vkGetPhysicalDeviceFeatures2KHR = (PFN_vkGetPhysicalDeviceFeatures2KHR)vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR"); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); auto ray_tracing_features = LvlInitStruct<VkPhysicalDeviceRayTracingPipelineFeaturesKHR>(); auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&ray_tracing_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); if (!ray_tracing_features.rayTracingPipeline) { printf("%s Feature rayTracing is not supported.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); const VkPipelineLayoutObj pipeline_layout(m_device, {}); const std::string ray_generation_shader = R"glsl( #version 460 core #extension GL_KHR_ray_tracing : enable void main() 
{ } )glsl"; VkShaderObj rgen_shader(this, ray_generation_shader, VK_SHADER_STAGE_RAYGEN_BIT_NV); PFN_vkCreateRayTracingPipelinesKHR vkCreateRayTracingPipelinesKHR = reinterpret_cast<PFN_vkCreateRayTracingPipelinesKHR>(vk::GetInstanceProcAddr(instance(), "vkCreateRayTracingPipelinesKHR")); ASSERT_TRUE(vkCreateRayTracingPipelinesKHR != nullptr); VkPipelineShaderStageCreateInfo stage_create_info = LvlInitStruct<VkPipelineShaderStageCreateInfo>(); stage_create_info.stage = VK_SHADER_STAGE_RAYGEN_BIT_KHR; stage_create_info.module = rgen_shader.handle(); stage_create_info.pName = "main"; VkRayTracingShaderGroupCreateInfoKHR group_create_info = LvlInitStruct<VkRayTracingShaderGroupCreateInfoKHR>(); group_create_info.type = VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_KHR; group_create_info.generalShader = 0; group_create_info.closestHitShader = VK_SHADER_UNUSED_KHR; group_create_info.anyHitShader = VK_SHADER_UNUSED_KHR; group_create_info.intersectionShader = VK_SHADER_UNUSED_KHR; VkRayTracingPipelineInterfaceCreateInfoKHR interface_ci = LvlInitStruct<VkRayTracingPipelineInterfaceCreateInfoKHR>(); interface_ci.maxPipelineRayHitAttributeSize = 4; interface_ci.maxPipelineRayPayloadSize = 4; VkRayTracingPipelineCreateInfoKHR pipeline_ci = LvlInitStruct<VkRayTracingPipelineCreateInfoKHR>(); pipeline_ci.flags = VK_PIPELINE_CREATE_LIBRARY_BIT_KHR; pipeline_ci.stageCount = 1; pipeline_ci.pStages = &stage_create_info; pipeline_ci.groupCount = 1; pipeline_ci.pGroups = &group_create_info; pipeline_ci.layout = pipeline_layout.handle(); pipeline_ci.pLibraryInterface = &interface_ci; VkPipeline library = VK_NULL_HANDLE; VkPipeline invalid_library = VK_NULL_HANDLE; vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &library); pipeline_ci.flags = 0; vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &invalid_library); VkPipelineLibraryCreateInfoKHR library_ci = 
LvlInitStruct<VkPipelineLibraryCreateInfoKHR>(); library_ci.libraryCount = 1; library_ci.pLibraries = &library; pipeline_ci.pLibraryInfo = &library_ci; VkPipeline pipeline = VK_NULL_HANDLE; { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-04718"); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_SKIP_AABBS_BIT_KHR; vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-04719"); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_SKIP_TRIANGLES_BIT_KHR; vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-04720"); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_ANY_HIT_SHADERS_BIT_KHR; vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-04721"); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_CLOSEST_HIT_SHADERS_BIT_KHR; vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-04722"); pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_INTERSECTION_SHADERS_BIT_KHR; vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline); m_errorMonitor->VerifyFound(); } { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkRayTracingPipelineCreateInfoKHR-flags-04723"); 
pipeline_ci.flags = VK_PIPELINE_CREATE_RAY_TRACING_NO_NULL_MISS_SHADERS_BIT_KHR;
    vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline);
    m_errorMonitor->VerifyFound();
    }
    {
        // Linking a pipeline that was NOT created with VK_PIPELINE_CREATE_LIBRARY_BIT_KHR
        // (invalid_library, created earlier with flags = 0) must be rejected.
        pipeline_ci.flags = 0;
        library_ci.pLibraries = &invalid_library;
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLibraryCreateInfoKHR-pLibraries-03381");
        vkCreateRayTracingPipelinesKHR(m_device->handle(), VK_NULL_HANDLE, VK_NULL_HANDLE, 1, &pipeline_ci, nullptr, &pipeline);
        m_errorMonitor->VerifyFound();
    }
    vk::DestroyPipeline(m_device->handle(), library, nullptr);
    vk::DestroyPipeline(m_device->handle(), invalid_library, nullptr);
}

// Negative test: a SPIR-V atomic using Device scope requires the
// vulkanMemoryModelDeviceScope feature, which this test explicitly disables.
TEST_F(VkLayerTest, DeviceMemoryScope) {
    TEST_DESCRIPTION("Validate using Device memory scope in spirv.");
    SetTargetApiVersion(VK_API_VERSION_1_2);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s Tests requires Vulkan 1.2+, skipping test.\n", kSkipPrefix);
        return;
    }
    auto features12 = LvlInitStruct<VkPhysicalDeviceVulkan12Features>();
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&features12);
    vk::GetPhysicalDeviceFeatures2(gpu(), &features2);
    // Force the Device-scope capability off; the base memory model feature
    // must still be supported or the shader cannot use scope semantics at all.
    features12.vulkanMemoryModelDeviceScope = VK_FALSE;
    if (features12.vulkanMemoryModel == VK_FALSE) {
        printf("%s vulkanMemoryModel feature is not supported, skipping test.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    // Compute shader performing an atomic store with gl_ScopeDevice.
    char const *csSource = R"glsl(
        #version 450
        #extension GL_KHR_memory_scope_semantics : enable
        layout(set = 0, binding = 0) buffer ssbo { uint y; };
        void main() {
            atomicStore(y, 1u, gl_ScopeDevice, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";
    const auto set_info = [&](CreateComputePipelineHelper &helper) {
        helper.cs_ = layer_data::make_unique<VkShaderObj>(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT);
        helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};
    };
    CreateComputePipelineHelper::OneshotTest(*this, set_info, kErrorBit, "VUID-RuntimeSpirv-vulkanMemoryModel-06265");
}

// Negative test: a SPIR-V atomic using QueueFamily scope requires the
// vulkanMemoryModel feature, which this test explicitly disables.
TEST_F(VkLayerTest, QueueFamilyMemoryScope) {
    TEST_DESCRIPTION("Validate using QueueFamily memory scope in spirv.");
    SetTargetApiVersion(VK_API_VERSION_1_2);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s Tests requires Vulkan 1.2+, skipping test.\n", kSkipPrefix);
        return;
    }
    auto features12 = LvlInitStruct<VkPhysicalDeviceVulkan12Features>();
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&features12);
    vk::GetPhysicalDeviceFeatures2(gpu(), &features2);
    // Mirror of DeviceMemoryScope above: here the base feature is disabled and
    // the DeviceScope capability must be supported.
    features12.vulkanMemoryModel = VK_FALSE;
    if (features12.vulkanMemoryModelDeviceScope == VK_FALSE) {
        printf("%s vulkanMemoryModelDeviceScope feature is not supported, skipping test.\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    char const *csSource = R"glsl(
        #version 450
        #extension GL_KHR_memory_scope_semantics : enable
        layout(set = 0, binding = 0) buffer ssbo { uint y; };
        void main() {
            atomicStore(y, 1u, gl_ScopeQueueFamily, gl_StorageSemanticsBuffer, gl_SemanticsRelaxed);
        }
    )glsl";
    const auto set_info = [&](CreateComputePipelineHelper &helper) {
        helper.cs_ = layer_data::make_unique<VkShaderObj>(this, csSource, VK_SHADER_STAGE_COMPUTE_BIT);
        helper.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1, VK_SHADER_STAGE_ALL, nullptr}};
    };
    // Two possible errors are accepted here: the runtime check and the
    // shader-module create-time check.
    CreateComputePipelineHelper::OneshotTest(
        *this, set_info, kErrorBit,
        std::vector<string>{"VUID-RuntimeSpirv-vulkanMemoryModel-06266", "VUID-VkShaderModuleCreateInfo-pCode-01091"});
}

// Negative test: a descriptor set layout created with the Valve host-only-pool
// flag may not be referenced from a pipeline layout.
TEST_F(VkLayerTest, CreatePipelineLayoutWithInvalidSetLayoutFlags) {
    TEST_DESCRIPTION("Validate setLayout flags in create pipeline layout.");
    AddRequiredExtensions(VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (!AreRequestedExtensionsEnabled()) {
        printf("%s Extension %s is not supported, skipping test.\n", kSkipPrefix, VK_VALVE_MUTABLE_DESCRIPTOR_TYPE_EXTENSION_NAME);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(InitState());

    VkDescriptorSetLayoutBinding layout_binding = {};
    layout_binding.binding = 0;
    layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
    layout_binding.descriptorCount = 1;
    layout_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
    layout_binding.pImmutableSamplers = nullptr;

    VkDescriptorSetLayoutCreateInfo ds_layout_ci = LvlInitStruct<VkDescriptorSetLayoutCreateInfo>();
    // The host-only-pool flag is what makes this layout invalid for use in a
    // pipeline layout below.
    ds_layout_ci.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_HOST_ONLY_POOL_BIT_VALVE;
    ds_layout_ci.bindingCount = 1;
    ds_layout_ci.pBindings = &layout_binding;
    vk_testing::DescriptorSetLayout ds_layout;
    ds_layout.init(*m_device, ds_layout_ci);
    VkDescriptorSetLayout ds_layout_handle = ds_layout.handle();

    VkPipelineLayoutCreateInfo pipeline_layout_ci = LvlInitStruct<VkPipelineLayoutCreateInfo>();
    pipeline_layout_ci.setLayoutCount = 1;
    pipeline_layout_ci.pSetLayouts = &ds_layout_handle;

    VkPipelineLayout pipeline_layout = VK_NULL_HANDLE;
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineLayoutCreateInfo-pSetLayouts-04606");
    vk::CreatePipelineLayout(m_device->device(), &pipeline_layout_ci, NULL, &pipeline_layout);
    m_errorMonitor->VerifyFound();
}

// Negative test: build a multiview render pass (viewMask != 0) and then try to
// use tessellation / geometry stages while the corresponding multiview
// features are disabled.
TEST_F(VkLayerTest, TestUsingDisabledMultiviewFeatures) {
    TEST_DESCRIPTION("Create graphics pipeline using multiview features which are not enabled.");
    SetTargetApiVersion(VK_API_VERSION_1_2);
    ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor));
    if (DeviceValidationVersion() < VK_API_VERSION_1_2) {
        printf("%s Vulkan 1.2 not supported, skipping test\n", kSkipPrefix);
        return;
    }
    VkPhysicalDeviceMultiviewFeatures multiview_features = LvlInitStruct<VkPhysicalDeviceMultiviewFeatures>();
    // Deliberately leave the multiview tess/geom features off while the plain
    // tessellation/geometry shader features are requested below.
    multiview_features.multiviewTessellationShader = VK_FALSE;
    multiview_features.multiviewGeometryShader = VK_FALSE;
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&multiview_features);
    features2.features.geometryShader = VK_TRUE;
features2.features.tessellationShader = VK_TRUE;
    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));

    // Minimal single-color-attachment render pass with a non-zero view mask,
    // i.e. a multiview subpass.
    VkAttachmentReference2 color_attachment = LvlInitStruct<VkAttachmentReference2>();
    color_attachment.layout = VK_IMAGE_LAYOUT_GENERAL;

    VkAttachmentDescription2 description = LvlInitStruct<VkAttachmentDescription2>();
    description.samples = VK_SAMPLE_COUNT_1_BIT;
    description.format = VK_FORMAT_B8G8R8A8_UNORM;
    description.finalLayout = VK_IMAGE_LAYOUT_GENERAL;

    VkSubpassDescription2 subpass = LvlInitStruct<VkSubpassDescription2>();
    subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
    subpass.viewMask = 0x3u;  // two views -> multiview subpass
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &color_attachment;

    VkRenderPassCreateInfo2 rpci = LvlInitStruct<VkRenderPassCreateInfo2>();
    rpci.attachmentCount = 1;
    rpci.pAttachments = &description;
    rpci.subpassCount = 1;
    rpci.pSubpasses = &subpass;

    VkRenderPass render_pass;
    vk::CreateRenderPass2(m_device->device(), &rpci, nullptr, &render_pass);

    if (features2.features.tessellationShader) {
        char const *tcsSource = R"glsl(
        #version 450
        layout(vertices=3) out;
        void main(){
           gl_TessLevelOuter[0] = gl_TessLevelOuter[1] = gl_TessLevelOuter[2] = 1;
           gl_TessLevelInner[0] = 1;
        }
        )glsl";
        char const *tesSource = R"glsl(
        #version 450
        layout(triangles, equal_spacing, cw) in;
        void main(){
           gl_Position.xyz = gl_TessCoord;
           gl_Position.w = 1.0f;
        }
        )glsl";

        VkShaderObj tcs(this, tcsSource, VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT);
        VkShaderObj tes(this, tesSource, VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT);

        VkPipelineInputAssemblyStateCreateInfo iasci{VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO, nullptr, 0,
                                                     VK_PRIMITIVE_TOPOLOGY_PATCH_LIST, VK_FALSE};
        VkPipelineTessellationStateCreateInfo tsci{VK_STRUCTURE_TYPE_PIPELINE_TESSELLATION_STATE_CREATE_INFO, nullptr, 0, 3};

        CreatePipelineHelper pipe(*this);
        pipe.InitInfo();
        pipe.gp_ci_.renderPass = render_pass;
        pipe.gp_ci_.subpass = 0;
        pipe.cb_ci_.attachmentCount = 1;
        pipe.gp_ci_.pTessellationState = &tsci;
        pipe.gp_ci_.pInputAssemblyState = &iasci;
        pipe.shader_stages_.emplace_back(tcs.GetStageCreateInfo());
        pipe.shader_stages_.emplace_back(tes.GetStageCreateInfo());
        pipe.InitState();
        // Tessellation stages in a multiview subpass without
        // multiviewTessellationShader must be rejected.
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06047");
        pipe.CreateGraphicsPipeline();
        m_errorMonitor->VerifyFound();
    }

    if (features2.features.geometryShader) {
        static char const *gsSource = R"glsl(
        #version 450
        layout (points) in;
        layout (triangle_strip) out;
        layout (max_vertices = 3) out;
        void main() {
           gl_Position = vec4(1.0, 0.5, 0.5, 0.0);
           EmitVertex();
        }
        )glsl";

        VkShaderObj vs(this, bindStateVertPointSizeShaderText, VK_SHADER_STAGE_VERTEX_BIT);
        VkShaderObj gs(this, gsSource, VK_SHADER_STAGE_GEOMETRY_BIT);

        CreatePipelineHelper pipe(*this);
        pipe.InitInfo();
        pipe.gp_ci_.renderPass = render_pass;
        pipe.gp_ci_.subpass = 0;
        pipe.cb_ci_.attachmentCount = 1;
        pipe.shader_stages_ = {vs.GetStageCreateInfo(), gs.GetStageCreateInfo(), pipe.fs_->GetStageCreateInfo()};
        pipe.InitState();
        // Geometry stage in a multiview subpass without multiviewGeometryShader
        // must be rejected.
        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06048");
        pipe.CreateGraphicsPipeline();
        m_errorMonitor->VerifyFound();
    }

    vk::DestroyRenderPass(m_device->device(), render_pass, nullptr);
}

// Negative test: sample an image that was never transitioned out of
// UNDEFINED from a compute shader; submission must report an invalid
// image layout for both vkCmdDispatch and vkCmdDispatchBaseKHR.
TEST_F(VkLayerTest, ComputeImageLayout) {
    TEST_DESCRIPTION("Attempt to use an image with an invalid layout in a compute shader");

    if (!AddRequiredExtensions(VK_KHR_DEVICE_GROUP_EXTENSION_NAME)) {
        printf("%s Required instance extensions not available\n", kSkipPrefix);
        return;
    }
    ASSERT_NO_FATAL_FAILURE(Init());
    if (!AreRequestedExtensionsEnabled()) {
        printf("%s Required device extensions not available\n", kSkipPrefix);
        return;
    }

    auto vkCmdDispatchBaseKHR =
        reinterpret_cast<PFN_vkCmdDispatchBaseKHR>(vk::GetInstanceProcAddr(instance(), "vkCmdDispatchBaseKHR"));
    ASSERT_TRUE(vkCmdDispatchBaseKHR != nullptr);

    const char *cs = R"glsl(#version 450
        layout(local_size_x=1) in;
        layout(set=0, binding=0) uniform sampler2D s;
        void main(){
            vec4 v = 2.0 * texture(s, vec2(0.0));
        }
    )glsl";

    CreateComputePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.cs_ = layer_data::make_unique<VkShaderObj>(this, cs, VK_SHADER_STAGE_COMPUTE_BIT);
    pipe.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}};
    pipe.InitState();
    pipe.CreateComputePipeline();

    const VkFormat fmt = VK_FORMAT_R8G8B8A8_UNORM;
    VkImageObj image(m_device);
    // Note: no layout transition is recorded for this image, which is what the
    // validation layer is expected to flag at submit time.
    image.Init(64, 64, 1, fmt, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    VkImageView view = image.targetView(fmt);

    VkSamplerObj sampler(m_device);

    pipe.descriptor_set_->WriteDescriptorImageInfo(0, view, sampler.handle(), VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
    pipe.descriptor_set_->UpdateDescriptorSets();

    {  // Verify invalid image layout with CmdDispatch
        VkCommandBufferObj cmd(m_device, m_commandPool);
        cmd.begin();
        vk::CmdBindDescriptorSets(cmd.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_layout_.handle(), 0, 1,
                                  &pipe.descriptor_set_->set_, 0, nullptr);
        vk::CmdBindPipeline(cmd.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_);
        vk::CmdDispatch(cmd.handle(), 1, 1, 1);
        cmd.end();

        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, kVUID_Core_DrawState_InvalidImageLayout);
        cmd.QueueCommandBuffer(false);
        m_errorMonitor->VerifyFound();
    }

    {  // Verify invalid image layout with CmdDispatchBaseKHR
        VkCommandBufferObj cmd(m_device, m_commandPool);
        cmd.begin();
        vk::CmdBindDescriptorSets(cmd.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_layout_.handle(), 0, 1,
                                  &pipe.descriptor_set_->set_, 0, nullptr);
        vk::CmdBindPipeline(cmd.handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_);
        vkCmdDispatchBaseKHR(cmd.handle(), 0, 0, 0, 1, 1, 1);
        cmd.end();

        m_errorMonitor->SetDesiredFailureMsg(kErrorBit, kVUID_Core_DrawState_InvalidImageLayout);
        cmd.QueueCommandBuffer(false);
        m_errorMonitor->VerifyFound();
    }
}

// Same scenario as ComputeImageLayout but driven through the Vulkan 1.1 core
// entry point vkCmdDispatchBase.
TEST_F(VkLayerTest, ComputeImageLayout_1_1) {
    TEST_DESCRIPTION("Attempt to use an image with an invalid layout in a compute shader using vkCmdDispatchBase");
    SetTargetApiVersion(VK_API_VERSION_1_1);
    ASSERT_NO_FATAL_FAILURE(Init());
    if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
        printf("%s Test requires Vulkan +1.1\n", kSkipPrefix);
        return;
    }

    // NOTE(review): this pointer is fetched and asserted but the test below
    // calls vk::CmdDispatchBase directly — the lookup looks redundant.
    auto vkCmdDispatchBaseKHR =
        reinterpret_cast<PFN_vkCmdDispatchBaseKHR>(vk::GetInstanceProcAddr(instance(), "vkCmdDispatchBaseKHR"));
    ASSERT_TRUE(vkCmdDispatchBaseKHR != nullptr);

    const char *cs = R"glsl(#version 450
        layout(local_size_x=1) in;
        layout(set=0, binding=0) uniform sampler2D s;
        void main(){
            vec4 v = 2.0 * texture(s, vec2(0.0));
        }
    )glsl";

    CreateComputePipelineHelper pipe(*this);
    pipe.InitInfo();
    pipe.cs_ = layer_data::make_unique<VkShaderObj>(this, cs, VK_SHADER_STAGE_COMPUTE_BIT);
    pipe.dsl_bindings_ = {{0, VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1, VK_SHADER_STAGE_ALL, nullptr}};
    pipe.InitState();
    pipe.CreateComputePipeline();

    const VkFormat fmt = VK_FORMAT_R8G8B8A8_UNORM;
    VkImageObj image(m_device);
    image.Init(64, 64, 1, fmt, VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, VK_IMAGE_TILING_OPTIMAL, 0);
    ASSERT_TRUE(image.initialized());

    VkImageView view = image.targetView(fmt);

    VkSamplerObj sampler(m_device);

    pipe.descriptor_set_->WriteDescriptorImageInfo(0, view, sampler.handle(), VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
    pipe.descriptor_set_->UpdateDescriptorSets();

    m_commandBuffer->begin();
    vk::CmdBindDescriptorSets(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_layout_.handle(), 0, 1,
                              &pipe.descriptor_set_->set_, 0, nullptr);
    vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_COMPUTE, pipe.pipeline_);
    vk::CmdDispatchBase(m_commandBuffer->handle(), 0, 0, 0, 1, 1, 1);
    m_commandBuffer->end();

    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, kVUID_Core_DrawState_InvalidImageLayout);
    m_commandBuffer->QueueCommandBuffer(false);
    m_errorMonitor->VerifyFound();
}

TEST_F(VkLayerTest,
CreateGraphicsPipelineNullRenderPass) {
    TEST_DESCRIPTION("Test for a creating a pipeline with a null renderpass but VK_KHR_dynamic_rendering is not enabled");
    m_errorMonitor->ExpectSuccess();
    ASSERT_NO_FATAL_FAILURE(Init());

    char const *fsSource = R"glsl(
        #version 450
        layout(input_attachment_index=0, set=0, binding=0) uniform subpassInput x;
        layout(location=0) out vec4 color;
        void main() {
           color = subpassLoad(x);
        }
    )glsl";

    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();

    VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
    const VkDescriptorSetLayoutObj dsl(m_device, {dslb});
    const VkPipelineLayoutObj pl(m_device, {&dsl});

    auto create_info = LvlInitStruct<VkGraphicsPipelineCreateInfo>();
    pipe.InitGraphicsPipelineCreateInfo(&create_info);
    m_errorMonitor->VerifyNotFound();

    // renderPass stays VK_NULL_HANDLE below; without dynamic rendering enabled
    // this must fail with both of these VUIDs.
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06574");
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06603");
    pipe.CreateVKPipeline(pl.handle(), VK_NULL_HANDLE, &create_info);
    m_errorMonitor->VerifyFound();
}

// Negative test: use the ARM rasterization-order-attachment-access pipeline
// state flags while all three corresponding device features are disabled.
TEST_F(VkLayerTest, CreateGraphicsPipelineRasterizationOrderAttachmentAccessWithoutFeature) {
    TEST_DESCRIPTION("Test for a creating a pipeline with VK_ARM_rasterization_order_attachment_access enabled");
    m_errorMonitor->ExpectSuccess();

    SetTargetApiVersion(VK_API_VERSION_1_2);
    AddRequiredExtensions(VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME);

    auto rasterization_order_features = LvlInitStruct<VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM>();
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&rasterization_order_features);
    ASSERT_NO_FATAL_FAILURE(InitFrameworkAndRetrieveFeatures(features2));
    if (!AreRequestedExtensionsEnabled()) {
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME);
        return;
    }

    // Explicitly disable all three access features; the pipeline flags used
    // below then become invalid.
    rasterization_order_features.rasterizationOrderColorAttachmentAccess = 0;
    rasterization_order_features.rasterizationOrderDepthAttachmentAccess = 0;
    rasterization_order_features.rasterizationOrderStencilAttachmentAccess = 0;

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    m_errorMonitor->VerifyNotFound();

    auto ds_ci = LvlInitStruct<VkPipelineDepthStencilStateCreateInfo>();
    VkPipelineColorBlendAttachmentState cb_as = {};
    auto cb_ci = LvlInitStruct<VkPipelineColorBlendStateCreateInfo>();
    cb_ci.attachmentCount = 1;
    cb_ci.pAttachments = &cb_as;

    // One color + one depth/stencil attachment for the render pass.
    VkAttachmentDescription attachments[2] = {};
    attachments[0].flags = 0;
    attachments[0].format = VK_FORMAT_B8G8R8A8_UNORM;
    attachments[0].samples = VK_SAMPLE_COUNT_1_BIT;
    attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
    attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    attachments[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    attachments[0].finalLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

    attachments[1].flags = 0;
    attachments[1].format = FindSupportedDepthStencilFormat(this->gpu());
    attachments[1].samples = VK_SAMPLE_COUNT_1_BIT;
    attachments[1].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
    attachments[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
    attachments[1].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    attachments[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
    attachments[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
    attachments[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

    VkAttachmentReference cAttachRef = {};
    cAttachRef.attachment = 0;
    cAttachRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;

    VkAttachmentReference dsAttachRef = {};
    dsAttachRef.attachment = 1;
    dsAttachRef.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;

    VkSubpassDescription subpass = {};
    subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
    subpass.colorAttachmentCount = 1;
    subpass.pColorAttachments = &cAttachRef;
    subpass.pDepthStencilAttachment = &dsAttachRef;
    // Subpass carries all three rasterization-order flags so the failures
    // below come from the missing *features*, not missing subpass flags.
    subpass.flags = VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_COLOR_ACCESS_BIT_ARM |
                    VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM |
                    VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM;

    VkRenderPassCreateInfo rpci = LvlInitStruct<VkRenderPassCreateInfo>();
    rpci.attachmentCount = 2;
    rpci.pAttachments = attachments;
    rpci.subpassCount = 1;
    rpci.pSubpasses = &subpass;

    vk_testing::RenderPass render_pass(*m_device, rpci);

    auto set_info = [&](CreatePipelineHelper &helper) {
        helper.gp_ci_.pDepthStencilState = &ds_ci;
        helper.gp_ci_.pColorBlendState = &cb_ci;
        helper.gp_ci_.renderPass = render_pass.handle();
    };

    // Color attachment
    cb_ci.flags = VK_PIPELINE_COLOR_BLEND_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_BIT_ARM;
    ds_ci.flags = 0;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      "VUID-VkPipelineColorBlendStateCreateInfo-rasterizationOrderColorAttachmentAccess-06465");

    // Depth attachment
    cb_ci.flags = 0;
    ds_ci.flags = VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      "VUID-VkPipelineDepthStencilStateCreateInfo-rasterizationOrderDepthAttachmentAccess-06463");

    // Stencil attachment
    cb_ci.flags = 0;
    ds_ci.flags = VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM;
    CreatePipelineHelper::OneshotTest(*this, set_info, kErrorBit,
                                      "VUID-VkPipelineDepthStencilStateCreateInfo-rasterizationOrderStencilAttachmentAccess-06464");
}

// Negative test: same extension, but now the *subpass* lacks the
// rasterization-order flags while the pipeline state requests them.
TEST_F(VkLayerTest, CreateGraphicsPipelineRasterizationOrderAttachmentAccessNoSubpassFlags) {
    TEST_DESCRIPTION("Test for a creating a pipeline with VK_ARM_rasterization_order_attachment_access enabled");
    m_errorMonitor->ExpectSuccess();

    SetTargetApiVersion(VK_API_VERSION_1_2);
    AddRequiredExtensions(VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME);

    auto rasterization_order_features = LvlInitStruct<VkPhysicalDeviceRasterizationOrderAttachmentAccessFeaturesARM>();
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&rasterization_order_features);
    ASSERT_NO_FATAL_FAILURE(InitFrameworkAndRetrieveFeatures(features2));
    if (!AreRequestedExtensionsEnabled()) {
        printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_ARM_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_EXTENSION_NAME);
        return;
    }

    if (!rasterization_order_features.rasterizationOrderColorAttachmentAccess &&
        !rasterization_order_features.rasterizationOrderDepthAttachmentAccess &&
        !rasterization_order_features.rasterizationOrderStencilAttachmentAccess) {
        printf("%s Test requires (unsupported) rasterizationOrderAttachmentAccess , skipping\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));
    m_errorMonitor->VerifyNotFound();

    auto ds_ci = LvlInitStruct<VkPipelineDepthStencilStateCreateInfo>();
    VkPipelineColorBlendAttachmentState cb_as = {};
    auto cb_ci = LvlInitStruct<VkPipelineColorBlendStateCreateInfo>();
    cb_ci.attachmentCount = 1;
    cb_ci.pAttachments = &cb_as;
    VkRenderPass render_pass_handle = VK_NULL_HANDLE;

    // Builds a 1-color + 1-depth/stencil render pass whose single subpass
    // carries exactly `subpass_flags`.
    auto create_render_pass = [&](VkPipelineDepthStencilStateCreateFlags subpass_flags, vk_testing::RenderPass &render_pass) {
        VkAttachmentDescription attachments[2] = {};
        attachments[0].flags = 0;
        attachments[0].format = VK_FORMAT_B8G8R8A8_UNORM;
        attachments[0].samples = VK_SAMPLE_COUNT_1_BIT;
        attachments[0].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR;
        attachments[0].storeOp = VK_ATTACHMENT_STORE_OP_STORE;
        attachments[0].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE;
        attachments[0].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE;
        attachments[0].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
        attachments[0].finalLayout =
VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; attachments[1].flags = 0; attachments[1].format = FindSupportedDepthStencilFormat(this->gpu()); attachments[1].samples = VK_SAMPLE_COUNT_1_BIT; attachments[1].storeOp = VK_ATTACHMENT_STORE_OP_STORE; attachments[1].stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE; attachments[1].loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attachments[1].stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR; attachments[1].initialLayout = VK_IMAGE_LAYOUT_UNDEFINED; attachments[1].finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkAttachmentReference cAttachRef = {}; cAttachRef.attachment = 0; cAttachRef.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; VkAttachmentReference dsAttachRef = {}; dsAttachRef.attachment = 1; dsAttachRef.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL; VkSubpassDescription subpass = {}; subpass.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS; subpass.colorAttachmentCount = 1; subpass.pColorAttachments = &cAttachRef; subpass.pDepthStencilAttachment = &dsAttachRef; subpass.flags = subpass_flags; VkRenderPassCreateInfo rpci = LvlInitStruct<VkRenderPassCreateInfo>(); rpci.attachmentCount = 2; rpci.pAttachments = attachments; rpci.subpassCount = 1; rpci.pSubpasses = &subpass; render_pass.init(*this->m_device, rpci); }; auto set_flgas_pipeline_createinfo = [&](CreatePipelineHelper &helper) { helper.gp_ci_.pDepthStencilState = &ds_ci; helper.gp_ci_.pColorBlendState = &cb_ci; helper.gp_ci_.renderPass = render_pass_handle; }; vk_testing::RenderPass render_pass_no_flags; create_render_pass(0, render_pass_no_flags); render_pass_handle = render_pass_no_flags.handle(); // Color attachment if (rasterization_order_features.rasterizationOrderColorAttachmentAccess) { cb_ci.flags = VK_PIPELINE_COLOR_BLEND_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_ACCESS_BIT_ARM; ds_ci.flags = 0; // Expecting VUID-VkGraphicsPipelineCreateInfo-flags-06484 Error CreatePipelineHelper::OneshotTest(*this, set_flgas_pipeline_createinfo, 
kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-06484"); } // Depth attachment if (rasterization_order_features.rasterizationOrderDepthAttachmentAccess) { cb_ci.flags = 0; ds_ci.flags = VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM; // Expecting VUID-VkGraphicsPipelineCreateInfo-flags-06485 Error CreatePipelineHelper::OneshotTest(*this, set_flgas_pipeline_createinfo, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-06485"); } // Stencil attachment if (rasterization_order_features.rasterizationOrderStencilAttachmentAccess) { cb_ci.flags = 0; ds_ci.flags = VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_STENCIL_ACCESS_BIT_ARM; // Expecting VUID-VkGraphicsPipelineCreateInfo-flags-06486 Error CreatePipelineHelper::OneshotTest(*this, set_flgas_pipeline_createinfo, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-06486"); } if (rasterization_order_features.rasterizationOrderDepthAttachmentAccess) { char const *fsSource = R"glsl( #version 450 layout(early_fragment_tests) in; layout(location = 0) out vec4 uFragColor; void main() { uFragColor = vec4(0,1,0,1); } )glsl"; VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT); auto set_stages_pipeline_createinfo = [&](CreatePipelineHelper &helper) { helper.gp_ci_.pDepthStencilState = &ds_ci; helper.gp_ci_.pColorBlendState = &cb_ci; helper.gp_ci_.renderPass = render_pass_handle; helper.shader_stages_ = {helper.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()}; }; cb_ci.flags = 0; ds_ci.flags = VK_PIPELINE_DEPTH_STENCIL_STATE_CREATE_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM; vk_testing::RenderPass render_pass; create_render_pass(VK_SUBPASS_DESCRIPTION_RASTERIZATION_ORDER_ATTACHMENT_DEPTH_ACCESS_BIT_ARM, render_pass); render_pass_handle = render_pass.handle(); CreatePipelineHelper::OneshotTest(*this, set_stages_pipeline_createinfo, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-flags-06591"); } } TEST_F(VkLayerTest, 
InvalidPipelineRenderingParameters) {
    TEST_DESCRIPTION("Test pipeline rendering formats and viewmask");

    SetTargetApiVersion(VK_API_VERSION_1_1);
    AddRequiredExtensions(VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME);
    ASSERT_NO_FATAL_FAILURE(InitFramework());

    if (!AreRequestedExtensionsEnabled()) {
        printf("%s %s Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME);
        return;
    }
    if (DeviceValidationVersion() < VK_API_VERSION_1_1) {
        printf("%s Tests requires Vulkan 1.1+, skipping test\n", kSkipPrefix);
        return;
    }

    auto dynamic_rendering_features = LvlInitStruct<VkPhysicalDeviceDynamicRenderingFeaturesKHR>();
    auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&dynamic_rendering_features);
    vk::GetPhysicalDeviceFeatures2(gpu(), &features2);
    if (!dynamic_rendering_features.dynamicRendering) {
        printf("%s Test requires (unsupported) dynamicRendering , skipping\n", kSkipPrefix);
        return;
    }

    ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2));

    char const *fsSource = R"glsl(
        #version 450
        layout(location=0) out vec4 color;
        void main() {
           color = vec4(1.0f);
        }
    )glsl";

    VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT);
    VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT);

    VkPipelineObj pipe(m_device);
    pipe.AddShader(&vs);
    pipe.AddShader(&fs);
    pipe.AddDefaultColorAttachment();

    VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr};
    const VkDescriptorSetLayoutObj dsl(m_device, {dslb});
    const VkPipelineLayoutObj pl(m_device, {&dsl});

    // Dynamic-rendering path: formats/view mask come from this pNext struct
    // instead of a render pass.
    auto pipeline_rendering_info = LvlInitStruct<VkPipelineRenderingCreateInfoKHR>();

    auto create_info = LvlInitStruct<VkGraphicsPipelineCreateInfo>();
    pipe.InitGraphicsPipelineCreateInfo(&create_info);
    create_info.pNext = &pipeline_rendering_info;

    auto depth_stencil_state = LvlInitStruct<VkPipelineDepthStencilStateCreateInfo>();
    create_info.pDepthStencilState = &depth_stencil_state;

    // Pick depth-only and depth/stencil formats the implementation supports.
    VkFormat depth_format = VK_FORMAT_X8_D24_UNORM_PACK32;
    if (ImageFormatAndFeaturesSupported(gpu_, VK_FORMAT_D32_SFLOAT, VK_IMAGE_TILING_OPTIMAL,
                                        VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
        depth_format = VK_FORMAT_D32_SFLOAT;
    }
    VkFormat stencil_format = VK_FORMAT_D24_UNORM_S8_UINT;
    if (ImageFormatAndFeaturesSupported(gpu_, VK_FORMAT_D32_SFLOAT_S8_UINT, VK_IMAGE_TILING_OPTIMAL,
                                        VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
        stencil_format = VK_FORMAT_D32_SFLOAT_S8_UINT;
    }

    // Deliberately use a depth format as the color attachment format.
    VkFormat color_formats = {depth_format};
    pipeline_rendering_info.colorAttachmentCount = 1;
    pipeline_rendering_info.pColorAttachmentFormats = &color_formats;

    // Invalid color format
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06581");
    pipe.CreateVKPipeline(pl.handle(), VK_NULL_HANDLE, &create_info);
    m_errorMonitor->VerifyFound();

    // Invalid color format array
    pipeline_rendering_info.pColorAttachmentFormats = nullptr;
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06579");
    pipe.CreateVKPipeline(pl.handle(), VK_NULL_HANDLE, &create_info);
    m_errorMonitor->VerifyFound();

    // Invalid depth format
    pipeline_rendering_info.colorAttachmentCount = 0;
    pipeline_rendering_info.pColorAttachmentFormats = &color_formats;
    pipeline_rendering_info.depthAttachmentFormat = VK_FORMAT_R8G8B8A8_UNORM;
    m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06587");
    // TODO (ncesario) Seems impossible hit 06585 without also hitting 06587. Since 06587 happens in stateless validation, 06585
    // never gets triggered, though has been manually tested separately by removing 06587.
// m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06585"); pipe.CreateVKPipeline(pl.handle(), VK_NULL_HANDLE, &create_info); m_errorMonitor->VerifyFound(); // Invalid stecil format pipeline_rendering_info.depthAttachmentFormat = VK_FORMAT_UNDEFINED; pipeline_rendering_info.stencilAttachmentFormat = VK_FORMAT_R8G8B8A8_UNORM; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06588"); // TODO (ncesario) Same scenario as with 06585 and 06587 // m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06586"); pipe.CreateVKPipeline(pl.handle(), VK_NULL_HANDLE, &create_info); m_errorMonitor->VerifyFound(); // mismatching depth/stencil formats pipeline_rendering_info.depthAttachmentFormat = depth_format; pipeline_rendering_info.stencilAttachmentFormat = stencil_format; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06589"); pipe.CreateVKPipeline(pl.handle(), VK_NULL_HANDLE, &create_info); m_errorMonitor->VerifyFound(); // Non-zero viewMask color_formats = VK_FORMAT_R8G8B8A8_UNORM; pipeline_rendering_info.colorAttachmentCount = 1; pipeline_rendering_info.pColorAttachmentFormats = &color_formats; pipeline_rendering_info.depthAttachmentFormat = VK_FORMAT_UNDEFINED; pipeline_rendering_info.stencilAttachmentFormat = VK_FORMAT_UNDEFINED; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-multiview-06577"); pipeline_rendering_info.viewMask = 1; pipe.CreateVKPipeline(pl.handle(), VK_NULL_HANDLE, &create_info); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidPipelineRenderingViewMaskParameter) { TEST_DESCRIPTION("Test pipeline rendering viewmask maximum index"); SetTargetApiVersion(VK_API_VERSION_1_1); AddRequiredExtensions(VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework()); if (!AreRequestedExtensionsEnabled()) { printf("%s %s 
Extension not supported, skipping tests\n", kSkipPrefix, VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME); return; } if (DeviceValidationVersion() < VK_API_VERSION_1_1) { printf("%s Tests requires Vulkan 1.1+, skipping test\n", kSkipPrefix); return; } auto multiview_features = LvlInitStruct<VkPhysicalDeviceMultiviewFeatures>(); multiview_features.multiview = VK_TRUE; auto dynamic_rendering_features = LvlInitStruct<VkPhysicalDeviceDynamicRenderingFeaturesKHR>(&multiview_features); auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&dynamic_rendering_features); vk::GetPhysicalDeviceFeatures2(gpu(), &features2); if (!dynamic_rendering_features.dynamicRendering) { printf("%s Test requires (unsupported) dynamicRendering , skipping\n", kSkipPrefix); return; } if (!multiview_features.multiview) { printf("%s Test requires (unsupported) multiview , skipping\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); char const *fsSource = R"glsl( #version 450 layout(location=0) out vec4 color; void main() { color = vec4(1.0f); } )glsl"; VkShaderObj vs(this, bindStateVertShaderText, VK_SHADER_STAGE_VERTEX_BIT); VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); pipe.AddDefaultColorAttachment(); VkDescriptorSetLayoutBinding dslb = {0, VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, 1, VK_SHADER_STAGE_FRAGMENT_BIT, nullptr}; const VkDescriptorSetLayoutObj dsl(m_device, {dslb}); const VkPipelineLayoutObj pl(m_device, {&dsl}); auto pipeline_rendering_info = LvlInitStruct<VkPipelineRenderingCreateInfoKHR>(); auto create_info = LvlInitStruct<VkGraphicsPipelineCreateInfo>(); pipe.InitGraphicsPipelineCreateInfo(&create_info); create_info.pNext = &pipeline_rendering_info; VkFormat color_formats = {VK_FORMAT_R8G8B8A8_UNORM}; pipeline_rendering_info.colorAttachmentCount = 1; pipeline_rendering_info.pColorAttachmentFormats = &color_formats; VkPhysicalDeviceMultiviewProperties multiview_props 
= LvlInitStruct<VkPhysicalDeviceMultiviewProperties>(); VkPhysicalDeviceProperties2 pd_props2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&multiview_props); vk::GetPhysicalDeviceProperties2(gpu(), &pd_props2); if (multiview_props.maxMultiviewViewCount == 32) { printf("%s VUID is not testable as maxMultiviewViewCount is 32, skipping test\n", kSkipPrefix); return; } m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06578"); pipeline_rendering_info.viewMask = 1 << multiview_props.maxMultiviewViewCount; pipe.CreateVKPipeline(pl.handle(), VK_NULL_HANDLE, &create_info); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, TestMismatchedRenderPassAndPipelineAttachments) { TEST_DESCRIPTION("Test creating a pipeline with no attachments with a render pass with attachments."); ASSERT_NO_FATAL_FAILURE(Init()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-renderPass-06042"); char const *vsSource = R"glsl( #version 450 void main() { } )glsl"; char const *fsSource = R"glsl( #version 450 void main() { } )glsl"; VkShaderObj vs(this, vsSource, VK_SHADER_STAGE_VERTEX_BIT); VkShaderObj fs(this, fsSource, VK_SHADER_STAGE_FRAGMENT_BIT); VkPipelineObj pipe(m_device); pipe.AddShader(&vs); pipe.AddShader(&fs); VkViewport viewport = {0.0f, 0.0f, 64.0f, 64.0f, 0.0f, 1.0f}; m_viewports.push_back(viewport); pipe.SetViewport(m_viewports); VkRect2D rect = {}; m_scissors.push_back(rect); pipe.SetScissor(m_scissors); VkDescriptorSetLayoutBinding layout_binding = {}; layout_binding.binding = 1; layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE; layout_binding.descriptorCount = 1; layout_binding.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT; layout_binding.pImmutableSamplers = nullptr; const VkDescriptorSetLayoutObj descriptor_set_layout(m_device, {layout_binding}); const VkPipelineLayoutObj pipeline_layout(DeviceObj(), {&descriptor_set_layout}); 
pipe.CreateVKPipeline(pipeline_layout.handle(), m_renderPass); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, IncompatibleScissorCountAndViewportCount) { TEST_DESCRIPTION("Validate creating a pipeline with incompatible scissor and viewport count, without dynamic states."); SetTargetApiVersion(VK_API_VERSION_1_3); ASSERT_NO_FATAL_FAILURE(InitFramework()); if (DeviceValidationVersion() < VK_API_VERSION_1_3) { printf("%s test requires Vulkan 1.3+, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); VkViewport viewports[2] = {{0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f}, {0.0f, 0.0f, 1.0f, 1.0f, 0.0f, 1.0f}}; auto set_viewport_state_createinfo = [&](CreatePipelineHelper &helper) { helper.vp_state_ci_.viewportCount = 2; helper.vp_state_ci_.pViewports = viewports; }; CreatePipelineHelper::OneshotTest(*this, set_viewport_state_createinfo, kErrorBit, "VUID-VkPipelineViewportStateCreateInfo-scissorCount-04134"); } TEST_F(VkLayerTest, TestCreatingPipelineWithScissorWithCount) { TEST_DESCRIPTION("Validate creating graphics pipeline with dynamic state scissor with count."); SetTargetApiVersion(VK_API_VERSION_1_3); ASSERT_NO_FATAL_FAILURE(InitFramework()); if (DeviceValidationVersion() < VK_API_VERSION_1_3) { printf("%s test requires Vulkan 1.3+, skipping test\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState()); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); { const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT}; VkPipelineDynamicStateCreateInfo dyn_state_ci = LvlInitStruct<VkPipelineDynamicStateCreateInfo>(); dyn_state_ci.dynamicStateCount = 1; dyn_state_ci.pDynamicStates = dyn_states; auto set_viewport_state_createinfo = [&](CreatePipelineHelper &helper) { helper.dyn_state_ci_ = dyn_state_ci; helper.vp_state_ci_.scissorCount = 0; helper.vp_state_ci_.viewportCount = 0; }; CreatePipelineHelper::OneshotTest(*this, set_viewport_state_createinfo, kErrorBit, 
"VUID-VkPipelineViewportStateCreateInfo-scissorCount-04136"); } { const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_VIEWPORT_WITH_COUNT, VK_DYNAMIC_STATE_SCISSOR_WITH_COUNT}; VkPipelineDynamicStateCreateInfo dyn_state_ci = LvlInitStruct<VkPipelineDynamicStateCreateInfo>(); dyn_state_ci.dynamicStateCount = 2; dyn_state_ci.pDynamicStates = dyn_states; VkRect2D scissors = {}; auto set_viewport_state_createinfo = [&](CreatePipelineHelper &helper) { helper.dyn_state_ci_ = dyn_state_ci; helper.vp_state_ci_.scissorCount = 1; helper.vp_state_ci_.pScissors = &scissors; helper.vp_state_ci_.viewportCount = 0; }; CreatePipelineHelper::OneshotTest(*this, set_viewport_state_createinfo, kErrorBit, "VUID-VkPipelineViewportStateCreateInfo-scissorCount-04136"); } } TEST_F(VkLayerTest, DynamicSampleLocations) { TEST_DESCRIPTION("Validate dynamic sample locations."); SetTargetApiVersion(VK_API_VERSION_1_1); AddRequiredExtensions(VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(Init()); if (DeviceValidationVersion() < VK_API_VERSION_1_1) { printf("%s At least Vulkan version 1.1 is required, skipping test.\n", kSkipPrefix); return; } if (!AreRequestedExtensionsEnabled()) { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_SAMPLE_LOCATIONS_EXTENSION_NAME); return; } ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); auto sample_locations_props = LvlInitStruct<VkPhysicalDeviceSampleLocationsPropertiesEXT>(); auto properties2 = LvlInitStruct<VkPhysicalDeviceProperties2>(&sample_locations_props); vk::GetPhysicalDeviceProperties2(gpu(), &properties2); if ((sample_locations_props.sampleLocationSampleCounts & VK_SAMPLE_COUNT_1_BIT) == 0) { printf("%s Required sample location sample count VK_SAMPLE_COUNT_1_BIT not supported, skipping test.\n", kSkipPrefix); return; } auto vkCmdSetSampleLocationsEXT = reinterpret_cast<PFN_vkCmdSetSampleLocationsEXT>(vk::GetDeviceProcAddr(m_device->device(), "vkCmdSetSampleLocationsEXT")); CreatePipelineHelper pipe(*this); 
pipe.InitInfo(); pipe.InitState(); const VkDynamicState dyn_states[] = {VK_DYNAMIC_STATE_SAMPLE_LOCATIONS_EXT}; auto dyn_state_ci = LvlInitStruct<VkPipelineDynamicStateCreateInfo>(); dyn_state_ci.dynamicStateCount = 1; dyn_state_ci.pDynamicStates = dyn_states; pipe.dyn_state_ci_ = dyn_state_ci; pipe.CreateGraphicsPipeline(); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-None-06666"); vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); VkSampleLocationEXT sample_location = {0.5f, 0.5f}; auto sample_locations_info = LvlInitStruct<VkSampleLocationsInfoEXT>(); sample_locations_info.sampleLocationsPerPixel = VK_SAMPLE_COUNT_1_BIT; sample_locations_info.sampleLocationGridSize = {1u, 1u}; sample_locations_info.sampleLocationsCount = 1; sample_locations_info.pSampleLocations = &sample_location; m_errorMonitor->ExpectSuccess(); vkCmdSetSampleLocationsEXT(m_commandBuffer->handle(), &sample_locations_info); vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyNotFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, PrimitivesGeneratedQueryAndDiscardEnabled) { TEST_DESCRIPTION("Test missing primitivesGeneratedQueryWithRasterizerDiscard feature."); SetTargetApiVersion(VK_API_VERSION_1_1); AddRequiredExtensions(VK_EXT_PRIMITIVES_GENERATED_QUERY_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceValidationVersion() < VK_API_VERSION_1_1) { printf("%s test requires Vulkan 1.1+, skipping test\n", kSkipPrefix); return; } if (!AreRequestedExtensionsEnabled()) { printf("%s Extension %s is not supported.\n", kSkipPrefix, VK_EXT_PRIMITIVES_GENERATED_QUERY_EXTENSION_NAME); return; } auto primitives_generated_features = 
LvlInitStruct<VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT>(); primitives_generated_features.primitivesGeneratedQueryWithRasterizerDiscard = VK_FALSE; auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&primitives_generated_features); ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); auto rs_ci = LvlInitStruct<VkPipelineRasterizationStateCreateInfo>(); rs_ci.lineWidth = 1.0f; rs_ci.rasterizerDiscardEnable = VK_TRUE; CreatePipelineHelper pipe(*this); pipe.InitInfo(); pipe.rs_state_ci_ = rs_ci; pipe.InitState(); pipe.CreateGraphicsPipeline(); auto query_pool_ci = LvlInitStruct<VkQueryPoolCreateInfo>(); query_pool_ci.queryCount = 1; query_pool_ci.queryType = VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT; vk_testing::QueryPool query_pool; query_pool.init(*m_device, query_pool_ci); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-primitivesGeneratedQueryWithRasterizerDiscard-06708"); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool.handle(), 0, 0); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_); vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); vk::CmdEndQuery(m_commandBuffer->handle(), query_pool.handle(), 0); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, PrimitivesGeneratedQueryStreams) { TEST_DESCRIPTION("Test missing primitivesGeneratedQueryWithNonZeroStreams feature."); SetTargetApiVersion(VK_API_VERSION_1_1); AddRequiredExtensions(VK_EXT_PRIMITIVES_GENERATED_QUERY_EXTENSION_NAME); AddRequiredExtensions(VK_EXT_TRANSFORM_FEEDBACK_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceValidationVersion() < VK_API_VERSION_1_1) { printf("%s test requires Vulkan 1.1+, skipping test\n", kSkipPrefix); return; } if (!AreRequestedExtensionsEnabled()) { printf("%s 
Required extensions are not supported.\n", kSkipPrefix); return; } auto transform_feedback_features = LvlInitStruct<VkPhysicalDeviceTransformFeedbackFeaturesEXT>(); auto primitives_generated_features = LvlInitStruct<VkPhysicalDevicePrimitivesGeneratedQueryFeaturesEXT>(&transform_feedback_features); auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&primitives_generated_features); vk::GetPhysicalDeviceFeatures2(gpu(), &features2); if (transform_feedback_features.geometryStreams == VK_FALSE) { printf("%s geometryStreams feature not supported, skipping tests.\n", kSkipPrefix); } if (primitives_generated_features.primitivesGeneratedQuery == VK_FALSE) { printf("%s geometryStreams feature not supported, skipping tests.\n", kSkipPrefix); } primitives_generated_features.primitivesGeneratedQueryWithNonZeroStreams = VK_FALSE; ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); auto rasterization_streams = LvlInitStruct<VkPipelineRasterizationStateStreamCreateInfoEXT>(); rasterization_streams.rasterizationStream = 1; CreatePipelineHelper pipe(*this); pipe.InitInfo(); pipe.rs_state_ci_.pNext = &rasterization_streams; pipe.InitState(); pipe.CreateGraphicsPipeline(); auto query_pool_ci = LvlInitStruct<VkQueryPoolCreateInfo>(); query_pool_ci.queryCount = 1; query_pool_ci.queryType = VK_QUERY_TYPE_PRIMITIVES_GENERATED_EXT; vk_testing::QueryPool query_pool; query_pool.init(*m_device, query_pool_ci); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-vkCmdDraw-primitivesGeneratedQueryWithNonZeroStreams-06709"); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBeginQuery(m_commandBuffer->handle(), query_pool.handle(), 0, 0); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_); vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); vk::CmdEndQuery(m_commandBuffer->handle(), query_pool.handle(), 0); m_commandBuffer->EndRenderPass(); 
m_commandBuffer->end(); m_errorMonitor->VerifyFound(); } TEST_F(VkLayerTest, InvalidFragmentShadingRateOps) { TEST_DESCRIPTION("Specify invalid fsr pipeline settings for the enabled features"); m_errorMonitor->ExpectSuccess(); // Enable KHR_fragment_shading_rate and all of its required extensions AddRequiredExtensions(VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (!AreRequestedExtensionsEnabled()) { printf("%s %s required but not available\n", kSkipPrefix, VK_KHR_FRAGMENT_SHADING_RATE_EXTENSION_NAME); return; } auto vkGetPhysicalDeviceFeatures2KHR = reinterpret_cast<PFN_vkGetPhysicalDeviceFeatures2KHR>( vk::GetInstanceProcAddr(instance(), "vkGetPhysicalDeviceFeatures2KHR")); ASSERT_TRUE(vkGetPhysicalDeviceFeatures2KHR != nullptr); VkPhysicalDeviceFragmentShadingRateFeaturesKHR fsr_features = LvlInitStruct<VkPhysicalDeviceFragmentShadingRateFeaturesKHR>(); VkPhysicalDeviceFeatures2KHR features2 = LvlInitStruct<VkPhysicalDeviceFeatures2KHR>(&fsr_features); vkGetPhysicalDeviceFeatures2KHR(gpu(), &features2); if (!fsr_features.primitiveFragmentShadingRate) { printf("%s primitiveFragmentShadingRate not available.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); ASSERT_NO_FATAL_FAILURE(InitRenderTarget()); m_errorMonitor->VerifyNotFound(); VkPipelineFragmentShadingRateStateCreateInfoKHR fsr_ci = LvlInitStruct<VkPipelineFragmentShadingRateStateCreateInfoKHR>(); fsr_ci.fragmentSize.width = 1; fsr_ci.fragmentSize.height = 1; fsr_ci.combinerOps[0] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR; fsr_ci.combinerOps[1] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR; auto set_fsr_ci = [&](CreatePipelineHelper &helper) { helper.gp_ci_.pNext = &fsr_ci; }; // Pass an invalid value for op 0 fsr_ci.combinerOps[0] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_ENUM_KHR; CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-06567"); 
fsr_ci.combinerOps[0] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_KEEP_KHR; // Pass an invalid value for op 1 fsr_ci.combinerOps[1] = VK_FRAGMENT_SHADING_RATE_COMBINER_OP_MAX_ENUM_KHR; CreatePipelineHelper::OneshotTest(*this, set_fsr_ci, kErrorBit, "VUID-VkGraphicsPipelineCreateInfo-pDynamicState-06568"); } TEST_F(VkLayerTest, TestMaxFragmentDualSrcAttachments) { TEST_DESCRIPTION("Test drawing with dual source blending with too many fragment output attachments."); SetTargetApiVersion(VK_API_VERSION_1_1); ASSERT_NO_FATAL_FAILURE(InitFramework(m_errorMonitor)); if (DeviceValidationVersion() < VK_API_VERSION_1_1) { printf("%s At least Vulkan version 1.1 is required, skipping test.\n", kSkipPrefix); return; } auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(); vk::GetPhysicalDeviceFeatures2(gpu(), &features2); if (features2.features.dualSrcBlend == VK_FALSE) { printf("%s dualSrcBlend feature is not available, skipping test.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); uint32_t count = m_device->props.limits.maxFragmentDualSrcAttachments + 1; ASSERT_NO_FATAL_FAILURE(InitRenderTarget(count)); std::stringstream fsSource; fsSource << "#version 450\n"; for (uint32_t i = 0; i < count; ++i) { fsSource << "layout(location = " << i << ") out vec4 c" << i << ";\n"; } fsSource << " void main() {\n"; for (uint32_t i = 0; i < count; ++i) { fsSource << "c" << i << " = vec4(0.0f);\n"; } fsSource << "}"; VkShaderObj fs(this, fsSource.str().c_str(), VK_SHADER_STAGE_FRAGMENT_BIT); VkPipelineColorBlendAttachmentState cb_attachments = {}; cb_attachments.blendEnable = VK_TRUE; cb_attachments.srcColorBlendFactor = VK_BLEND_FACTOR_SRC1_COLOR; // bad! 
cb_attachments.dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR; cb_attachments.colorBlendOp = VK_BLEND_OP_ADD; cb_attachments.srcAlphaBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA; cb_attachments.dstAlphaBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA; cb_attachments.alphaBlendOp = VK_BLEND_OP_ADD; CreatePipelineHelper pipe(*this); pipe.InitInfo(); pipe.cb_attachments_[0] = cb_attachments; pipe.shader_stages_ = {pipe.vs_->GetStageCreateInfo(), fs.GetStageCreateInfo()}; pipe.InitState(); pipe.CreateGraphicsPipeline(); m_commandBuffer->begin(); m_commandBuffer->BeginRenderPass(m_renderPassBeginInfo); vk::CmdBindPipeline(m_commandBuffer->handle(), VK_PIPELINE_BIND_POINT_GRAPHICS, pipe.pipeline_); m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-Fragment-06427"); vk::CmdDraw(m_commandBuffer->handle(), 3, 1, 0, 0); m_errorMonitor->VerifyFound(); m_commandBuffer->EndRenderPass(); m_commandBuffer->end(); } TEST_F(VkLayerTest, TestComputeLocalWorkgroupSize) { TEST_DESCRIPTION("Test size of local workgroud with requiredSubgroupSize."); SetTargetApiVersion(VK_API_VERSION_1_1); AddRequiredExtensions(VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME); ASSERT_NO_FATAL_FAILURE(InitFramework()); if (DeviceValidationVersion() < VK_API_VERSION_1_1) { printf("%s Tests requires Vulkan 1.1+, skipping test\n", kSkipPrefix); return; } if (!AreRequestedExtensionsEnabled()) { printf("%s Extension %s is not supported, skipping tests.\n", kSkipPrefix, VK_EXT_SUBGROUP_SIZE_CONTROL_EXTENSION_NAME); return; } auto sscf = LvlInitStruct<VkPhysicalDeviceSubgroupSizeControlFeaturesEXT>(); auto features2 = LvlInitStruct<VkPhysicalDeviceFeatures2>(&sscf); vk::GetPhysicalDeviceFeatures2(gpu(), &features2); if (sscf.subgroupSizeControl == VK_FALSE || sscf.computeFullSubgroups == VK_FALSE) { printf("%s Required features are not supported, skipping test.\n", kSkipPrefix); return; } ASSERT_NO_FATAL_FAILURE(InitState(nullptr, &features2)); auto subgroup_properties = 
LvlInitStruct<VkPhysicalDeviceSubgroupSizeControlPropertiesEXT>(); auto props = LvlInitStruct<VkPhysicalDeviceProperties2>(&subgroup_properties); vk::GetPhysicalDeviceProperties2(gpu(), &props); auto subgroup_size_control = LvlInitStruct<VkPipelineShaderStageRequiredSubgroupSizeCreateInfoEXT>(); subgroup_size_control.requiredSubgroupSize = subgroup_properties.minSubgroupSize; uint32_t size = static_cast<uint32_t>( std::ceil(std::sqrt(subgroup_size_control.requiredSubgroupSize * subgroup_properties.maxComputeWorkgroupSubgroups))); if (size <= 1024) { std::stringstream csSource; csSource << R"glsl( #version 450 layout(local_size_x= )glsl"; csSource << size; csSource << R"glsl(, local_size_y= )glsl"; csSource << size; csSource << R"glsl(, local_size_z=2) in; void main(){ if (gl_GlobalInvocationID.x >= 0) { return; } } )glsl"; CreateComputePipelineHelper pipe(*this); pipe.InitInfo(); pipe.cs_.reset(new VkShaderObj(this, csSource.str().c_str(), VK_SHADER_STAGE_COMPUTE_BIT)); pipe.InitState(); pipe.LateBindPipelineInfo(); pipe.cp_ci_.stage.pNext = &subgroup_size_control; if (size * size * 2 > m_device->props.limits.maxComputeWorkGroupInvocations) { m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-RuntimeSpirv-x-06432"); } m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineShaderStageCreateInfo-pNext-02756"); pipe.CreateComputePipeline(true, false); m_errorMonitor->VerifyFound(); } if (subgroup_properties.maxSubgroupSize > 1) { std::stringstream csSource; csSource << R"glsl( #version 450 layout(local_size_x= )glsl"; csSource << subgroup_properties.maxSubgroupSize - 1; csSource << R"glsl(, local_size_y=1, local_size_z=1) in; void main(){ if (gl_GlobalInvocationID.x >= 0) { return; } } )glsl"; CreateComputePipelineHelper pipe(*this); pipe.InitInfo(); pipe.cs_.reset(new VkShaderObj(this, csSource.str().c_str(), VK_SHADER_STAGE_COMPUTE_BIT)); pipe.InitState(); pipe.LateBindPipelineInfo(); pipe.cp_ci_.stage.pNext = &subgroup_size_control; 
pipe.cp_ci_.stage.flags = VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT; m_errorMonitor->SetDesiredFailureMsg(kErrorBit, "VUID-VkPipelineShaderStageCreateInfo-pNext-02757"); pipe.CreateComputePipeline(true, false); m_errorMonitor->VerifyFound(); } }
#include "Application/Application.h"
#include "Input/InputHandler.h"
#include "UserInterface/ImGuiLayer.h"
#include "Rendering/BufferLayout.h"
#include "Rendering/Renderer.h"
#include "UserInterface/OutputLog.h"
#include "Rendering/RenderTypes.h"

inline namespace MARS
{
	// Singleton instance pointer and the process-wide log sink.
	Application* Application::Instance = nullptr;
	OutputLog Application::GlobalOutputLog = OutputLog();

	// Sets up the window, ImGui overlay, and the two demo meshes (a colored
	// triangle and a flat-colored square) together with their shaders.
	Application::Application()
		: Camera(-1.6f, 1.6f, -.9f, .9f)
	{
		bRunning = true;
		Instance = this;
		WindowPtr = std::unique_ptr<Window>(Window::Create());
		WindowPtr->SetEventCallback(BIND_EVENT_ONE_PARAM(Application::OnEvent));

		ImGuiLayerPtr = new ImGuiLayer;
		PushOverlay(ImGuiLayerPtr);

		// Triangle: 3 vertices, each position (3 floats) + color (4 floats).
		m_VertArray.reset(VertexArray::Create());
		float verts[3 * 7] = {
			-0.5f, -0.5f, 0.f,  0.2f, 0.8f, 0.2f, 1.f,
			 0.5f, -0.5f, 0.0f, 0.8f, 0.3f, 0.8f, 1.f,
			 0.f,   0.5f, 0.f,  0.4f, 0.7f, 0.2f, 1.f
		};
		m_VertBuffer.reset(VertexBuffer::Create(verts, sizeof(verts)));
		BufferLayout Layout = {
			{ ShaderDataType::Float3, "a_Position" },
			{ ShaderDataType::Float4, "a_Color" }
		};
		m_VertBuffer->SetLayout(Layout);
		m_VertArray->AddVertexBuffer(m_VertBuffer);

		uint32 indices[3] = { 0, 1, 2 };
		m_IndexBuffer.reset(IndexBuffer::Create(indices, sizeof(indices) / sizeof(uint32)));
		m_VertArray->SetIndexBuffer(m_IndexBuffer);

		// Square: position-only vertices, two triangles.
		SQ_VertArray.reset(VertexArray::Create());
		float SQ_verts[3 * 4] = {
			-0.5f, -0.5f, 0.f,
			 0.5f, -0.5f, 0.0f,
			 0.5f,  0.5f, 0.f,
			-0.5f,  0.5f, 0.f
		};
		SQ_VertBuffer.reset(VertexBuffer::Create(SQ_verts, sizeof(SQ_verts)));
		SQ_VertBuffer->SetLayout({{ ShaderDataType::Float3, "a_Position" }});
		SQ_VertArray->AddVertexBuffer(SQ_VertBuffer);

		uint32 SQ_indices[6] = { 0, 1, 2, 2, 3, 0 };
		SQ_IndexBuffer.reset(IndexBuffer::Create(SQ_indices, sizeof(SQ_indices) / sizeof(uint32)));
		SQ_VertArray->SetIndexBuffer(SQ_IndexBuffer);

		String vSource = R"(
			#version 330 core

			layout(location = 0) in vec3 a_Position;
			layout(location = 1) in vec4 a_Color;

			out vec3 v_Position;
			out vec4 v_Color;

			uniform mat4 ViewProjection;

			void main()
			{
				v_Position = a_Position;
				v_Color = a_Color;
				gl_Position = ViewProjection * vec4(a_Position, 1.0);
			}
		)";

		String fSource = R"(
			#version 330 core

			layout(location = 0) out vec4 color;

			in vec3 v_Position;
			in vec4 v_Color;

			void main()
			{
				color = vec4(v_Position * 0.5 + 0.5, 1.0);
				color = v_Color;
			}
		)";

		String vSource2 = R"(
			#version 330 core

			layout(location = 0) in vec3 a_Position;

			out vec3 v_Position;
			out vec4 v_Color;

			uniform mat4 ViewProjection;

			void main()
			{
				v_Position = a_Position;
				gl_Position = ViewProjection * vec4(a_Position, 1.0);
			}
		)";

		String fSource2 = R"(
			#version 330 core

			layout(location = 0) out vec4 color;

			in vec3 v_Position;
			in vec4 v_Color;

			void main()
			{
				color = vec4(0.2, 0.3, 0.8, 1.0);
			}
		)";

		m_Shader = std::make_shared<Shader>(vSource, fSource);
		m_Shader2 = std::make_shared<Shader>(vSource2, fSource2);
	}

	Application::~Application()
	{
	}

	// Runs the startup log message and enters the main loop.
	void Application::InitMARS()
	{
		Log::Get(LogInit).Info("MARS Pre-Init Completed with 0 errors."); // TODO update this when the error log is written.
		Run();
	}

	// Main loop: clear, draw both meshes, render layer UI, swap buffers.
	void Application::Run()
	{
		while (bRunning)
		{
			RenderCommands::SetClearColor(LinearColor::Gray);
			RenderCommands::Clear();

			Renderer::BeginScene(Camera);

			m_Shader2->Bind();
			m_Shader2->UploadUniformMat4(Camera.GetViewProjectionMatrix(), "ViewProjection");
			Renderer::Submit(SQ_VertArray);

			m_Shader->Bind();
			// Fix: was uploading the ViewProjection matrix to m_Shader2 again
			// (copy-paste), so the triangle shader never received the camera matrix.
			m_Shader->UploadUniformMat4(Camera.GetViewProjectionMatrix(), "ViewProjection");
			Renderer::Submit(m_VertArray);

			Renderer::EndScene();

			ImGuiLayerPtr->OnBegin();
			for (auto* Element : m_LayerStack)
			{
				/*Element->OnUpdate();*/
				Element->RenderLayerUI();
			}
			ImGuiLayerPtr->OnEnd();

			WindowPtr->Refresh();
		}
	}

	// Dispatches window-close, then walks layers top-most first until handled.
	void Application::OnEvent(Event& e)
	{
		EventDispatcher _Dispatcher(e);
		_Dispatcher.Dispatch<WindowCloseEvent>(BIND_EVENT_ONE_PARAM(Application::OnWindowClose));

		for (auto It = m_LayerStack.end(); It != m_LayerStack.begin();)
		{
			(*--It)->OnEvent(e);
			if (e.IsEventHandled())
			{
				break;
			}
		}
	}

	void Application::PushLayer(Layer* InLayer)
	{
		m_LayerStack.PushElement(InLayer);
		InLayer->OnAttach();
	}

	void Application::PushOverlay(Layer* InOverlay)
	{
		m_LayerStack.PushOverlay(InOverlay);
		InOverlay->OnAttach();
	}

	// Stops the main loop; returning true marks the event as handled.
	bool Application::OnWindowClose(WindowCloseEvent& e)
	{
		UNUSED_PROPERTY(e)
		bRunning = false;
		return true;
	}
}
// Unit tests for the hello_world sample library (Catch2 framework).
#include "hello_world.h"
#include "test/catch.hpp"

// hello() must return the canonical greeting.
TEST_CASE("test_hello") { REQUIRE(hello_world::hello() == "Hello, World!"); }

// backyard() must return its fixed easter-egg string.
TEST_CASE("test_backyard") { REQUIRE(hello_world::backyard() == "OMG! There's a giant branch!"); }
//================================================================================================= /*! // \file src/blaze/DMatTDMatMult.cpp // \brief Source file for the Blaze dense matrix/transpose dense matrix multiplication kernel // // Copyright (C) 2012-2018 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/
//=================================================================================================


//*************************************************************************************************
// Includes
//*************************************************************************************************

#include <iostream>
#include <blaze/math/DynamicMatrix.h>
#include <blaze/util/Timing.h>
#include <blazemark/blaze/DMatTDMatMult.h>
#include <blazemark/blaze/init/DynamicMatrix.h>
#include <blazemark/system/Config.h>


namespace blazemark {

namespace blaze {

//=================================================================================================
//
//  KERNEL FUNCTIONS
//
//=================================================================================================

//*************************************************************************************************
/*!\brief Blaze dense matrix/transpose dense matrix multiplication kernel.
//
// \param N The number of rows and columns of the matrices.
// \param steps The number of iteration steps to perform.
// \return Minimum runtime of the kernel function.
//
// This kernel function implements the dense matrix/transpose dense matrix multiplication by
// means of the Blaze functionality.
*/
double dmattdmatmult( size_t N, size_t steps )
{
   using ::blazemark::element_t;
   using ::blaze::rowMajor;
   using ::blaze::columnMajor;

   ::blaze::setSeed( seed );

   // A is row-major, B column-major, so C = A * B exercises the mixed-storage kernel.
   ::blaze::DynamicMatrix<element_t,rowMajor> A( N, N ), C( N, N );
   ::blaze::DynamicMatrix<element_t,columnMajor> B( N, N );
   ::blaze::timing::WcTimer timer;

   init( A );
   init( B );

   // Untimed warm-up multiplication before the measurement loop starts.
   C = A * B;

   // 'reps' timed repetitions of 'steps' multiplications each; stop early once a
   // single repetition exceeds 'maxtime'.
   for( size_t rep=0UL; rep<reps; ++rep ) {
      timer.start();
      for( size_t step=0UL; step<steps; ++step ) {
         C = A * B;
      }
      timer.end();

      // Cheap sanity check that also prevents the compiler from eliding the work.
      if( C.rows() != N )
         std::cerr << " Line " << __LINE__ << ": ERROR detected!!!\n";

      if( timer.last() > maxtime )
         break;
   }

   const double minTime( timer.min()     );
   const double avgTime( timer.average() );

   // Warn if the spread between best and average run exceeds the allowed deviation (percent).
   if( minTime * ( 1.0 + deviation*0.01 ) < avgTime )
      std::cerr << " Blaze kernel 'dmattdmatmult': Time deviation too large!!!\n";

   return minTime;
}
//*************************************************************************************************

} // namespace blaze

} // namespace blazemark
// Purpose: // Test that LimitStep commands can exist on the same from line. // // REQUIRES: system-linux // // RUN: %dexter_regression_test -- %s | FileCheck %s // CHECK: limit_steps_same_line_conditional.cpp int main() { int val1 = 0; int placeholder; for(int ix = 0; ix != 4; ++ix) { val1 = ix; placeholder = ix; // DexLabel('from') placeholder = ix; val1 += 2; // DexLabel('to') placeholder = ix; // DexLabel('extended_to') } return val1 + placeholder; } // DexExpectWatchValue('val1', 0, 1, 3, from_line=ref('from'), to_line=ref('extended_to')) // DexLimitSteps('ix', 0, from_line=ref('from'), to_line=ref('to')) // DexLimitSteps('ix', 1, from_line=ref('from'), to_line=ref('extended_to'))
// Copyright (C) 2015-2018 Cameron Angus. All Rights Reserved.

#include "Charts/SKantanCartesianChart.h"
#include "IDataSeriesElement.h"
#include "KantanCartesianDatasourceInterface.h"
#include "SimpleRenderTarget.h"
#include "FloatRoundingLevel.h"
#include "AxisUtility.h"
#include "ChartConstants.h"
#include "Style/KantanSeriesStyleSet.h"
#include "KantanChartsSlateModule.h"
#include "Framework/Application/SlateApplication.h"
#include "RenderingThread.h"
#include "Engine/Texture2D.h"
#include "RenderUtils.h"
#include "CanvasTypes.h"
#include "CanvasItem.h"
#include "Fonts/FontMeasure.h"

// Per-series renderer factories implemented elsewhere in the module. Which one is
// used is decided by FKantanChartsSlateModule::bCustomSeriesDrawing (see
// UpdateDrawingElementsFromDatasource below).
TSharedRef< IDataSeriesElement, ESPMode::ThreadSafe > MakeCustomSeriesElement(UTexture2D* Tex, FVector2D PntSz, FLinearColor Cl, FBox2D UVs);
TSharedRef< IDataSeriesElement, ESPMode::ThreadSafe > MakeSlateBoxSeriesElement(UTexture2D* Tex, FVector2D PntSz, FLinearColor Cl, FBox2D UVs);

SKantanCartesianChart::~SKantanCartesianChart()
{
	DiscardAllDrawingElements();
}

void SKantanCartesianChart::Construct(const FArguments& InArgs)
{
	SKantanChart::Construct(
		SKantanChart::FArguments()
		.UpdateTickRate(InArgs._UpdateTickRate)
		);

	SetStyle(InArgs._Style);
	SetDatasource(InArgs._Datasource);
	SetUseAutoPerSeriesStyles(true);
	SetPlotScale(InArgs._PlotScale);
	SetDataPointSize(InArgs._DataPointSize);
	SetAxisTitlePadding(FMargin(0.0f, 4.0f));
	SetAntialiasDataLines(true);
	SetOnUpdatePlotScale(InArgs._OnUpdatePlotScale);
}

void SKantanCartesianChart::SetStyle(const FKantanCartesianChartStyle* InStyle)
{
	Style = InStyle;
}

void SKantanCartesianChart::SetStyleFromAsset(USlateWidgetStyleContainerBase* InStyleAsset)
{
	// Only accepts cartesian chart style assets; other asset types are ignored.
	if(auto Asset = Cast< UKantanCartesianChartWidgetStyle >(InStyleAsset))
	{
		Style = &Asset->ChartStyle;
	}
}

// Swaps in a new datasource object (may be null to clear). Returns false if the
// object does not implement the cartesian datasource interface.
bool SKantanCartesianChart::SetDatasource(UObject* InDatasource)
{
	if (IsNullOrValidDatasource(InDatasource) == false)
	{
		return false;
	}

	if (Datasource != InDatasource)
	{
		Datasource = InDatasource;
		if (Datasource)
		{
			// Immediately update the data snapshot
			DataSnapshot.UpdateFromDatasource(Datasource);
		}
		else
		{
			DataSnapshot.Clear();
		}

		UpdateSeriesConfigFromDatasource();
		UpdateDrawingElementsFromDatasource();
	}

	return true;
}

void SKantanCartesianChart::SetUseAutoPerSeriesStyles(bool bEnable)
{
	bAutoPerSeriesStyles = bEnable;
}

void SKantanCartesianChart::SetSeriesStylesList(TArray< FKantanSeriesStyle > const& Styles)
{
	SeriesStyles = Styles;
}

void SKantanCartesianChart::LoadSeriesStylesList(const FSoftObjectPath& Styles)
{
	// Synchronously loads the style-set asset; silently does nothing on failure.
	auto SeriesStyleSet = Cast< UKantanSeriesStyleSet >(Styles.TryLoad());
	if(SeriesStyleSet)
	{
		SetSeriesStylesList(SeriesStyleSet->Styles);
	}
}

void SKantanCartesianChart::SetManualSeriesStyleMappings(TMap< FName, FName > const& Mappings)
{
	for (auto const& Mp : Mappings)
	{
		auto const& Id = Mp.Key;
		auto& Cfg = SeriesConfig.FindOrAdd(Id);
		Cfg.SeriesStyleId = Mp.Value;
	}
}

void SKantanCartesianChart::SetPlotScale(FKantanCartesianPlotScale const& Scaling)
{
	PlotScale = Scaling;
}

void SKantanCartesianChart::SetDataPointSize(EKantanDataPointSize::Type InSize)
{
	DataPointSize = InSize;
}

void SKantanCartesianChart::SetAxisTitlePadding(FMargin const& InPadding)
{
	AxisTitlePadding = InPadding;
}

void SKantanCartesianChart::SetXAxisConfig(FCartesianAxisConfig const& InConfig)
{
	XAxisCfg = InConfig;
}

void SKantanCartesianChart::SetYAxisConfig(FCartesianAxisConfig const& InConfig)
{
	YAxisCfg = InConfig;
}

void SKantanCartesianChart::SetAntialiasDataLines(bool bEnable)
{
	bAntialiasDataLines = bEnable;
}

void SKantanCartesianChart::SetOnUpdatePlotScale(FOnUpdatePlotScale Delegate)
{
	OnUpdatePlotScaleDelegate = Delegate;
}

void SKantanCartesianChart::EnableSeries(FName Id, bool bEnable)
{
	auto& Cfg = SeriesConfig.FindOrAdd(Id);
	Cfg.bEnabled = bEnable;
}

void SKantanCartesianChart::ConfigureSeries(FName Id, bool bDrawPoints, bool bDrawLines)
{
	auto& Cfg = SeriesConfig.FindOrAdd(Id);
	Cfg.bDrawPoints = bDrawPoints;
	Cfg.bDrawLines = bDrawLines;
}

void SKantanCartesianChart::SetSeriesStyle(FName Id, FName StyleId)
{
	auto& Cfg = SeriesConfig.FindOrAdd(Id);
	Cfg.SeriesStyleId = StyleId;
}

// Removes the configuration for one series, or for all series when Id is NAME_None.
void SKantanCartesianChart::ResetSeries(FName Id)
{
	if (Id == NAME_None)
	{
		SeriesConfig.Empty();
	}
	else
	{
		SeriesConfig.Remove(Id);
	}
}

/*
void SKantanCartesianChart::ResetSeriesNotInDatasource()
{
	TSet< FName > ToRemove;
	for(auto const& Cfg : SeriesConfig)
	{
		auto Id = Cfg.Key;
		if()
		{
			ToRemove.Add(Id);
		}
	}

	for(auto Id : ToRemove)
	{
		SeriesConfig.Remove(Id);
	}
}
*/

bool SKantanCartesianChart::IsSeriesEnabled(FName Id) const
{
	auto Cfg = SeriesConfig.Find(Id);
	return Cfg && Cfg->bEnabled;
}

bool SKantanCartesianChart::IsSeriesShowingLines(FName Id) const
{
	auto Cfg = SeriesConfig.Find(Id);
	return Cfg && Cfg->bDrawLines;
}

bool SKantanCartesianChart::IsSeriesShowingPoints(FName Id) const
{
	auto Cfg = SeriesConfig.Find(Id);
	return Cfg && Cfg->bDrawPoints;
}

// The following accessors read from the cached DataSnapshot rather than hitting
// the datasource interface directly (the direct calls are kept commented out).
int32 SKantanCartesianChart::GetNumSeries() const
{
	/*	return DatasourceInterface ?
		IKantanCartesianDatasourceInterface::Execute_GetNumSeries(DatasourceInterface) : 0;
		*/
	return DataSnapshot.Elements.Num();
}

FName SKantanCartesianChart::GetSeriesId(int32 Index) const
{
	/*	return DatasourceInterface ?
		IKantanCartesianDatasourceInterface::Execute_GetSeriesId(DatasourceInterface, Index) : NAME_None;
		*/
	return DataSnapshot.Elements[Index].Id;
}

FText SKantanCartesianChart::GetSeriesLabel(int32 Index) const
{
	/*	return DatasourceInterface ?
		IKantanCartesianDatasourceInterface::Execute_GetSeriesName(DatasourceInterface, Index) : FText();
		*/
	return DataSnapshot.Elements[Index].Name;
}

TArray< FKantanCartesianDatapoint > SKantanCartesianChart::GetSeriesDatapoints(int32 Index) const
{
	/*	return DatasourceInterface ?
		IKantanCartesianDatasourceInterface::Execute_GetSeriesDatapoints(DatasourceInterface, Index) : TArray < FKantanCartesianDatapoint > {};
		*/
	return DataSnapshot.Elements[Index].Points;
}

// Resolves the style configured for a series; falls back to a default-constructed
// style when the series has no (valid) style mapping.
FKantanSeriesStyle const& SKantanCartesianChart::GetSeriesStyle(FName SeriesId) const
{
	// @TODO prob temp should be chart property
	static FKantanSeriesStyle DefaultStyle = FKantanSeriesStyle();

	if (SeriesId.IsNone() == false)
	{
		auto CfgEntry = SeriesConfig.Find(SeriesId);
		if (CfgEntry && CfgEntry->SeriesStyleId.IsNone() == false)
		{
			auto SeriesStyle = FindSeriesStyle(CfgEntry->SeriesStyleId);
			if (SeriesStyle)
			{
				return *SeriesStyle;
			}
		}
	}

	return DefaultStyle;
}

bool SKantanCartesianChart::IsValidDatasource(UObject* Source)
{
	return Source != nullptr &&
		Source->GetClass()->ImplementsInterface(IKantanCartesianDatasourceInterface::UClassType::StaticClass());
}

bool SKantanCartesianChart::IsNullOrValidDatasource(UObject* Source)
{
	return Source == nullptr || IsValidDatasource(Source);
}

void SKantanCartesianChart::DiscardDrawingElements(TArray< FSeriesElemPtr >& Elements)
{
	// Pass ownership of the series elements to the render thread so that they're deleted when the
	// render thread is done with them
	ENQUEUE_RENDER_COMMAND(SafeDeleteSeriesElements)(
		[ElemList = MakeUnique<TArray< FSeriesElemPtr >>(MoveTemp(Elements))](FRHICommandListImmediate& RHICmdList)
		{
			ElemList->Empty();
		}
	);

	Elements.Empty();
}

void SKantanCartesianChart::DiscardAllDrawingElements()
{
	TArray< FSeriesElemPtr > Elems;
	SeriesElements.GenerateValueArray(Elems);
	SeriesElements.Empty();
	DiscardDrawingElements(Elems);
}

// Creates a rendering element for every snapshot series that lacks one and
// discards elements whose series no longer exist in the snapshot.
void SKantanCartesianChart::UpdateDrawingElementsFromDatasource()
{
	TArray< FName > Unused;
	SeriesElements.GenerateKeyArray(Unused);

	const auto NumSeries = GetNumSeries();
	for (int32 Idx = 0; Idx < NumSeries; ++Idx)
	{
		const auto SeriesId = GetSeriesId(Idx);
		if (SeriesId.IsNone() == false)
		{
			if(SeriesElements.Contains(SeriesId) == false)
			{
				const bool bCustomPoints = KantanCharts::FKantanChartsSlateModule::bCustomSeriesDrawing;

				// Series not in map
				// Create new slate rendering element and add to map
				const auto ChartStyle = GetChartStyle();
				const auto& SeriesStyle = GetSeriesStyle(SeriesId);
				const auto PointTexture = SeriesStyle.HasValidPointStyle() ? SeriesStyle.PointStyle->DataPointTexture : nullptr;
				const int32 DP_PixelSize = KantanDataPointPixelSizes[DataPointSize];
				const auto PointSize = FVector2D(DP_PixelSize, DP_PixelSize);
				const auto PointColor = SeriesStyle.Color * FLinearColor(1, 1, 1, ChartStyle->DataOpacity);
				auto PointUVs = FBox2D(FVector2D(0.0f, 0.0f), FVector2D(1.0f, 1.0f));
				if (SeriesStyle.HasValidPointStyle())
				{
					// Select the sub-rect of the point texture corresponding to the configured point size.
					const auto PointStyle = SeriesStyle.PointStyle;
					const FVector2D TextureSize = FVector2D(
						PointStyle->DataPointTexture->GetSizeX(),
						PointStyle->DataPointTexture->GetSizeY()
						);
					PointUVs.Min.Set(
						PointStyle->PointSizeTextureOffsets[DataPointSize].X / TextureSize.X,
						PointStyle->PointSizeTextureOffsets[DataPointSize].Y / TextureSize.Y
						);
					PointUVs.Max.Set(
						(PointStyle->PointSizeTextureOffsets[DataPointSize].X + DP_PixelSize) / TextureSize.X,
						(PointStyle->PointSizeTextureOffsets[DataPointSize].Y + DP_PixelSize) / TextureSize.Y
						);
				}

				if (bCustomPoints)
				{
					SeriesElements.Add(SeriesId, MakeCustomSeriesElement(PointTexture, PointSize, PointColor, PointUVs));
				}
				else
				{
					SeriesElements.Add(SeriesId, MakeSlateBoxSeriesElement(PointTexture, PointSize, PointColor, PointUVs));
				}
			}

			Unused.Remove(SeriesId);
		}
	}

	// Remove unused elements
	TArray< FSeriesElemPtr > UnusedElements;
	for(auto const& Id : Unused)
	{
		UnusedElements.Add(SeriesElements.FindAndRemoveChecked(Id));
	}
	DiscardDrawingElements(UnusedElements);
}

// Ensures every snapshot series has a config entry (auto-assigning a style when
// enabled) and drops configs for series that disappeared from the snapshot.
void SKantanCartesianChart::UpdateSeriesConfigFromDatasource()
{
	TArray< FName > Unused;
	SeriesConfig.GenerateKeyArray(Unused);

	// Loop through all series in the datasource
	const auto NumSeries = GetNumSeries();
	for (int32 Idx = 0; Idx < NumSeries; ++Idx)
	{
		const auto SeriesId = GetSeriesId(Idx);
		if (SeriesId.IsNone() == false)
		{
			auto Cfg = SeriesConfig.Find(SeriesId);
			if (Cfg == nullptr)
			{
				// Not configured, set up default config
				Cfg = &SeriesConfig.Add(SeriesId, FSeriesConfig{});
			}

			if (bAutoPerSeriesStyles && Cfg->SeriesStyleId.IsNone())
			{
				Cfg->SeriesStyleId = GetNextSeriesStyle();
			}

			Unused.Remove(SeriesId);
		}
	}

	// Remove unused configs to free up series styles
	for(auto const& Id : Unused)
	{
		SeriesConfig.Remove(Id);
	}
}

// Returns the first entry of SeriesStyles not currently assigned to any series
// config, or NAME_None when all styles are taken.
FName SKantanCartesianChart::GetNextSeriesStyle() const
{
	auto IsStyleUsed = [this](FName const& StyleId)
	{
		for (auto const& Cfg : SeriesConfig)
		{
			if (Cfg.Value.SeriesStyleId == StyleId)
			{
				return true;
			}
		}
		return false;
	};

	for (auto const& SeriesStyle : SeriesStyles)
	{
		if (IsStyleUsed(SeriesStyle.StyleId) == false)
		{
			// This style currently not being used
			return SeriesStyle.StyleId;
		}
	}

	// Currently if no unused styles available, return None (which will lead to using default style)
	return NAME_None;
}

// Sanitizes an axis range: rejects NaN/Inf, normalizes ordering, and widens
// degenerate (zero-size) ranges to the nearest representable distinct bounds.
FCartesianAxisRange SKantanCartesianChart::ValidateAxisDisplayRange(FCartesianAxisRange InRange)
{
	if(InRange.ContainsNaNOrInf())
	{
		// @TODO: Log error
		return FCartesianAxisRange(-1.0f, 1.0f);
	}

	// Normalize
	if(InRange.Min > InRange.Max)
	{
		Swap(InRange.Min, InRange.Max);
	}

	// Disallow zero sized range
	if(InRange.IsZero())
	{
		if(InRange.Max == 0.0f)
		{
			InRange.Min = -1.0f;
			InRange.Max = 1.0f;
		}
		else
		{
			// Incr max
			{
				int n = (int)std::floor(std::log10(std::abs(InRange.Max)));
				// Loop until we find an incremented value that is representable as a value distinct from Max
				auto NewMax = InRange.Max;
				for(; NewMax == InRange.Max && (NewMax == 0.0f || std::isnormal(NewMax)); ++n)
				{
					auto Incr = std::pow(10.0f, n);
					NewMax = InRange.Max + Incr;
				}
				if(std::isnormal(NewMax))
				{
					InRange.Max = NewMax;
				}
			}

			// Decr min
			{
				int n = (int)std::floor(std::log10(std::abs(InRange.Min)));
				// Loop until we find an incremented value that is representable as a value distinct from Max
				auto NewMin = InRange.Min;
				for(; NewMin == InRange.Min && (NewMin == 0.0f || std::isnormal(NewMin)); ++n)
				{
					auto Decr = std::pow(10.0f, n);
					NewMin = InRange.Min - Decr;
				}
				if(std::isnormal(NewMin))
				{
					InRange.Min = NewMin;
				}
			}

			ensure(InRange.IsZero() == false);
		}
	}

	return InRange;
}

// Draws one region of the chart (axis titles, axis markers/labels, or the plot
// area itself) depending on Area, returning the top-most layer id used.
int32 SKantanCartesianChart::DrawChartArea(
	EChartContentArea::Type Area,
	const FPaintArgs& Args,
	const FGeometry& Geometry,
	const FGeometry& PlotSpaceGeometry,
	const FSlateRect& MyClippingRect,
	FSlateWindowElementList& OutDrawElements,
	int32 LayerId,
	const FWidgetStyle& InWidgetStyle,
	bool bParentEnabled
	) const
{
	// Used to track the layer ID we will return.
	int32 RetLayerId = LayerId;

	bool bEnabled = ShouldBeEnabled(bParentEnabled);
	const ESlateDrawEffect DrawEffects = bEnabled ? ESlateDrawEffect::None : ESlateDrawEffect::DisabledEffect;

	// Pre-snap the clipping rect to try and reduce common jitter, since the padding is typically only a single pixel.
	FSlateRect SnappedClippingRect = FSlateRect(FMath::RoundToInt(MyClippingRect.Left), FMath::RoundToInt(MyClippingRect.Top), FMath::RoundToInt(MyClippingRect.Right), FMath::RoundToInt(MyClippingRect.Bottom));

	if (PlotSpaceGeometry.GetLocalSize().X == 0 || PlotSpaceGeometry.GetLocalSize().Y == 0)
	{
		// @TODO: Bit of a cheap way out, avoiding some division by zero issues
		return RetLayerId;
	}

	switch (Area)
	{
	case EChartContentArea::XAxisBottomTitle:
		if (XAxisCfg.LeftBottomAxis.bEnabled && XAxisCfg.LeftBottomAxis.bShowTitle)
		{
			DrawXAxisTitle(Geometry, SnappedClippingRect, OutDrawElements, RetLayerId, XAxisCfg, GetCachedMarkerData(EAxis::X, PlotSpaceGeometry));
		}
		break;
	case EChartContentArea::XAxisTopTitle:
		if (XAxisCfg.RightTopAxis.bEnabled && XAxisCfg.RightTopAxis.bShowTitle)
		{
			DrawXAxisTitle(Geometry, SnappedClippingRect, OutDrawElements, RetLayerId, XAxisCfg, GetCachedMarkerData(EAxis::X, PlotSpaceGeometry));
		}
		break;
	case EChartContentArea::YAxisLeftTitle:
		if (YAxisCfg.LeftBottomAxis.bEnabled && YAxisCfg.LeftBottomAxis.bShowTitle)
		{
			DrawYAxisTitle(Geometry, SnappedClippingRect, OutDrawElements, RetLayerId, YAxisCfg, GetCachedMarkerData(EAxis::Y, PlotSpaceGeometry));
		}
		break;
	case EChartContentArea::YAxisRightTitle:
		if (YAxisCfg.RightTopAxis.bEnabled && YAxisCfg.RightTopAxis.bShowTitle)
		{
			DrawYAxisTitle(Geometry, SnappedClippingRect, OutDrawElements, RetLayerId, YAxisCfg, GetCachedMarkerData(EAxis::Y, PlotSpaceGeometry));
		}
		break;
	case EChartContentArea::XAxisBottom:
		if (XAxisCfg.LeftBottomAxis.bEnabled)
		{
			DrawFixedAxis(
				Geometry,
				SnappedClippingRect,
				OutDrawElements,
				RetLayerId,
				EAxis::X,
				AxisUtil::FAxisTransform::FromTransform2D(CartesianToPlotTransform(PlotSpaceGeometry), 0 /* X axis */),
				EChartAxisPosition::LeftBottom,
				GetCachedMarkerData(EAxis::X, PlotSpaceGeometry),
				XAxisCfg.LeftBottomAxis.bShowMarkers,
				XAxisCfg.LeftBottomAxis.bShowLabels,
				ChartConstants::AxisMarkerLength,
				ChartConstants::AxisMarkerLabelGap
				);
		}
		break;
	case EChartContentArea::XAxisTop:
		if (XAxisCfg.RightTopAxis.bEnabled)
		{
			DrawFixedAxis(
				Geometry,
				SnappedClippingRect,
				OutDrawElements,
				RetLayerId,
				EAxis::X,
				AxisUtil::FAxisTransform::FromTransform2D(CartesianToPlotTransform(PlotSpaceGeometry), 0 /* X axis */),
				EChartAxisPosition::RightTop,
				GetCachedMarkerData(EAxis::X, PlotSpaceGeometry),
				XAxisCfg.RightTopAxis.bShowMarkers,
				XAxisCfg.RightTopAxis.bShowLabels,
				ChartConstants::AxisMarkerLength,
				ChartConstants::AxisMarkerLabelGap
				);
		}
		break;
	case EChartContentArea::YAxisLeft:
		if (YAxisCfg.LeftBottomAxis.bEnabled)
		{
			DrawFixedAxis(
				Geometry,
				SnappedClippingRect,
				OutDrawElements,
				RetLayerId,
				EAxis::Y,
				AxisUtil::FAxisTransform::FromTransform2D(CartesianToPlotTransform(PlotSpaceGeometry), 1 /* Y axis */),
				EChartAxisPosition::LeftBottom,
				GetCachedMarkerData(EAxis::Y, PlotSpaceGeometry),
				YAxisCfg.LeftBottomAxis.bShowMarkers,
				YAxisCfg.LeftBottomAxis.bShowLabels,
				ChartConstants::AxisMarkerLength,
				ChartConstants::AxisMarkerLabelGap
				);
		}
		break;
	case EChartContentArea::YAxisRight:
		if (YAxisCfg.RightTopAxis.bEnabled)
		{
			DrawFixedAxis(
				Geometry,
				SnappedClippingRect,
				OutDrawElements,
				RetLayerId,
				EAxis::Y,
				AxisUtil::FAxisTransform::FromTransform2D(CartesianToPlotTransform(PlotSpaceGeometry), 1 /* Y axis */),
				EChartAxisPosition::RightTop,
				GetCachedMarkerData(EAxis::Y, PlotSpaceGeometry),
				YAxisCfg.RightTopAxis.bShowMarkers,
				YAxisCfg.RightTopAxis.bShowLabels,
				ChartConstants::AxisMarkerLength,
				ChartConstants::AxisMarkerLabelGap
				);
		}
		break;

	case EChartContentArea::Plot:
	{
		// Add 1 unit to right and bottom of clip rect for purposes of drawing axes
		const FSlateRect AxisClipRect = SnappedClippingRect.ExtendBy(FMargin(0, 0, 1, 1)); //Geometry.GetRenderBoundingRect(FMargin(0, 0, 1, 1));

		OutDrawElements.PushClip(FSlateClippingZone(AxisClipRect));

		auto AxisLayer = RetLayerId;
		FPlotMarkerData PlotMarkerData;
		PlotMarkerData.XAxis = GetCachedMarkerData(EAxis::X, PlotSpaceGeometry);
		PlotMarkerData.YAxis = GetCachedMarkerData(EAxis::Y, PlotSpaceGeometry);
		RetLayerId = DrawAxes(PlotSpaceGeometry, AxisClipRect, OutDrawElements, AxisLayer, AxisLayer + 2, PlotMarkerData);

		OutDrawElements.PopClip();

		// Inflate slightly to avoid clipping plot lines lying exactly along the edges of the plot area.
		// @NOTE: Bit random, but apparently 1.0 on the vertical is not sufficient to stop this.
		const FSlateRect DataClipRect = SnappedClippingRect.ExtendBy(FMargin(0.5f, 2.0f)); //PlotSpaceGeometry.GetRenderBoundingRect(FMargin(0.5f, 2.0f));

		OutDrawElements.PushClip(FSlateClippingZone(DataClipRect));

		auto ChartStyle = GetChartStyle();
		auto NumSeries = GetNumSeries();
		for (int32 Idx = 0; Idx < NumSeries; ++Idx)
		{
			auto SeriesId = GetSeriesId(Idx);
			if (SeriesId.IsNone())
			{
				continue;
			}

			auto const& Config = SeriesConfig[SeriesId];
			if (Config.bEnabled == false)
			{
				continue;
			}

			// Don't render if no element is setup for this series
			if (SeriesElements.Contains(SeriesId) == false)
			{
				continue;
			}

			const auto Points = GetSeriesDatapoints(Idx);
			auto const& SeriesStyle = GetSeriesStyle(SeriesId);

			// @TODO: Sort out layers, maybe need to separate out DrawAxes into DrawAxisLines and DrawAxisLabels
			DrawSeries(PlotSpaceGeometry, DataClipRect, OutDrawElements, AxisLayer + 1, SeriesId, Points, SeriesStyle);
		}

		OutDrawElements.PopClip();
	}
	break;
	}

	return RetLayerId;
}

/**
 * Computes the desired size of this widget (SWidget)
 *
 * @return The widget's desired size
 */
FVector2D SKantanCartesianChart::ComputeDesiredSize( float ) const
{
	return FVector2D(300, 300);
}

void SKantanCartesianChart::OnActiveTick(double InCurrentTime, float InDeltaTime)
{
	if (Datasource != nullptr)
	{
		// @TODO: Can't we get away with only updating enabled series here?
		DataSnapshot.UpdateFromDatasource(Datasource);

		// Give the owner a chance to recompute the plot scale from the fresh snapshot.
		if(OnUpdatePlotScaleDelegate.IsBound())
		{
			TArray< int32 > EnabledIndices;
			for(int32 Idx = 0; Idx < DataSnapshot.Elements.Num(); ++Idx)
			{
				if(IsSeriesEnabled(DataSnapshot.Elements[Idx].Id))
				{
					EnabledIndices.Add(Idx);
				}
			}

			PlotScale = OnUpdatePlotScaleDelegate.Execute(DataSnapshot, EnabledIndices);
		}

		UpdateSeriesConfigFromDatasource();
		UpdateDrawingElementsFromDatasource();
	}
}

void SKantanCartesianChart::Tick(const FGeometry& AllottedGeometry, const double InCurrentTime, const float InDeltaTime)
{
	// @TODO: Ideally, we should only do this when we detect a change in geometry from the last time the marker data
	// was calculated, or in direct response to a change to some setting, or data update leading to altered plot scale.
	InvalidateCachedMarkerData(EAxis::X);
	InvalidateCachedMarkerData(EAxis::Y);

	SKantanChart::Tick(AllottedGeometry, InCurrentTime, InDeltaTime);
}

void SKantanCartesianChart::GetPointsToDraw(
	TArray< FKantanCartesianDatapoint > const& InPoints,
	FCartesianAxisRange const& RangeX,
	FCartesianAxisRange const& RangeY,
	TArray< FVector2D >& OutPoints) const
{
	// Cull points outside the plot range
	// @TODO: Not ideal, using arbitrary inflation amount to avoid culling points which are outside
	// range but may be partially visible. To do this correctly would need knowledge of plot geometry size.
	auto XCullRange = RangeX.Normalized();
	XCullRange.Expand(1.1f);
	auto YCullRange = RangeY.Normalized();
	YCullRange.Expand(1.1f);

	auto const Count = InPoints.Num();
	OutPoints.Reset(Count);
	for (auto const& Pnt : InPoints)
	{
		if (
			Pnt.Coords.X >= XCullRange.Min && Pnt.Coords.X <= XCullRange.Max &&
			Pnt.Coords.Y >= YCullRange.Min && Pnt.Coords.Y <= YCullRange.Max
			)
		{
			OutPoints.Add(Pnt.Coords);
		}
	}
}

void SKantanCartesianChart::GetLinePointsToDraw(
	TArray< FKantanCartesianDatapoint > const& InPoints,
	FCartesianAxisRange const& RangeX,
	FCartesianAxisRange const& RangeY,
	TArray< FVector2D >& OutPoints) const
{
	// @TODO: Can't cull so easily - would need to output multiple line batches, and cull based on whether
	// consecutive points are outside the range on the same side (eg. both > RangeY.Max)
	auto const Count = InPoints.Num();
	OutPoints.SetNumUninitialized(Count);
	for (int32 Idx = 0; Idx < Count; ++Idx)
	{
		OutPoints[Idx] = InPoints[Idx].Coords;
	}
}

int32 SKantanCartesianChart::DrawPoints(const FGeometry& PlotSpaceGeometry, const FSlateRect& ClipRect, FSlateWindowElementList& OutDrawElements, int32 LayerId, FName const& SeriesId, TArray< FKantanCartesianDatapoint > const& Points, FKantanSeriesStyle const& SeriesStyle) const
{
	++LayerId;

	auto& Element = SeriesElements.FindChecked(SeriesId);
	const EKantanDataPointSize::Type DP_SizeType = DataPointSize;
	const int32 DP_PixelSize = KantanDataPointPixelSizes[DP_SizeType];

	const auto RangeX = PlotScale.GetXRange(PlotSpaceGeometry.GetLocalSize());
	const auto RangeY = PlotScale.GetYRange(PlotSpaceGeometry.GetLocalSize());

	TArray< FVector2D > DrawPoints;
	GetPointsToDraw(Points, RangeX, RangeY, DrawPoints);

	auto const CartesianToPlotXform = CartesianToPlotTransform(PlotSpaceGeometry);
	Element->RenderSeries(PlotSpaceGeometry, ClipRect, CartesianToPlotXform, MoveTemp(DrawPoints), LayerId, OutDrawElements);

	return LayerId;
}

int32 SKantanCartesianChart::DrawLines(const FGeometry& PlotSpaceGeometry, const FSlateRect& ClipRect, FSlateWindowElementList& OutDrawElements, int32 LayerId, FName const& SeriesId, TArray< FKantanCartesianDatapoint > const& Points, FKantanSeriesStyle const& SeriesStyle) const
{
	++LayerId;

	auto CartesianToPlotXform = CartesianToPlotTransform(PlotSpaceGeometry);
	auto RangeX = PlotScale.GetXRange(PlotSpaceGeometry.GetLocalSize());
	auto RangeY = PlotScale.GetYRange(PlotSpaceGeometry.GetLocalSize());

	TArray< FVector2D > DrawPoints;
	GetLinePointsToDraw(Points, RangeX, RangeY, DrawPoints);

	for (auto& Pnt : DrawPoints)
	{
		// @TODO: If transform points like this and draw via plot space geometry, all works.
		// If leave untransformed and draw via cartesian space geometry, get bizarre clipping behaviour.
		// Suspect is related to there being a render transform component to the mapping between plot space
		// and cartesian space. Still seems a little strange though given that clip rect is apparently given
		// in absolute coords, would expect that if the element is drawn in the correct place, it would be
		// clipped correctly too.
		Pnt = CartesianToPlotXform.TransformPoint(Pnt); //
	}

	auto ChartStyle = GetChartStyle();

	// @TODO: Drawing individual segments in attempt to workaround antialiasing issue
	TArray< FVector2D > SegmentPoints;
	SegmentPoints.SetNumUninitialized(2);
	for (int32 Idx = 0; Idx < DrawPoints.Num() - 1; ++Idx)
	{
		SegmentPoints[0] = DrawPoints[Idx];
		SegmentPoints[1] = DrawPoints[Idx + 1];

		FSlateDrawElement::MakeLines(
			OutDrawElements,
			LayerId,
			PlotSpaceGeometry.ToPaintGeometry(),
			SegmentPoints,//DrawPoints,
			//ClipRect.ExtendBy(ChartConstants::ChartClipRectExtension),
			ESlateDrawEffect::None,
			SeriesStyle.Color * FLinearColor(1, 1, 1, ChartStyle->DataOpacity),
			bAntialiasDataLines,
			GetChartStyle()->DataLineThickness
			);
	}

	return LayerId;
}

int32 SKantanCartesianChart::DrawSeries(const FGeometry& PlotSpaceGeometry, const FSlateRect& ClipRect, FSlateWindowElementList& OutDrawElements, int32 LayerId, FName const& SeriesId, TArray< FKantanCartesianDatapoint > const& Points, FKantanSeriesStyle const& SeriesStyle) const
{
	if (SeriesConfig[SeriesId].bDrawLines)
	{
		LayerId = DrawLines(PlotSpaceGeometry, ClipRect, OutDrawElements, LayerId, SeriesId, Points, SeriesStyle);
	}

	if (SeriesConfig[SeriesId].bDrawPoints)
	{
		LayerId = DrawPoints(PlotSpaceGeometry, ClipRect, OutDrawElements, LayerId, SeriesId, Points, SeriesStyle);
	}

	return LayerId;
}

// Draws the floating (in-plot) axes: the axis lines themselves plus their
// markers and value labels, clamped to the visible plot area.
int32 SKantanCartesianChart::DrawAxes(const FGeometry& PlotSpaceGeometry, const FSlateRect& ClipRect, FSlateWindowElementList& OutDrawElements, int32 AxisLayerId, int32 LabelLayerId, FPlotMarkerData const& MarkerData) const
{
	auto ChartStyle = GetChartStyle();
	auto const CartesianToPlotXform = CartesianToPlotTransform(PlotSpaceGeometry);
	auto const LocalOrigin = CartesianToPlotXform.TransformPoint(FVector2D::ZeroVector);
	auto const PlotSize = PlotSpaceGeometry.GetLocalSize();

	// Determine cartesian range covered by plot space
	auto CartesianRangeMin = ::Inverse(CartesianToPlotXform).TransformPoint(FVector2D::ZeroVector);
	auto CartesianRangeMax = ::Inverse(CartesianToPlotXform).TransformPoint(PlotSpaceGeometry.GetLocalSize());
	if (CartesianRangeMin.X > CartesianRangeMax.X)
	{
		Swap(CartesianRangeMin.X, CartesianRangeMax.X);
	}
	if (CartesianRangeMin.Y > CartesianRangeMax.Y)
	{
		Swap(CartesianRangeMin.Y, CartesianRangeMax.Y);
	}

	auto FontMeasureService = FSlateApplication::Get().GetRenderer()->GetFontMeasureService();
	FSlateFontInfo AxisMarkerFont = GetLabelFont(ChartStyle, EKantanChartLabelClass::AxisMarkerLabel);

	// Horizontal axis
	if (XAxisCfg.FloatingAxis.bEnabled)
	{
		// Clamp the axis line to the plot area when the origin is off-screen.
		const float Y0 = FMath::Clamp(LocalOrigin.Y, 0.0f, PlotSize.Y);

		{
			TArray< FVector2D > Points;
			Points.Add(FVector2D(
				0.0f,
				Y0
				));
			Points.Add(FVector2D(
				PlotSize.X,
				Y0
				));

			FSlateDrawElement::MakeLines(
				OutDrawElements,
				AxisLayerId,
				PlotSpaceGeometry.ToPaintGeometry(),
				Points,
				//ClipRect,
				ESlateDrawEffect::None,
				ChartStyle->ChartLineColor,
				false,
				ChartStyle->ChartLineThickness
				);
		}

		const auto XRounding = MarkerData.XAxis.RL;
		const auto LabelMaxExtents = DetermineAxisValueLabelMaxExtents(EAxis::X, XAxisCfg.MaxValueDigits);
		const bool bFitsBelow = LabelMaxExtents.Y < (PlotSize.Y - Y0);

		if (false)	// @TODO: if show axis multiplier and unit on axis
		{
			// Axis unit text
			FText UnitText = FText::Format(
				FText::FromString(TEXT("x{0}")),
				FText::FromString(XRounding.ExponentAsString())
				);
			auto UnitExtents = FontMeasureService->Measure(UnitText, AxisMarkerFont);
			auto UnitGeometry = PlotSpaceGeometry.MakeChild(
				UnitExtents,
				FSlateLayoutTransform(FVector2D(PlotSize.X - UnitExtents.X, Y0 - UnitExtents.Y))
				);
			FSlateDrawElement::MakeText(
				OutDrawElements,
				LabelLayerId,
				UnitGeometry.ToPaintGeometry(),
				UnitText,
				AxisMarkerFont,
				//ClipRect,
				ESlateDrawEffect::None,
				ChartStyle->FontColor);
		}

		// Axis markers and labels
		const auto XStart = XRounding.RoundUp(CartesianRangeMin.X);
		for (auto RoundedMarkerX = XStart; RoundedMarkerX.GetFloatValue() < CartesianRangeMax.X; ++RoundedMarkerX)
		{
			if (RoundedMarkerX.IsZero())	// @TODO: && other floating axis is drawn
			{
				continue;
			}

			const auto MarkerX = RoundedMarkerX.GetFloatValue();
			auto const MarkerYOffset = bFitsBelow ? ChartConstants::AxisMarkerLength : -ChartConstants::AxisMarkerLength;
			auto const LabelYOffset = bFitsBelow ? 0.0f : -LabelMaxExtents.Y;

			TArray< FVector2D > Points;
			const auto MarkerXPlotSpace = CartesianToPlotXform.TransformPoint(FVector2D(MarkerX, 0.0f)).X;
			Points.Add(FVector2D(
				MarkerXPlotSpace,
				Y0
				));
			Points.Add(FVector2D(
				MarkerXPlotSpace,
				Y0 + MarkerYOffset
				));

			if (XAxisCfg.FloatingAxis.bShowMarkers)
			{
				FSlateDrawElement::MakeLines(
					OutDrawElements,
					AxisLayerId,
					PlotSpaceGeometry.ToPaintGeometry(),
					Points,
					//ClipRect,
					ESlateDrawEffect::None,
					ChartStyle->ChartLineColor,
					true,
					ChartStyle->ChartLineThickness
					);
			}

			if (XAxisCfg.FloatingAxis.bShowLabels)
			{
				// Center the unsigned part of the label on the marker, so that a minus
				// sign does not shift the digits relative to positive labels.
				const FText UnsignedLabelText = FText::FromString(RoundedMarkerX.Abs().MultiplierAsString(MarkerData.XAxis.DisplayPower));
				const auto UnsignedLabelExtents = FontMeasureService->Measure(UnsignedLabelText, AxisMarkerFont);
				const FText LabelText = FText::FromString(RoundedMarkerX.MultiplierAsString(MarkerData.XAxis.DisplayPower));
				const auto LabelExtents = FontMeasureService->Measure(LabelText, AxisMarkerFont);
				const auto LabelGeometry = PlotSpaceGeometry.MakeChild(
					LabelExtents,
					FSlateLayoutTransform(Points[1] + FVector2D(-UnsignedLabelExtents.X * 0.5f - (LabelExtents.X - UnsignedLabelExtents.X), LabelYOffset))
					);

				// Test if the text can be fully displayed within the clip bounds, and if not, don't draw it at all
				if(!WillGeometryGetClipped(LabelGeometry, ClipRect))
				{
					FSlateDrawElement::MakeText(
						OutDrawElements,
						LabelLayerId,
						LabelGeometry.ToPaintGeometry(),
						LabelText,
						AxisMarkerFont,
						//ClipRect,
						ESlateDrawEffect::None,
						ChartStyle->FontColor);
				}
			}
		}
	}

	// Vertical axis
	if (YAxisCfg.FloatingAxis.bEnabled)
	{
		const float X0 = FMath::Clamp(LocalOrigin.X, 0.0f, PlotSize.X);

		{
			TArray< FVector2D > Points;
			Points.Add(FVector2D(
				X0,
				0.0f
				));
			Points.Add(FVector2D(
				X0,
				PlotSize.Y
				));

			FSlateDrawElement::MakeLines(
				OutDrawElements,
				AxisLayerId,
				PlotSpaceGeometry.ToPaintGeometry(),
				Points,
				//ClipRect,
				ESlateDrawEffect::None,
				ChartStyle->ChartLineColor,
				false,
				ChartStyle->ChartLineThickness
				);
		}

		const auto YRounding = MarkerData.YAxis.RL;
		const auto LabelMaxExtents = DetermineAxisValueLabelMaxExtents(EAxis::Y, YAxisCfg.MaxValueDigits);
		const bool bFitsLeft = LabelMaxExtents.X < X0;

		// Axis markers and labels
		const auto YStart = YRounding.RoundUp(CartesianRangeMin.Y);
		for (auto RoundedMarkerY = YStart; RoundedMarkerY.GetFloatValue() < CartesianRangeMax.Y; ++RoundedMarkerY)
		{
			if (RoundedMarkerY.IsZero())	// @TODO: && other floating axis is drawn
			{
				continue;
			}

			const auto MarkerY = RoundedMarkerY.GetFloatValue();
			auto const MarkerXOffset = bFitsLeft ? -ChartConstants::AxisMarkerLength : ChartConstants::AxisMarkerLength;

			TArray< FVector2D > Points;
			const auto MarkerYPlotSpace = CartesianToPlotXform.TransformPoint(FVector2D(0.0f, MarkerY)).Y;
			Points.Add(FVector2D(
				X0,
				MarkerYPlotSpace
				));
			Points.Add(FVector2D(
				X0 + MarkerXOffset,
				MarkerYPlotSpace
				));

			if (YAxisCfg.FloatingAxis.bShowMarkers)
			{
				FSlateDrawElement::MakeLines(
					OutDrawElements,
					AxisLayerId,
					PlotSpaceGeometry.ToPaintGeometry(),
					Points,
					//ClipRect,
					ESlateDrawEffect::None,
					ChartStyle->ChartLineColor,
					true,
					ChartStyle->ChartLineThickness
					);
			}

			if (YAxisCfg.FloatingAxis.bShowLabels)
			{
				const FText LabelText = FText::FromString(RoundedMarkerY.MultiplierAsString(MarkerData.YAxis.DisplayPower));
				const auto LabelExtents = FontMeasureService->Measure(LabelText, AxisMarkerFont);
				auto const LabelXOffset = bFitsLeft ? -LabelExtents.X : 0.0f;
				auto LabelGeometry = PlotSpaceGeometry.MakeChild(
					LabelExtents,
					FSlateLayoutTransform(Points[1] + FVector2D(LabelXOffset, -LabelExtents.Y * 0.5f))
					);

				// Test if the text can be fully displayed within the clip bounds, and if not, don't draw it at all
				if(!WillGeometryGetClipped(LabelGeometry, ClipRect))
				{
					FSlateDrawElement::MakeText(
						OutDrawElements,
						LabelLayerId,
						LabelGeometry.ToPaintGeometry(),
						LabelText,
						AxisMarkerFont,
						//ClipRect,
						ESlateDrawEffect::None,
						ChartStyle->FontColor);
				}
			}
		}
	}

	return LabelLayerId + 1;
}

void SKantanCartesianChart::InvalidateCachedMarkerData(EAxis::Type Axis) const
{
	switch(Axis)
	{
	case EAxis::X:
		XAxisMarkers.Reset();
		break;
	case EAxis::Y:
		YAxisMarkers.Reset();
		break;
	}
}

// Lazily (re)computes the marker data for the requested axis; the cache is
// invalidated every Tick.
const AxisUtil::FAxisMarkerData& SKantanCartesianChart::GetCachedMarkerData(EAxis::Type Axis, FGeometry const& PlotSpaceGeometry) const
{
	switch(Axis)
	{
	case EAxis::X:
		if(XAxisMarkers.IsSet() == false)
		{
			XAxisMarkers = DetermineAxisMarkerData(PlotSpaceGeometry, Axis);
		}
		return XAxisMarkers.GetValue();

	case EAxis::Y:
		if(YAxisMarkers.IsSet() == false)
		{
			YAxisMarkers = DetermineAxisMarkerData(PlotSpaceGeometry, Axis);
		}
		return YAxisMarkers.GetValue();

	default:
		{
			check(false);
			static const auto Dummy = AxisUtil::FAxisMarkerData();
			return Dummy;
		}
	}
}

// Returns the size (along ReqComp) that a given chart sub-area needs, or 0 when
// the area is disabled or the component is not the constrained one.
float SKantanCartesianChart::GetChartAreaSize(EChartContentArea::Type Area, EAxis::Type ReqComp, FVector2D const& KnownPlotSize) const
{
	switch (Area)
	{
	case EChartContentArea::XAxisBottomTitle:
		return ReqComp == EAxis::Y && XAxisCfg.LeftBottomAxis.bEnabled && XAxisCfg.LeftBottomAxis.bShowTitle ? DetermineAxisTitleSize(XAxisCfg, EAxis::X).Y : 0.0f;
	case EChartContentArea::XAxisTopTitle:
		return ReqComp == EAxis::Y && XAxisCfg.RightTopAxis.bEnabled && XAxisCfg.RightTopAxis.bShowTitle ? DetermineAxisTitleSize(XAxisCfg, EAxis::X).Y : 0.0f;
	case EChartContentArea::YAxisLeftTitle:
		return ReqComp == EAxis::X && YAxisCfg.LeftBottomAxis.bEnabled && YAxisCfg.LeftBottomAxis.bShowTitle ? DetermineAxisTitleSize(YAxisCfg, EAxis::Y).X : 0.0f;
	case EChartContentArea::YAxisRightTitle:
		return ReqComp == EAxis::X && YAxisCfg.RightTopAxis.bEnabled && YAxisCfg.RightTopAxis.bShowTitle ? DetermineAxisTitleSize(YAxisCfg, EAxis::Y).X : 0.0f;
	case EChartContentArea::XAxisBottom:
		return ReqComp == EAxis::Y && XAxisCfg.LeftBottomAxis.bEnabled ? DetermineAxisRequiredWidth(EAxis::X, XAxisCfg.MaxValueDigits, ChartConstants::AxisMarkerLength, ChartConstants::AxisMarkerLabelGap) : 0.0f;
	case EChartContentArea::XAxisTop:
		return ReqComp == EAxis::Y && XAxisCfg.RightTopAxis.bEnabled ? DetermineAxisRequiredWidth(EAxis::X, XAxisCfg.MaxValueDigits, ChartConstants::AxisMarkerLength, ChartConstants::AxisMarkerLabelGap) : 0.0f;
	case EChartContentArea::YAxisLeft:
		return ReqComp == EAxis::X && YAxisCfg.LeftBottomAxis.bEnabled ? DetermineAxisRequiredWidth(EAxis::Y, YAxisCfg.MaxValueDigits, ChartConstants::AxisMarkerLength, ChartConstants::AxisMarkerLabelGap) : 0.0f;
	case EChartContentArea::YAxisRight:
		return ReqComp == EAxis::X && YAxisCfg.RightTopAxis.bEnabled ? DetermineAxisRequiredWidth(EAxis::Y, YAxisCfg.MaxValueDigits, ChartConstants::AxisMarkerLength, ChartConstants::AxisMarkerLabelGap) : 0.0f;

	default:
		return 0.0f;
	}
}

FSlateRenderTransform SKantanCartesianChart::CartesianToPlotTransform(FGeometry const& PlotSpaceGeometry) const
{
	return PlotScale.GetTransformFromCartesianSpace(PlotSpaceGeometry.GetLocalSize());
}

FFloatRoundingLevel SKantanCartesianChart::DetermineAxisRoundingLevel(FGeometry const& PlotSpaceGeometry, EAxis::Type Axis) const
{
	auto const AxisIdx = (Axis == EAxis::X ? 0 : 1);
	auto const CartesianToPlotXform = CartesianToPlotTransform(PlotSpaceGeometry);
	auto const CartesianToPlotAxisTransform = AxisUtil::FAxisTransform::FromTransform2D(CartesianToPlotXform, AxisIdx);

	auto const DefaultMinPlotSpaceLabelSeparation = 50.0f;
	auto MinPlotSpaceLabelSeparation = DefaultMinPlotSpaceLabelSeparation * (Axis == EAxis::X ? XAxisCfg.MarkerSpacing : YAxisCfg.MarkerSpacing);
	MinPlotSpaceLabelSeparation = FMath::Max(MinPlotSpaceLabelSeparation, 1.0f);
	return AxisUtil::DetermineAxisRoundingLevel(CartesianToPlotAxisTransform, MinPlotSpaceLabelSeparation);
}

AxisUtil::FAxisMarkerData SKantanCartesianChart::DetermineAxisMarkerData(FGeometry const& PlotSpaceGeometry, EAxis::Type Axis) const
{
	auto const AxisIdx = Axis == EAxis::X ? 0 : 1;
	auto CartesianToPlotXform = CartesianToPlotTransform(PlotSpaceGeometry);
	auto CartesianRangeMin = ::Inverse(CartesianToPlotXform).TransformPoint(FVector2D::ZeroVector);
	auto CartesianRangeMax = ::Inverse(CartesianToPlotXform).TransformPoint(PlotSpaceGeometry.GetLocalSize());
	auto AxisCartesianRange = FCartesianAxisRange(CartesianRangeMin[AxisIdx], CartesianRangeMax[AxisIdx]).Normalized();

	auto const& AxisCfg = Axis == EAxis::X ? XAxisCfg : YAxisCfg;
	return AxisUtil::DetermineAxisMarkerData(
		DetermineAxisRoundingLevel(PlotSpaceGeometry, Axis),
		AxisCartesianRange,
		AxisCfg.MaxValueDigits
		);
}

void SKantanCartesianChart::AddReferencedObjects(FReferenceCollector& Collector)
{
	// Keep the datasource UObject alive while this (non-UObject) widget references it.
	if (Datasource)
	{
		Collector.AddReferencedObject(Datasource);
	}
}
#pragma once #include <glm/glm.hpp> #define PI 3.14159265358979323846 namespace YokaiPhysics { /** * @brief Calculates Rectangular Inertia Tensor * @param ext * @param mass * @return glm::mat3x3 */ glm::mat3x3 RectangleInertiaTensor(glm::dvec3 ext, double mass); /** * @brief Calculates Spherical Inertia Tensor * @param radius * @param mass * @return glm::mat3x3 */ glm::mat3x3 SphereInertiaTensor(double radius, double mass); /** * @brief Degrees To radians * @param degrees * @return double */ double DegreesToRadians(double degrees); /** * @brief Radians to Degrees * @param radians * @return double */ double RadiansToDegrees(double radians); }
#include <CQChartsExprDataModel.h>

// Constructs an expression data model as a CQDataModel with exactly one
// column and n rows, remembering n in n_.
// NOTE(review): presumably the single column serves as a row-index domain for
// expression evaluation — confirm against CQChartsExprDataModel.h.
CQChartsExprDataModel::
CQChartsExprDataModel(int n) :
 CQDataModel(1, n), n_(n)
{
}
// Copyright 1998-2017 Epic Games, Inc. All Rights Reserved. #include "Components/WidgetSwitcherSlot.h" #include "SlateFwd.h" #include "Components/Widget.h" ///////////////////////////////////////////////////// // UWidgetSwitcherSlot UWidgetSwitcherSlot::UWidgetSwitcherSlot(const FObjectInitializer& ObjectInitializer) : Super(ObjectInitializer) , Slot(NULL) { HorizontalAlignment = HAlign_Fill; VerticalAlignment = VAlign_Fill; } void UWidgetSwitcherSlot::ReleaseSlateResources(bool bReleaseChildren) { Super::ReleaseSlateResources(bReleaseChildren); Slot = NULL; } void UWidgetSwitcherSlot::BuildSlot(TSharedRef<SWidgetSwitcher> WidgetSwitcher) { Slot = &WidgetSwitcher->AddSlot() .Padding(Padding) .HAlign(HorizontalAlignment) .VAlign(VerticalAlignment) [ Content == NULL ? SNullWidget::NullWidget : Content->TakeWidget() ]; } void UWidgetSwitcherSlot::SetContent(UWidget* NewContent) { Content = NewContent; if (Slot) { Slot->AttachWidget(NewContent ? NewContent->TakeWidget() : SNullWidget::NullWidget); } } void UWidgetSwitcherSlot::SetPadding(FMargin InPadding) { Padding = InPadding; if ( Slot ) { Slot->Padding(InPadding); } } void UWidgetSwitcherSlot::SetHorizontalAlignment(EHorizontalAlignment InHorizontalAlignment) { HorizontalAlignment = InHorizontalAlignment; if ( Slot ) { Slot->HAlign(InHorizontalAlignment); } } void UWidgetSwitcherSlot::SetVerticalAlignment(EVerticalAlignment InVerticalAlignment) { VerticalAlignment = InVerticalAlignment; if ( Slot ) { Slot->VAlign(InVerticalAlignment); } } void UWidgetSwitcherSlot::SynchronizeProperties() { SetPadding(Padding); SetHorizontalAlignment(HorizontalAlignment); SetVerticalAlignment(VerticalAlignment); }
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file has been auto-generated by code_generator_v8.py. DO NOT MODIFY!
// NOTE(review): generated V8 binding glue for SVGTSpanElement; any edit here
// will be overwritten — change the IDL/generator instead.

#include "config.h"
#include "V8SVGTSpanElement.h"

#include "bindings/core/v8/ExceptionState.h"
#include "bindings/core/v8/V8DOMConfiguration.h"
#include "bindings/core/v8/V8ObjectConstructor.h"
#include "core/dom/ContextFeatures.h"
#include "core/dom/Document.h"
#include "platform/RuntimeEnabledFeatures.h"
#include "platform/TraceEvent.h"
#include "wtf/GetPtr.h"
#include "wtf/RefPtr.h"

namespace blink {

// Suppress warning: global constructors, because struct WrapperTypeInfo is trivial
// and does not depend on another global objects.
#if defined(COMPONENT_BUILD) && defined(WIN32) && COMPILER(CLANG)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wglobal-constructors"
#endif
// Static type descriptor linking the wrapper class, its lifetime callbacks
// and its parent interface (SVGTextPositioningElement).
const WrapperTypeInfo V8SVGTSpanElement::wrapperTypeInfo = {
    gin::kEmbedderBlink,
    V8SVGTSpanElement::domTemplate,
    V8SVGTSpanElement::refObject,
    V8SVGTSpanElement::derefObject,
    V8SVGTSpanElement::trace,
    0,
    0,
    V8SVGTSpanElement::preparePrototypeObject,
    V8SVGTSpanElement::installConditionallyEnabledProperties,
    "SVGTSpanElement",
    &V8SVGTextPositioningElement::wrapperTypeInfo,
    WrapperTypeInfo::WrapperTypeObjectPrototype,
    WrapperTypeInfo::NodeClassId,
    WrapperTypeInfo::InheritFromEventTarget,
    WrapperTypeInfo::Dependent,
    WrapperTypeInfo::WillBeGarbageCollectedObject
};
#if defined(COMPONENT_BUILD) && defined(WIN32) && COMPILER(CLANG)
#pragma clang diagnostic pop
#endif

// This static member must be declared by DEFINE_WRAPPERTYPEINFO in SVGTSpanElement.h.
// For details, see the comment of DEFINE_WRAPPERTYPEINFO in
// bindings/core/v8/ScriptWrappable.h.
const WrapperTypeInfo& SVGTSpanElement::s_wrapperTypeInfo = V8SVGTSpanElement::wrapperTypeInfo;

namespace SVGTSpanElementV8Internal {

} // namespace SVGTSpanElementV8Internal

// Installs the interface's function template: read-only prototype, DOM class
// registration under the parent template, and the custom toString.
static void installV8SVGTSpanElementTemplate(v8::Local<v8::FunctionTemplate> functionTemplate, v8::Isolate* isolate)
{
    functionTemplate->ReadOnlyPrototype();

    v8::Local<v8::Signature> defaultSignature;
    defaultSignature = V8DOMConfiguration::installDOMClassTemplate(isolate, functionTemplate, "SVGTSpanElement", V8SVGTextPositioningElement::domTemplate(isolate), V8SVGTSpanElement::internalFieldCount,
        0, 0,
        0, 0,
        0, 0);
    v8::Local<v8::ObjectTemplate> instanceTemplate = functionTemplate->InstanceTemplate();
    ALLOW_UNUSED_LOCAL(instanceTemplate);
    v8::Local<v8::ObjectTemplate> prototypeTemplate = functionTemplate->PrototypeTemplate();
    ALLOW_UNUSED_LOCAL(prototypeTemplate);

    // Custom toString template
    functionTemplate->Set(v8AtomicString(isolate, "toString"), V8PerIsolateData::from(isolate)->toStringTemplate());
}

// Returns the (cached per-isolate) function template for this interface.
v8::Local<v8::FunctionTemplate> V8SVGTSpanElement::domTemplate(v8::Isolate* isolate)
{
    return V8DOMConfiguration::domClassTemplate(isolate, const_cast<WrapperTypeInfo*>(&wrapperTypeInfo), installV8SVGTSpanElementTemplate);
}

// True if the given V8 value is a wrapper of this interface (or a subtype).
bool V8SVGTSpanElement::hasInstance(v8::Local<v8::Value> v8Value, v8::Isolate* isolate)
{
    return V8PerIsolateData::from(isolate)->hasInstance(&wrapperTypeInfo, v8Value);
}

v8::Local<v8::Object> V8SVGTSpanElement::findInstanceInPrototypeChain(v8::Local<v8::Value> v8Value, v8::Isolate* isolate)
{
    return V8PerIsolateData::from(isolate)->findInstanceInPrototypeChain(&wrapperTypeInfo, v8Value);
}

// Type-checked unwrap: returns the C++ impl or 0 when the value is not a
// wrapper of this interface.
SVGTSpanElement* V8SVGTSpanElement::toImplWithTypeCheck(v8::Isolate* isolate, v8::Local<v8::Value> value)
{
    return hasInstance(value, isolate) ? toImpl(v8::Local<v8::Object>::Cast(value)) : 0;
}

// Ref-count adjustment callbacks; no-ops under Oilpan garbage collection.
void V8SVGTSpanElement::refObject(ScriptWrappable* scriptWrappable)
{
#if !ENABLE(OILPAN)
    scriptWrappable->toImpl<SVGTSpanElement>()->ref();
#endif
}

void V8SVGTSpanElement::derefObject(ScriptWrappable* scriptWrappable)
{
#if !ENABLE(OILPAN)
    scriptWrappable->toImpl<SVGTSpanElement>()->deref();
#endif
}

} // namespace blink
/*
 * Copyright 2009-2017 Alibaba Cloud All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <alibabacloud/cloudapi/model/DescribeDomainRequest.h>

using AlibabaCloud::CloudAPI::Model::DescribeDomainRequest;

// Request object for the CloudAPI "DescribeDomain" RPC (API version
// 2016-07-14), sent via HTTP POST.
DescribeDomainRequest::DescribeDomainRequest() :
	RpcServiceRequest("cloudapi", "2016-07-14", "DescribeDomain")
{
	setMethod(HttpRequest::Method::Post);
}

DescribeDomainRequest::~DescribeDomainRequest()
{}

// Each setter below stores the value locally and mirrors it into the request's
// parameter map so it is serialized with the outgoing call.

std::string DescribeDomainRequest::getGroupId()const
{
	return groupId_;
}

void DescribeDomainRequest::setGroupId(const std::string& groupId)
{
	groupId_ = groupId;
	setParameter("GroupId", groupId);
}

std::string DescribeDomainRequest::getDomainName()const
{
	return domainName_;
}

void DescribeDomainRequest::setDomainName(const std::string& domainName)
{
	domainName_ = domainName;
	setParameter("DomainName", domainName);
}

std::string DescribeDomainRequest::getAccessKeyId()const
{
	return accessKeyId_;
}

void DescribeDomainRequest::setAccessKeyId(const std::string& accessKeyId)
{
	accessKeyId_ = accessKeyId;
	setParameter("AccessKeyId", accessKeyId);
}

std::string DescribeDomainRequest::getSecurityToken()const
{
	return securityToken_;
}

void DescribeDomainRequest::setSecurityToken(const std::string& securityToken)
{
	securityToken_ = securityToken;
	setParameter("SecurityToken", securityToken);
}
// Author: btjanaka (Bryon Tjanaka)
// Problem: (UVa) 11906
#include <bits/stdc++.h>
#define GET(x) scanf("%d", &x)
#define GED(x) scanf("%lf", &x)
typedef long long ll;
using namespace std;
typedef pair<int, int> ii;

int r, c, m, n, w, ri, ci;
bool water[110][110];
vector<int> dr;  // row deltas of the knight-like moves in use
vector<int> dc;  // column deltas, parallel to dr
int ditr;        // how many entries of dr/dc are valid moves
int even, odd;   // tallies of squares with an even / odd number of legal moves
bool visited[110][110];

// Flood-fill from (rcur, ccur): for every square reached, count its legal
// moves (on the board and not water) and record whether that count is even
// or odd.
void dfs(int rcur, int ccur) {
  if (visited[rcur][ccur]) return;
  visited[rcur][ccur] = true;
  int count = 0;
  for (int i = 0; i < ditr; ++i) {
    int r2 = rcur + dr[i];
    int c2 = ccur + dc[i];
    if (!(r2 < 0 || r2 >= r || c2 < 0 || c2 >= c || water[r2][c2])) {
      ++count;
      dfs(r2, c2);
    }
  }
  if (count % 2 == 0) {
    ++even;
  } else {
    ++odd;
  }
}

int main() {
  int ca;
  GET(ca);
  for (int caa = 1; caa <= ca; ++caa) {
    GET(r);
    GET(c);
    GET(m);
    GET(n);
    GET(w);
    memset(water, false, sizeof(water));
    for (int i = 0; i < w; ++i) {
      GET(ri);
      GET(ci);
      water[ri][ci] = true;
    }

    // Build the (m,n)-knight move set. With m != n the eight (+-m, +-n)
    // permutations are distinct; with m == n half of them coincide, so only
    // the first four entries are used.
    if (m == n) {
      ditr = 4;
    } else {
      ditr = 8;
    }
    dr = {m, m, -m, -m, n, n, -n, -n};
    dc = {n, -n, n, -n, m, -m, m, -m};
    if (m == 0 || n == 0) {
      // Degenerate moves along a single axis: only four distinct moves.
      // BUGFIX: ditr must be reset to 4 here. Previously, when m != n (e.g.
      // m == 0, n == 3) ditr stayed 8 while the replacement vectors held only
      // 4 entries, so dr[i]/dc[i] for i >= 4 indexed out of range (UB).
      int mx = max(m, n);
      dr = {mx, -mx, 0, 0};
      dc = {0, 0, mx, -mx};
      ditr = 4;
    }

    memset(visited, false, sizeof(visited));
    even = odd = 0;
    dfs(0, 0);
    printf("Case %d: %d %d\n", caa, even, odd);
  }
  return 0;
}
#include "test_unsigned_int_to_char_vector.h"

// Runs the whole unsignedIntToUnsignedCharVector test suite.
void runTestUnsignedIntToCharVectorTests()
{
    runTestUnsignedIntToCharVectorTestOne();
    runTestUnsignedIntToCharVectorTestTwo();
    runTestUnsignedIntToCharVectorTestThree();
    runTestUnsignedIntToCharVectorTestFour();
}

// Shared checker: converts `input`, compares the produced big-endian bytes
// against `expectedValue`, and prints "<testName> PASS" or a FAIL line with
// both byte sequences in hex.
//
// BUGFIX: the previous FAIL path read actualValue.at(4) on a 4-element
// vector, which throws std::out_of_range instead of printing the diagnostic,
// and it streamed unsigned char values as raw characters rather than hex
// numbers. The loop below stays within bounds and casts each byte to an
// integer before printing.
static void runUnsignedIntToCharVectorCase(const char* testName,
                                           const std::vector<unsigned char>& expectedValue,
                                           unsigned int input)
{
    std::vector<unsigned char> actualValue = unsignedIntToUnsignedCharVector(input);

    bool hasTestPassed = actualValue.size() == expectedValue.size();
    for (unsigned int i = 0; hasTestPassed && i < expectedValue.size(); ++i)
    {
        if (expectedValue.at(i) != actualValue.at(i))
        {
            hasTestPassed = false;
        }
    }

    if (hasTestPassed)
    {
        std::cout << testName << " PASS" << std::endl;
        return;
    }

    std::cout << testName << " FAIL. Expected:" << std::hex;
    for (unsigned int i = 0; i < expectedValue.size(); ++i)
    {
        std::cout << (i == 0 ? " 0x" : ", 0x") << static_cast<unsigned int>(expectedValue.at(i));
    }
    std::cout << " Actual:";
    for (unsigned int i = 0; i < actualValue.size(); ++i)
    {
        std::cout << (i == 0 ? " 0x" : ", 0x") << static_cast<unsigned int>(actualValue.at(i));
    }
    std::cout << std::dec << std::endl;
}

// 1800 == 0x00000708
void runTestUnsignedIntToCharVectorTestOne()
{
    runUnsignedIntToCharVectorCase("runTestUnsignedIntToCharVectorTestOne",
                                   {0x00, 0x00, 0x07, 0x08}, 1800);
}

// 1 == 0x00000001
void runTestUnsignedIntToCharVectorTestTwo()
{
    runUnsignedIntToCharVectorCase("runTestUnsignedIntToCharVectorTestTwo",
                                   {0x00, 0x00, 0x00, 0x01}, 1);
}

// 50 == 0x00000032
void runTestUnsignedIntToCharVectorTestThree()
{
    runUnsignedIntToCharVectorCase("runTestUnsignedIntToCharVectorTestThree",
                                   {0x00, 0x00, 0x00, 0x32}, 50);
}

// 500 == 0x000001F4
void runTestUnsignedIntToCharVectorTestFour()
{
    runUnsignedIntToCharVectorCase("runTestUnsignedIntToCharVectorTestFour",
                                   {0x00, 0x00, 0x01, 0xF4}, 500);
}
/*=============================================================================
  Copyright (c) 2011-2017 Bolero MURAKAMI
  https://github.com/bolero-MURAKAMI/Sprout

  Distributed under the Boost Software License, Version 1.0. (See accompanying
  file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#ifndef SPROUT_ITERATOR_JOINT_ITERATOR_HPP
#define SPROUT_ITERATOR_JOINT_ITERATOR_HPP

#include <sprout/config.hpp>
#include <sprout/iterator/iterator.hpp>
#include <sprout/iterator/next.hpp>
#include <sprout/iterator/prev.hpp>
#include <sprout/iterator/distance.hpp>
#include <sprout/iterator/type_traits/common.hpp>
#include <sprout/utility/swap.hpp>
#include <sprout/utility/limited.hpp>
#include <sprout/type_traits/arithmetic_promote.hpp>

namespace sprout {
	//
	// joint_iterator
	//
	// Iterator adaptor presenting two ranges as one continuous sequence: it
	// walks the "left" range [current1, last1) to its end, then continues in
	// the "right" range starting at first2. The iterator is "in the left
	// range" while current1 != last1.
	template<typename LIterator, typename RIterator>
	class joint_iterator
		: public sprout::iterator<
			typename sprout::common_iterator_category<LIterator, RIterator>::type,
			typename sprout::common_iterator_value_type<LIterator, RIterator>::type,
			typename sprout::common_iterator_difference_type<LIterator, RIterator>::type,
			typename sprout::common_iterator_pointer<LIterator, RIterator>::type,
			typename sprout::common_iterator_reference<LIterator, RIterator>::type
		>
	{
	public:
		typedef LIterator iterator_type;
		typedef RIterator iterator2_type;
		typedef typename sprout::common_iterator_category<LIterator, RIterator>::type iterator_category;
		typedef typename sprout::common_iterator_value_type<LIterator, RIterator>::type value_type;
		typedef typename sprout::common_iterator_difference_type<LIterator, RIterator>::type difference_type;
		typedef typename sprout::common_iterator_pointer<LIterator, RIterator>::type pointer;
		typedef typename sprout::common_iterator_reference<LIterator, RIterator>::type reference;
	protected:
		// current1/last1: current position and end of the left range.
		// first2/current2: begin and current position of the right range.
		iterator_type current1;
		iterator_type last1;
		iterator2_type first2;
		iterator2_type current2;
	private:
		// Forward advance while in the left range; d is the remaining
		// distance to last1. Crossing the boundary lands at first2 + (n - d).
		SPROUT_CONSTEXPR joint_iterator advance_impl_posite(difference_type n, difference_type d) const {
			return n > d ? joint_iterator(last1, last1, first2, sprout::next(current2, n - d))
				: joint_iterator(sprout::next(current1, n), last1, first2, current2)
				;
		}
		// Backward advance while in the right range; n is negative and d is
		// -(distance from first2 to current2). Crossing the boundary moves
		// back into the left range by n - d.
		SPROUT_CONSTEXPR joint_iterator advance_impl_negate(difference_type n, difference_type d) const {
			return !(n > d) ? joint_iterator(sprout::next(current1, n - d), last1, first2, first2)
				: joint_iterator(current1, last1, first2, sprout::next(current2, n))
				;
		}
		// Dispatches by sign of n and which range the iterator currently
		// occupies, delegating boundary crossings to the helpers above.
		SPROUT_CONSTEXPR joint_iterator advance_impl(difference_type n) const {
			return n >= 0
				? is_in_left() ? advance_impl_posite(n, sprout::distance(current1, last1))
					: joint_iterator(current1, last1, first2, sprout::next(current2, n))
				: first2 == current2 ? joint_iterator(sprout::next(current1, n), last1, first2, current2)
					: advance_impl_negate(n, -sprout::distance(first2, current2))
				;
		}
	public:
		SPROUT_CONSTEXPR joint_iterator()
			: current1(), last1(), first2(), current2()
		{}
		joint_iterator(joint_iterator const&) = default;
		SPROUT_CONSTEXPR joint_iterator(iterator_type it1, iterator_type last1, iterator2_type first2, iterator2_type it2)
			: current1(it1)
			, last1(last1)
			, first2(first2)
			, current2(it2)
		{}
		// Converting constructor/assignment from a joint_iterator over
		// convertible iterator types.
		template<typename U, typename V>
		SPROUT_CONSTEXPR joint_iterator(joint_iterator<U, V> const& it)
			: current1(it.base())
			, last1(it.left_end())
			, first2(it.right_begin())
			, current2(it.base2())
		{}
		template<typename U, typename V>
		SPROUT_CXX14_CONSTEXPR joint_iterator& operator=(joint_iterator<U, V> const& it) {
			joint_iterator temp(it);
			temp.swap(*this);
			return *this;
		}
		// Observers for the four underlying iterators.
		SPROUT_CONSTEXPR iterator_type base() const {
			return current1;
		}
		SPROUT_CONSTEXPR iterator_type left_end() const {
			return last1;
		}
		SPROUT_CONSTEXPR iterator2_type right_begin() const {
			return first2;
		}
		SPROUT_CONSTEXPR iterator2_type base2() const {
			return current2;
		}
		// True while the iterator has not yet exhausted the left range.
		SPROUT_CONSTEXPR bool is_in_left() const {
			return current1 != last1;
		}
		SPROUT_CONSTEXPR reference operator*() const {
			return is_in_left() ? *current1 : *current2;
		}
		SPROUT_CONSTEXPR pointer operator->() const {
			return &*(*this);
		}
		// Mutating increment/decrement step whichever sub-iterator is active.
		SPROUT_CXX14_CONSTEXPR joint_iterator& operator++() {
			if (is_in_left()) {
				++current1;
			} else {
				++current2;
			}
			return *this;
		}
		SPROUT_CXX14_CONSTEXPR joint_iterator operator++(int) {
			joint_iterator result(*this);
			if (is_in_left()) {
				++current1;
			} else {
				++current2;
			}
			return result;
		}
		SPROUT_CXX14_CONSTEXPR joint_iterator& operator--() {
			if (first2 == current2) {
				--current1;
			} else {
				--current2;
			}
			return *this;
		}
		SPROUT_CXX14_CONSTEXPR joint_iterator operator--(int) {
			joint_iterator temp(*this);
			if (first2 == current2) {
				--current1;
			} else {
				--current2;
			}
			return temp;
		}
		SPROUT_CONSTEXPR joint_iterator operator+(difference_type n) const {
			return advance_impl(n);
		}
		SPROUT_CONSTEXPR joint_iterator operator-(difference_type n) const {
			return advance_impl(-n);
		}
		SPROUT_CXX14_CONSTEXPR joint_iterator& operator+=(difference_type n) {
			joint_iterator temp(*this + n);
			temp.swap(*this);
			return *this;
		}
		SPROUT_CXX14_CONSTEXPR joint_iterator& operator-=(difference_type n) {
			joint_iterator temp(*this - n);
			temp.swap(*this);
			return *this;
		}
		SPROUT_CONSTEXPR reference operator[](difference_type n) const {
			return *(*this + n);
		}
		// Non-mutating single-step helpers (usable in C++11 constexpr).
		SPROUT_CONSTEXPR joint_iterator next() const {
			return is_in_left() ? joint_iterator(sprout::next(current1), last1, first2, current2)
				: joint_iterator(current1, last1, first2, sprout::next(current2))
				;
		}
		SPROUT_CONSTEXPR joint_iterator prev() const {
			return first2 == current2 ? joint_iterator(sprout::prev(current1), last1, first2, current2)
				: joint_iterator(current1, last1, first2, sprout::prev(current2))
				;
		}
		SPROUT_CXX14_CONSTEXPR void swap(joint_iterator& other)
		SPROUT_NOEXCEPT_IF(
			SPROUT_NOEXCEPT_EXPR(sprout::swap(current1, other.current1))
			&& SPROUT_NOEXCEPT_EXPR(sprout::swap(last1, other.last1))
			&& SPROUT_NOEXCEPT_EXPR(sprout::swap(first2, other.first2))
			&& SPROUT_NOEXCEPT_EXPR(sprout::swap(current2, other.current2))
			)
		{
			sprout::swap(current1, other.current1);
			sprout::swap(last1, other.last1);
			sprout::swap(first2, other.first2);
			sprout::swap(current2, other.current2);
		}
	};

	// Equality compares only the two current positions.
	template<
		typename LIterator1, typename RIterator1,
		typename LIterator2, typename RIterator2
	>
	inline SPROUT_CONSTEXPR bool operator==(
		sprout::joint_iterator<LIterator1, RIterator1> const& lhs,
		sprout::joint_iterator<LIterator2, RIterator2> const& rhs
		)
	{
		return lhs.base() == rhs.base() && lhs.base2() == rhs.base2();
	}
	template<
		typename LIterator1, typename RIterator1,
		typename LIterator2, typename RIterator2
	>
	inline SPROUT_CONSTEXPR bool operator!=(
		sprout::joint_iterator<LIterator1, RIterator1> const& lhs,
		sprout::joint_iterator<LIterator2, RIterator2> const& rhs
		)
	{
		return !(lhs == rhs);
	}
	// Ordering: positions within the same sub-range compare directly; a
	// left-range position always precedes a right-range position.
	template<
		typename LIterator1, typename RIterator1,
		typename LIterator2, typename RIterator2
	>
	inline SPROUT_CONSTEXPR bool operator<(
		sprout::joint_iterator<LIterator1, RIterator1> const& lhs,
		sprout::joint_iterator<LIterator2, RIterator2> const& rhs
		)
	{
		return lhs.is_in_left() && rhs.is_in_left() ? lhs.base() < rhs.base()
			: !lhs.is_in_left() && !rhs.is_in_left() ? lhs.base2() < rhs.base2()
			: lhs.is_in_left()
			;
	}
	template<
		typename LIterator1, typename RIterator1,
		typename LIterator2, typename RIterator2
	>
	inline SPROUT_CONSTEXPR bool operator>(
		sprout::joint_iterator<LIterator1, RIterator1> const& lhs,
		sprout::joint_iterator<LIterator2, RIterator2> const& rhs
		)
	{
		return rhs < lhs;
	}
	template<
		typename LIterator1, typename RIterator1,
		typename LIterator2, typename RIterator2
	>
	inline SPROUT_CONSTEXPR bool operator<=(
		sprout::joint_iterator<LIterator1, RIterator1> const& lhs,
		sprout::joint_iterator<LIterator2, RIterator2> const& rhs
		)
	{
		return !(rhs < lhs);
	}
	template<
		typename LIterator1, typename RIterator1,
		typename LIterator2, typename RIterator2
	>
	inline SPROUT_CONSTEXPR bool operator>=(
		sprout::joint_iterator<LIterator1, RIterator1> const& lhs,
		sprout::joint_iterator<LIterator2, RIterator2> const& rhs
		)
	{
		return !(lhs < rhs);
	}
	// Distance: same-range differences are direct; a cross-range difference
	// sums the span to/from the join point (saturating via limited::plus).
	template<
		typename LIterator1, typename RIterator1,
		typename LIterator2, typename RIterator2
	>
	inline SPROUT_CONSTEXPR typename sprout::arithmetic_promote<
		typename sprout::joint_iterator<LIterator1, RIterator1>::difference_type,
		typename sprout::joint_iterator<LIterator2, RIterator2>::difference_type
	>::type
	operator-(
		sprout::joint_iterator<LIterator1, RIterator1> const& lhs,
		sprout::joint_iterator<LIterator2, RIterator2> const& rhs
		)
	{
		return lhs.is_in_left() && rhs.is_in_left() ? lhs.base() - rhs.base()
			: !lhs.is_in_left() && !rhs.is_in_left() ? lhs.base2() - rhs.base2()
			: lhs.is_in_left() ? sprout::limited::plus(lhs.left_end() - lhs.base(), rhs.base2() - rhs.right_begin())
			: sprout::limited::plus(lhs.base2() - lhs.right_begin(), rhs.left_end() - rhs.base())
			;
	}
	template<typename LIterator, typename RIterator>
	inline SPROUT_CONSTEXPR sprout::joint_iterator<LIterator, RIterator>
	operator+(
		typename sprout::joint_iterator<LIterator, RIterator>::difference_type n,
		sprout::joint_iterator<LIterator, RIterator> const& it
		)
	{
		return it + n;
	}

	//
	// make_joint_iterator
	//
	// Factory overloads: full form; positioned at the start of the left
	// range; and positioned within the right range (left range exhausted).
	template<typename LIterator, typename RIterator>
	inline SPROUT_CONSTEXPR sprout::joint_iterator<LIterator, RIterator>
	make_joint_iterator(LIterator it1, LIterator last1, RIterator first2, RIterator it2) {
		return sprout::joint_iterator<LIterator, RIterator>(it1, last1, first2, it2);
	}
	template<typename LIterator, typename RIterator>
	inline SPROUT_CONSTEXPR sprout::joint_iterator<LIterator, RIterator>
	make_joint_iterator(LIterator it1, LIterator last1, RIterator first2) {
		return sprout::joint_iterator<LIterator, RIterator>(it1, last1, first2, first2);
	}
	template<typename LIterator, typename RIterator>
	inline SPROUT_CONSTEXPR sprout::joint_iterator<LIterator, RIterator>
	make_joint_iterator(LIterator last1, RIterator first2, RIterator it2) {
		return sprout::joint_iterator<LIterator, RIterator>(last1, last1, first2, it2);
	}

	//
	// swap
	//
	template<typename LIterator, typename RIterator>
	inline SPROUT_CXX14_CONSTEXPR void
	swap(
		sprout::joint_iterator<LIterator, RIterator>& lhs,
		sprout::joint_iterator<LIterator, RIterator>& rhs
		)
	SPROUT_NOEXCEPT_IF_EXPR(lhs.swap(rhs))
	{
		lhs.swap(rhs);
	}

	//
	// iterator_next
	//
	template<typename LIterator, typename RIterator>
	inline SPROUT_CONSTEXPR sprout::joint_iterator<LIterator, RIterator>
	iterator_next(sprout::joint_iterator<LIterator, RIterator> const& it) {
		return it.next();
	}

	//
	// iterator_prev
	//
	template<typename LIterator, typename RIterator>
	inline SPROUT_CONSTEXPR sprout::joint_iterator<LIterator, RIterator>
	iterator_prev(sprout::joint_iterator<LIterator, RIterator> const& it) {
		return it.prev();
	}
}	// namespace sprout

#endif	// #ifndef SPROUT_ITERATOR_JOINT_ITERATOR_HPP
/*
 * Copyright (C) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "media_data_source_callback.h"
#include "media_log.h"
#include "media_errors.h"

namespace {
    constexpr OHOS::HiviewDFX::HiLogLabel LABEL = {LOG_CORE, LOG_DOMAIN, "MediaDataSourceImpl"};
}

namespace OHOS {
namespace Media {
// Wraps a JS MediaDataSourceNapi object as a native data-source callback.
// Unwraps the napi object, pins it with a strong reference, and returns the
// shared wrapper; returns nullptr on any validation failure.
std::shared_ptr<MediaDataSourceCallback> MediaDataSourceCallback::Create(napi_env env, napi_value dataSrcNapi)
{
    CHECK_AND_RETURN_RET_LOG(env != nullptr && dataSrcNapi != nullptr, nullptr, "env or src is nullptr");
    MediaDataSourceNapi *data = nullptr;
    napi_status status = napi_unwrap(env, dataSrcNapi, reinterpret_cast<void **>(&data));
    CHECK_AND_RETURN_RET_LOG(status == napi_ok && data != nullptr, nullptr, "unwarp failed");
    // Ensure the JS-side callbacks are set and locked against later change.
    CHECK_AND_RETURN_RET_LOG(data->CallbackCheckAndSetNoChange() == MSERR_OK, nullptr, "check callback failed");
    uint32_t refCount = 1;
    napi_ref ref = nullptr;
    // Hold a strong reference so the JS data source outlives this wrapper.
    status = napi_create_reference(env, dataSrcNapi, refCount, &(ref));
    CHECK_AND_RETURN_RET_LOG(status == napi_ok && ref != nullptr, nullptr, "create ref failed");
    std::shared_ptr<MediaDataSourceCallback> mediaDataSource = std::make_shared<MediaDataSourceCallback>(env, ref, *data);
    // NOTE(review): std::make_shared reports allocation failure by throwing,
    // not by returning null, so this cleanup branch is effectively dead code;
    // kept for safety under no-exception build configurations.
    if (mediaDataSource == nullptr) {
        status = napi_reference_unref(env, ref, &refCount);
        CHECK_AND_RETURN_RET_LOG(status == napi_ok, nullptr, "unref failed");
    }
    return mediaDataSource;
}

MediaDataSourceCallback::MediaDataSourceCallback(napi_env env, napi_ref ref, MediaDataSourceNapi &src)
    : env_(env),
      napiSrcRef_(ref),
      dataSrc_(src)
{
    MEDIA_LOGD("0x%{public}06" PRIXPTR " Instances create", FAKE_POINTER(this));
}

// Releases the strong reference taken in Create().
MediaDataSourceCallback::~MediaDataSourceCallback()
{
    // BUGFIX: previously logged "Instances create" (copy-paste from the
    // constructor), which made create/destroy traces indistinguishable.
    MEDIA_LOGD("0x%{public}06" PRIXPTR " Instances destroy", FAKE_POINTER(this));
    if (napiSrcRef_ != nullptr) {
        uint32_t refCount = 0;
        napi_reference_unref(env_, napiSrcRef_, &refCount);
        napiSrcRef_ = nullptr;
    }
    env_ = nullptr;
}

void MediaDataSourceCallback::Release() const
{
    dataSrc_.Release();
}

// Returns the wrapped JS data-source object, or undefined if the reference
// can no longer be resolved.
napi_value MediaDataSourceCallback::GetDataSrc() const
{
    napi_value undefinedResult = nullptr;
    napi_get_undefined(env_, &undefinedResult);
    napi_value jsResult = nullptr;
    napi_status status = napi_get_reference_value(env_, napiSrcRef_, &jsResult);
    CHECK_AND_RETURN_RET_LOG(status == napi_ok && jsResult != nullptr, undefinedResult, "get reference value fail");
    return jsResult;
}

// Data-source read/size calls are forwarded directly to the JS wrapper.
int32_t MediaDataSourceCallback::ReadAt(int64_t pos, uint32_t length, const std::shared_ptr<AVSharedMemory> &mem)
{
    return dataSrc_.ReadAt(pos, length, mem);
}

int32_t MediaDataSourceCallback::ReadAt(uint32_t length, const std::shared_ptr<AVSharedMemory> &mem)
{
    return dataSrc_.ReadAt(length, mem);
}

int32_t MediaDataSourceCallback::GetSize(int64_t &size)
{
    return dataSrc_.GetSize(size);
}
} // Media
} // OHOS
// BUGFIX: the include guard was broken — the file tested
// __REQUEST_DISPATCHER_HPP__ but defined __REQUEST_DISPATCHER_CPP__, so the
// guard macro was never set and the header was not protected against multiple
// inclusion. The name also used leading double underscores, which are
// reserved for the implementation. Both are fixed with one matching,
// non-reserved macro.
#ifndef HTTP_SERVER_REQUEST_DISPATCHER_HPP
#define HTTP_SERVER_REQUEST_DISPATCHER_HPP

namespace http {
namespace server {

/// Dispatches parsed HTTP requests to their handlers.
/// NOTE(review): currently a stub with no behavior beyond default
/// construction.
class request_dispatcher
{
public:
  request_dispatcher() {}
};

} // namespace server
} // namespace http

#endif // HTTP_SERVER_REQUEST_DISPATCHER_HPP
#pragma once
#include <fc/exception/exception.hpp>
#include <snax/chain/exceptions.hpp>
#include <snax/chain/controller.hpp>
#include <snax/chain/wasm_snax_binary_ops.hpp>
#include <functional>
#include <vector>
#include <iostream>
#include "IR/Module.h"
#include "IR/Operators.h"
#include "WASM/WASM.h"

namespace snax { namespace chain { namespace wasm_validations {

   // ---- module validators ----
   // Each visitor checks one structural property of a parsed WASM module and
   // throws on violation. Definitions live in the corresponding .cpp.

   // effectively do nothing and pass
   struct noop_validation_visitor {
      static void validate( const IR::Module& m );
   };

   // Checks memory declarations against chain constraints.
   struct memories_validation_visitor {
      static void validate( const IR::Module& m );
   };

   // Checks data segments (initialized linear-memory ranges).
   struct data_segments_validation_visitor {
      static void validate( const IR::Module& m );
   };

   // Checks table declarations (indirect-call tables).
   struct tables_validation_visitor {
      static void validate( const IR::Module& m );
   };

   // Checks global variable declarations.
   struct globals_validation_visitor {
      static void validate( const IR::Module& m );
   };

   // Bounds the per-function stack usage.
   struct maximum_function_stack_visitor {
      static void validate( const IR::Module& m );
   };

   // Requires the contract to export an `apply` entry point.
   struct ensure_apply_exported_visitor {
      static void validate( const IR::Module& m );
   };

   using wasm_validate_func = std::function<void(IR::Module&)>;

   // just pass
   struct no_constraints_validators {
      static void validate( const IR::Module& m ) {}
   };

   // ---- instruction validators ----
   // simple mutator that doesn't actually mutate anything
   // used to verify that a given instruction is valid for execution on our platform
   // for validators set kills to true, this eliminates the extraneous building
   // of new code that is going to get thrown away any way
   struct whitelist_validator {
      static constexpr bool kills = true;
      static constexpr bool post = false;
      static void accept( wasm_ops::instr* inst, wasm_ops::visitor_arg& arg ) {
         // just pass
      }
   };

   // Rejects memory loads/stores whose static offset exceeds the maximum
   // linear memory, before the instruction can ever execute.
   template <typename T>
   struct large_offset_validator {
      static constexpr bool kills = true;
      static constexpr bool post = false;
      static void accept( wasm_ops::instr* inst, wasm_ops::visitor_arg& arg ) {
         // cast to a type that has a memarg field
         T* memarg_instr = reinterpret_cast<T*>(inst);
         if(memarg_instr->field.o >= wasm_constraints::maximum_linear_memory)
            FC_THROW_EXCEPTION(wasm_execution_error, "Smart contract used an invalid large memory store/load offset");
      }
   };

   // Debug aid: dumps every decoded instruction to stdout.
   struct debug_printer {
      static constexpr bool kills = false;
      static constexpr bool post = false;
      static void init() {}
      static void accept( wasm_ops::instr* inst, wasm_ops::visitor_arg& arg ) {
         std::cout << "INSTRUCTION : " << inst->to_string() << "\n";
      }
   };

   struct wasm_opcode_no_disposition_exception {
      std::string opcode_name;
   };

   // Unconditionally rejects the instruction (used as the default for opcodes
   // not explicitly whitelisted below).
   struct blacklist_validator {
      static constexpr bool kills = true;
      static constexpr bool post = false;
      static void accept( wasm_ops::instr* inst, wasm_ops::visitor_arg& arg ) {
         FC_THROW_EXCEPTION(wasm_execution_error, "Error, blacklisted opcode ${op} ", ("op", inst->to_string()));
      }
   };

   // Tracks block/loop/if nesting depth across a function body and rejects
   // nesting deeper than 1024. State is static, so init() must be called per
   // validation run; `end` opcodes pop the depth counter.
   // NOTE(review): static mutable state means this is not thread-safe — assumed
   // single-threaded validation; confirm at call sites.
   struct nested_validator {
      static constexpr bool kills = false;
      static constexpr bool post = false;
      static bool disabled;
      static uint16_t depth;
      static void init(bool disable) { disabled = disable; depth = 0; }
      static void accept( wasm_ops::instr* inst, wasm_ops::visitor_arg& arg ) {
         if (!disabled) {
            if ( inst->get_code() == wasm_ops::end_code && depth > 0 ) {
               depth--;
               return;
            }
            depth++;
            SNAX_ASSERT(depth < 1024, wasm_execution_error, "Nested depth exceeded");
         }
      }
   };

   // add opcode specific constraints here
   // so far we only black list
   // Every opcode not listed below inherits blacklist_validator (the template
   // default) and is therefore rejected.
   struct op_constrainers : wasm_ops::op_types<blacklist_validator> {
      // Structured-control opcodes additionally track nesting depth.
      using block_t           = wasm_ops::block           <whitelist_validator, nested_validator>;
      using loop_t            = wasm_ops::loop            <whitelist_validator, nested_validator>;
      using if__t             = wasm_ops::if_             <whitelist_validator, nested_validator>;
      using else__t           = wasm_ops::else_           <whitelist_validator, nested_validator>;
      using end_t             = wasm_ops::end             <whitelist_validator, nested_validator>;

      using unreachable_t     = wasm_ops::unreachable     <whitelist_validator>;
      using br_t              = wasm_ops::br              <whitelist_validator>;
      using br_if_t           = wasm_ops::br_if           <whitelist_validator>;
      using br_table_t        = wasm_ops::br_table        <whitelist_validator>;
      using return__t         = wasm_ops::return_         <whitelist_validator>;
      using call_t            = wasm_ops::call            <whitelist_validator>;
      using call_indirect_t   = wasm_ops::call_indirect   <whitelist_validator>;
      using drop_t            = wasm_ops::drop            <whitelist_validator>;
      using select_t          = wasm_ops::select          <whitelist_validator>;
      using get_local_t       = wasm_ops::get_local       <whitelist_validator>;
      using set_local_t       = wasm_ops::set_local       <whitelist_validator>;
      using tee_local_t       = wasm_ops::tee_local       <whitelist_validator>;
      using get_global_t      = wasm_ops::get_global      <whitelist_validator>;
      using set_global_t      = wasm_ops::set_global      <whitelist_validator>;
      using grow_memory_t     = wasm_ops::grow_memory     <whitelist_validator>;
      using current_memory_t  = wasm_ops::current_memory  <whitelist_validator>;
      using nop_t             = wasm_ops::nop             <whitelist_validator>;

      // Memory access opcodes additionally check their static offset.
      using i32_load_t        = wasm_ops::i32_load        <large_offset_validator<wasm_ops::op_types<>::i32_load_t>, whitelist_validator>;
      using i64_load_t        = wasm_ops::i64_load        <large_offset_validator<wasm_ops::op_types<>::i64_load_t>, whitelist_validator>;
      using f32_load_t        = wasm_ops::f32_load        <large_offset_validator<wasm_ops::op_types<>::f32_load_t>, whitelist_validator>;
      using f64_load_t        = wasm_ops::f64_load        <large_offset_validator<wasm_ops::op_types<>::f64_load_t>, whitelist_validator>;
      using i32_load8_s_t     = wasm_ops::i32_load8_s     <large_offset_validator<wasm_ops::op_types<>::i32_load8_s_t>, whitelist_validator>;
      using i32_load8_u_t     = wasm_ops::i32_load8_u     <large_offset_validator<wasm_ops::op_types<>::i32_load8_u_t>, whitelist_validator>;
      using i32_load16_s_t    = wasm_ops::i32_load16_s    <large_offset_validator<wasm_ops::op_types<>::i32_load16_s_t>, whitelist_validator>;
      using i32_load16_u_t    = wasm_ops::i32_load16_u    <large_offset_validator<wasm_ops::op_types<>::i32_load16_u_t>, whitelist_validator>;
      using i64_load8_s_t     = wasm_ops::i64_load8_s     <large_offset_validator<wasm_ops::op_types<>::i64_load8_s_t>, whitelist_validator>;
      using i64_load8_u_t     = wasm_ops::i64_load8_u     <large_offset_validator<wasm_ops::op_types<>::i64_load8_u_t>, whitelist_validator>;
      using i64_load16_s_t    = wasm_ops::i64_load16_s    <large_offset_validator<wasm_ops::op_types<>::i64_load16_s_t>, whitelist_validator>;
      using i64_load16_u_t    = wasm_ops::i64_load16_u    <large_offset_validator<wasm_ops::op_types<>::i64_load16_u_t>, whitelist_validator>;
      using i64_load32_s_t    = wasm_ops::i64_load32_s    <large_offset_validator<wasm_ops::op_types<>::i64_load32_s_t>, whitelist_validator>;
      using i64_load32_u_t    = wasm_ops::i64_load32_u    <large_offset_validator<wasm_ops::op_types<>::i64_load32_u_t>, whitelist_validator>;
      using i32_store_t       = wasm_ops::i32_store       <large_offset_validator<wasm_ops::op_types<>::i32_store_t>, whitelist_validator>;
      using i64_store_t       = wasm_ops::i64_store       <large_offset_validator<wasm_ops::op_types<>::i64_store_t>, whitelist_validator>;
      using f32_store_t       = wasm_ops::f32_store       <large_offset_validator<wasm_ops::op_types<>::f32_store_t>, whitelist_validator>;
      using f64_store_t       = wasm_ops::f64_store       <large_offset_validator<wasm_ops::op_types<>::f64_store_t>, whitelist_validator>;
      using i32_store8_t      = wasm_ops::i32_store8      <large_offset_validator<wasm_ops::op_types<>::i32_store8_t>, whitelist_validator>;
      using i32_store16_t     = wasm_ops::i32_store16     <large_offset_validator<wasm_ops::op_types<>::i32_store16_t>, whitelist_validator>;
      using i64_store8_t      = wasm_ops::i64_store8      <large_offset_validator<wasm_ops::op_types<>::i64_store8_t>, whitelist_validator>;
      using i64_store16_t     = wasm_ops::i64_store16     <large_offset_validator<wasm_ops::op_types<>::i64_store16_t>, whitelist_validator>;
      using i64_store32_t     = wasm_ops::i64_store32     <large_offset_validator<wasm_ops::op_types<>::i64_store32_t>, whitelist_validator>;

      // Constants, comparisons and arithmetic: plain whitelist.
      using i32_const_t       = wasm_ops::i32_const       <whitelist_validator>;
      using i64_const_t       = wasm_ops::i64_const       <whitelist_validator>;
      using f32_const_t       = wasm_ops::f32_const       <whitelist_validator>;
      using f64_const_t       = wasm_ops::f64_const       <whitelist_validator>;
      using i32_eqz_t         = wasm_ops::i32_eqz         <whitelist_validator>;
      using i32_eq_t          = wasm_ops::i32_eq          <whitelist_validator>;
      using i32_ne_t          = wasm_ops::i32_ne          <whitelist_validator>;
      using i32_lt_s_t        = wasm_ops::i32_lt_s        <whitelist_validator>;
      using i32_lt_u_t        = wasm_ops::i32_lt_u        <whitelist_validator>;
      using i32_gt_s_t        = wasm_ops::i32_gt_s        <whitelist_validator>;
      using i32_gt_u_t        = wasm_ops::i32_gt_u        <whitelist_validator>;
      using i32_le_s_t        = wasm_ops::i32_le_s        <whitelist_validator>;
      using i32_le_u_t        = wasm_ops::i32_le_u        <whitelist_validator>;
      using i32_ge_s_t        = wasm_ops::i32_ge_s        <whitelist_validator>;
      using i32_ge_u_t        = wasm_ops::i32_ge_u        <whitelist_validator>;
      using i32_clz_t         = wasm_ops::i32_clz         <whitelist_validator>;
      using i32_ctz_t         = wasm_ops::i32_ctz         <whitelist_validator>;
      using i32_popcnt_t      = wasm_ops::i32_popcnt      <whitelist_validator>;
      using i32_add_t         = wasm_ops::i32_add         <whitelist_validator>;
      using i32_sub_t         = wasm_ops::i32_sub         <whitelist_validator>;
      using i32_mul_t         = wasm_ops::i32_mul         <whitelist_validator>;
      using i32_div_s_t       = wasm_ops::i32_div_s       <whitelist_validator>;
      using i32_div_u_t       = wasm_ops::i32_div_u       <whitelist_validator>;
      using i32_rem_s_t       = wasm_ops::i32_rem_s       <whitelist_validator>;
      using i32_rem_u_t       = wasm_ops::i32_rem_u       <whitelist_validator>;
      using i32_and_t         = wasm_ops::i32_and         <whitelist_validator>;
      using i32_or_t          = wasm_ops::i32_or          <whitelist_validator>;
      using i32_xor_t         = wasm_ops::i32_xor         <whitelist_validator>;
      using i32_shl_t         = wasm_ops::i32_shl         <whitelist_validator>;
      using i32_shr_s_t       = wasm_ops::i32_shr_s       <whitelist_validator>;
      using i32_shr_u_t       = wasm_ops::i32_shr_u       <whitelist_validator>;
      using i32_rotl_t        = wasm_ops::i32_rotl        <whitelist_validator>;
      using i32_rotr_t        = wasm_ops::i32_rotr        <whitelist_validator>;
      using i64_eqz_t         = wasm_ops::i64_eqz         <whitelist_validator>;
      using i64_eq_t          = wasm_ops::i64_eq          <whitelist_validator>;
      using i64_ne_t          = wasm_ops::i64_ne          <whitelist_validator>;
      using i64_lt_s_t        = wasm_ops::i64_lt_s        <whitelist_validator>;
      using i64_lt_u_t        = wasm_ops::i64_lt_u        <whitelist_validator>;
      using i64_gt_s_t        = wasm_ops::i64_gt_s        <whitelist_validator>;
      using i64_gt_u_t        = wasm_ops::i64_gt_u        <whitelist_validator>;
      using i64_le_s_t        = wasm_ops::i64_le_s        <whitelist_validator>;
      using i64_le_u_t        = wasm_ops::i64_le_u        <whitelist_validator>;
      using i64_ge_s_t        = wasm_ops::i64_ge_s        <whitelist_validator>;
      using i64_ge_u_t        = wasm_ops::i64_ge_u        <whitelist_validator>;
      using i64_clz_t         = wasm_ops::i64_clz         <whitelist_validator>;
      using i64_ctz_t         = wasm_ops::i64_ctz         <whitelist_validator>;
      using i64_popcnt_t      = wasm_ops::i64_popcnt      <whitelist_validator>;
      using i64_add_t         = wasm_ops::i64_add         <whitelist_validator>;
      using i64_sub_t         = wasm_ops::i64_sub         <whitelist_validator>;
      using i64_mul_t         = wasm_ops::i64_mul         <whitelist_validator>;
      using i64_div_s_t       = wasm_ops::i64_div_s       <whitelist_validator>;
      using i64_div_u_t       = wasm_ops::i64_div_u       <whitelist_validator>;
      using i64_rem_s_t       = wasm_ops::i64_rem_s       <whitelist_validator>;
      using i64_rem_u_t       = wasm_ops::i64_rem_u       <whitelist_validator>;
      using i64_and_t         = wasm_ops::i64_and         <whitelist_validator>;
      using i64_or_t          = wasm_ops::i64_or          <whitelist_validator>;
      using i64_xor_t         = wasm_ops::i64_xor         <whitelist_validator>;
      using i64_shl_t         = wasm_ops::i64_shl         <whitelist_validator>;
      using i64_shr_s_t       = wasm_ops::i64_shr_s       <whitelist_validator>;
      using i64_shr_u_t       = wasm_ops::i64_shr_u       <whitelist_validator>;
      using i64_rotl_t        = wasm_ops::i64_rotl        <whitelist_validator>;
      using i64_rotr_t        = wasm_ops::i64_rotr        <whitelist_validator>;
      using f32_eq_t          = wasm_ops::f32_eq          <whitelist_validator>;
      using f32_ne_t          = wasm_ops::f32_ne          <whitelist_validator>;
      using f32_lt_t          = wasm_ops::f32_lt          <whitelist_validator>;
      using f32_gt_t          = wasm_ops::f32_gt          <whitelist_validator>;
      using f32_le_t          = wasm_ops::f32_le          <whitelist_validator>;
      using f32_ge_t          = wasm_ops::f32_ge          <whitelist_validator>;
      using f64_eq_t          = wasm_ops::f64_eq          <whitelist_validator>;
      using f64_ne_t          = wasm_ops::f64_ne          <whitelist_validator>;
      using f64_lt_t          = wasm_ops::f64_lt          <whitelist_validator>;
      using f64_gt_t          = wasm_ops::f64_gt          <whitelist_validator>;
      using f64_le_t          = wasm_ops::f64_le          <whitelist_validator>;
      using f64_ge_t          = wasm_ops::f64_ge          <whitelist_validator>;
      using f32_abs_t         = wasm_ops::f32_abs         <whitelist_validator>;
      using f32_neg_t         = wasm_ops::f32_neg         <whitelist_validator>;
      using f32_ceil_t        = wasm_ops::f32_ceil        <whitelist_validator>;
      using f32_floor_t       = wasm_ops::f32_floor       <whitelist_validator>;
      using f32_trunc_t       = wasm_ops::f32_trunc       <whitelist_validator>;
      using f32_nearest_t     = wasm_ops::f32_nearest     <whitelist_validator>;
      using f32_sqrt_t        = wasm_ops::f32_sqrt        <whitelist_validator>;
      using f32_add_t         = wasm_ops::f32_add         <whitelist_validator>;
      using f32_sub_t         = wasm_ops::f32_sub         <whitelist_validator>;
      using f32_mul_t         = wasm_ops::f32_mul         <whitelist_validator>;
      using f32_div_t         = wasm_ops::f32_div         <whitelist_validator>;
      using f32_min_t         = wasm_ops::f32_min         <whitelist_validator>;
      using f32_max_t         = wasm_ops::f32_max         <whitelist_validator>;
      using f32_copysign_t    = wasm_ops::f32_copysign    <whitelist_validator>;
      using f64_abs_t         = wasm_ops::f64_abs         <whitelist_validator>;
      using f64_neg_t         = wasm_ops::f64_neg         <whitelist_validator>;
      using f64_ceil_t        = wasm_ops::f64_ceil        <whitelist_validator>;
      using f64_floor_t       = wasm_ops::f64_floor       <whitelist_validator>;
      using f64_trunc_t       = wasm_ops::f64_trunc       <whitelist_validator>;
      using f64_nearest_t     = wasm_ops::f64_nearest     <whitelist_validator>;
      using f64_sqrt_t        = wasm_ops::f64_sqrt        <whitelist_validator>;
      using f64_add_t         = wasm_ops::f64_add         <whitelist_validator>;
      using f64_sub_t         = wasm_ops::f64_sub         <whitelist_validator>;
      using f64_mul_t         = wasm_ops::f64_mul         <whitelist_validator>;
      using f64_div_t         = wasm_ops::f64_div         <whitelist_validator>;
      using f64_min_t         = wasm_ops::f64_min         <whitelist_validator>;
      using f64_max_t         = wasm_ops::f64_max         <whitelist_validator>;
      using f64_copysign_t    = wasm_ops::f64_copysign    <whitelist_validator>;

      // Conversions between numeric types.
      using i32_trunc_s_f32_t = wasm_ops::i32_trunc_s_f32 <whitelist_validator>;
      using i32_trunc_u_f32_t = wasm_ops::i32_trunc_u_f32 <whitelist_validator>;
      using i32_trunc_s_f64_t = wasm_ops::i32_trunc_s_f64 <whitelist_validator>;
      using i32_trunc_u_f64_t = wasm_ops::i32_trunc_u_f64 <whitelist_validator>;
      using i64_trunc_s_f32_t = wasm_ops::i64_trunc_s_f32 <whitelist_validator>;
      using i64_trunc_u_f32_t = wasm_ops::i64_trunc_u_f32 <whitelist_validator>;
      using i64_trunc_s_f64_t = wasm_ops::i64_trunc_s_f64 <whitelist_validator>;
      using i64_trunc_u_f64_t = wasm_ops::i64_trunc_u_f64 <whitelist_validator>;
      using f32_convert_s_i32_t = wasm_ops::f32_convert_s_i32 <whitelist_validator>;
      using f32_convert_u_i32_t = wasm_ops::f32_convert_u_i32 <whitelist_validator>;
      using f32_convert_s_i64_t = wasm_ops::f32_convert_s_i64 <whitelist_validator>;
      using f32_convert_u_i64_t = wasm_ops::f32_convert_u_i64 <whitelist_validator>;
      using f32_demote_f64_t    = wasm_ops::f32_demote_f64    <whitelist_validator>;
      using f64_convert_s_i32_t = wasm_ops::f64_convert_s_i32 <whitelist_validator>;
      using f64_convert_u_i32_t = wasm_ops::f64_convert_u_i32 <whitelist_validator>;
      using f64_convert_s_i64_t = wasm_ops::f64_convert_s_i64 <whitelist_validator>;
      using f64_convert_u_i64_t = wasm_ops::f64_convert_u_i64 <whitelist_validator>;
      using f64_promote_f32_t   = wasm_ops::f64_promote_f32   <whitelist_validator>;
      using i32_wrap_i64_t      = wasm_ops::i32_wrap_i64      <whitelist_validator>;
      using i64_extend_s_i32_t  = wasm_ops::i64_extend_s_i32  <whitelist_validator>;
      using i64_extend_u_i32_t  = wasm_ops::i64_extend_u_i32  <whitelist_validator>;
      // TODO, make sure these are just pointer reinterprets
      using i32_reinterpret_f32_t = wasm_ops::i32_reinterpret_f32 <whitelist_validator>;
      using f32_reinterpret_i32_t = wasm_ops::f32_reinterpret_i32 <whitelist_validator>;
      using i64_reinterpret_f64_t = wasm_ops::i64_reinterpret_f64 <whitelist_validator>;
      using f64_reinterpret_i64_t = wasm_ops::f64_reinterpret_i64 <whitelist_validator>;
   }; // op_constrainers

   // Runs every Visitor's validate() over the module, in declaration order.
   template <typename ... Visitors>
   struct constraints_validators {
      static void validate( const IR::Module& m ) {
         for ( auto validator : { Visitors::validate... } )
            validator( m );
      }
   };

   // inherit from this class and define your own validators
   class wasm_binary_validation {
      using standard_module_constraints_validators = constraints_validators<
         memories_validation_visitor,
         data_segments_validation_visitor,
         tables_validation_visitor,
         globals_validation_visitor,
         maximum_function_stack_visitor,
         ensure_apply_exported_visitor>;
      public:
         // Nesting-depth checking is only enforced while producing blocks.
         wasm_binary_validation( const snax::chain::controller& control, IR::Module& mod ) : _module( &mod ) {
            // initialize validators here
            nested_validator::init(!control.is_producing_block());
         }

         // Runs module-level validators, then decodes every function body and
         // lets op_constrainers visit (and possibly reject) each instruction.
         void validate() {
            _module_validators.validate( *_module );
            for ( auto& fd : _module->functions.defs ) {
               wasm_ops::SNAX_OperatorDecoderStream<op_constrainers> decoder(fd.code);
               while ( decoder ) {
                  wasm_ops::instruction_stream new_code(0);
                  auto op = decoder.decodeOp();
                  op->visit( { _module, &new_code, &fd, decoder.index() } );
               }
            }
         }
      private:
         IR::Module* _module;
         static standard_module_constraints_validators _module_validators;
   };

}}} // namespace wasm_constraints, chain, snax
#include <Rcpp.h>
#include "sfheaders/cast/sfg_cast.hpp"
#include "sfheaders/cast/sfc_cast.hpp"
#include "sfheaders/cast/sf_cast.hpp"
// #include "sfheaders/cast/list_cast.hpp"
#include "sfheaders/sfc/sfc.hpp"

// Number of objects an sfg geometry becomes when cast to `cast_to`,
// returned as a length-one integer vector.
// [[Rcpp::export]]
Rcpp::IntegerVector rcpp_count_new_objects( SEXP sfg, std::string cast_to ) {
  R_xlen_t n_new = sfheaders::cast::count_new_objects( sfg, cast_to );
  Rcpp::IntegerVector counts( 1 );
  counts[0] = n_new;
  return counts;
}

// Per-element object counts for a whole sfc column.
// [[Rcpp::export]]
Rcpp::IntegerVector rcpp_count_new_sfc_objects( Rcpp::List sfc, std::string cast_to ) {
  return sfheaders::cast::count_new_sfc_objects( sfc, cast_to );
}

// Casts an sfc column to `cast_to`; clones first so the caller's
// input list is left untouched.
// [[Rcpp::export]]
Rcpp::List rcpp_cast_sfc( Rcpp::List sfc, std::string cast_to, bool close = true ) {
  Rcpp::List working_copy = Rcpp::clone( sfc );
  return sfheaders::cast::cast_sfc( working_copy, cast_to, close );
}

// Casts the geometry column of an sf data.frame, preserving list columns.
// [[Rcpp::export]]
Rcpp::DataFrame rcpp_cast_sf( Rcpp::DataFrame sf, std::string cast_to, SEXP list_columns, bool close = true ) {
  return sfheaders::cast::cast_sf( sf, cast_to, list_columns, close );
}

// // [[Rcpp::export]]
// SEXP rcpp_cast_remove_one( Rcpp::List lst ) {
//   return sfheaders::cast::remove_one( lst );
// }
#include "modules/reliable_module.h"
#include "builder/static_var.h"
#include "blocks/rce.h"
#include "modules/inorder_module.h"
#include "modules/identifier_module.h"

// Reliability module: generated code retransmits unACKed packets on a timer
// and ACKs received data packets.
// NOTE: this is staged (BuildIt-style) code — builder::dyn_var operations
// emit code rather than executing; statement order is load-bearing.
namespace net_blocks {

reliable_module reliable_module::instance;

void reliable_module::init_module(void) {
	// This buffer helps us find waiting packets when ACKs arrive
	conn_layout.register_member<builder::dyn_var<void*[REDELIVERY_BUFFER_SIZE]>>("redelivery_buffer");
	conn_layout.register_member<builder::dyn_var<unsigned int>>("first_unacked_seq");
	// Header fields: ACKed sequence number (on the wire, aligned, 4 bytes)
	// and the pending redelivery timer handle (not serialized: size 0).
	net_packet.add_member("ack_sequence_number", new generic_integer_member<unsigned int>((int)member_flags::aligned), 4);
	net_packet.add_member("redelivery_timer", new generic_integer_member<unsigned long long>((int)member_flags::aligned), 0);
	// Must run after in-order delivery (needs sequence numbers) and, on
	// ingress, after the flow has been identified.
	m_establish_depends = {&inorder_module::instance};
	m_destablish_depends = {&inorder_module::instance};
	m_send_depends = {&inorder_module::instance};
	m_ingress_depends = {&identifier_module::instance};
	framework::instance.register_module(this);
}

// On connection establish: clear the redelivery buffer slots.
module::hook_status reliable_module::hook_establish(builder::dyn_var<connection_t*> c, builder::dyn_var<char*> remote_host,
		builder::dyn_var<unsigned int> remote_app, builder::dyn_var<unsigned int> local_app) {
	for (builder::dyn_var<int> i = 0; i < REDELIVERY_BUFFER_SIZE; i = i + 1) {
		conn_layout.get(c, "redelivery_buffer")[i] = 0;
	}
	return module::hook_status::HOOK_CONTINUE;
}

// Handle to the generated C callback; referenced before its body is generated.
static builder::dyn_var<void(timer_t*, void*, unsigned long long)> redelivery_timer_callback("nb__reliable_redelivery_timer_cb");

// Timer callback body: resend the stored packet and re-arm the timer.
static void redelivery_cb(builder::dyn_var<runtime::timer_t*> t, builder::dyn_var<void*> param,
		builder::dyn_var<unsigned long long> to) {
	packet_t p = param;
	builder::dyn_var<int> size = net_packet["total_len"]->get_integer(p);
	runtime::send_packet(p + get_headroom(), size);
	runtime::insert_timer(t, to + REDELIVERY_TIMEOUT_MS, redelivery_timer_callback, p);
}

// Extracts, optimizes and emits C code for the redelivery callback.
void reliable_module::gen_timer_callback(std::ostream &oss) {
	auto ast = builder::builder_context().extract_function_ast(redelivery_cb, "nb__reliable_redelivery_timer_cb");
	block::eliminate_redundant_vars(ast);
	block::c_code_generator::generate_code(ast, oss);
}

module::hook_status reliable_module::hook_send(builder::dyn_var<connection_t*> c, packet_t p,
		builder::dyn_var<char*> buff, builder::dyn_var<unsigned int> len, builder::dyn_var<int*> ret_len) {
	// We are about to send a new packet, put it in the redelivery buffer
	// Slot is the sequence number modulo the buffer size; an in-flight packet
	// at the same slot would be overwritten — presumably bounded elsewhere by
	// the window size. TODO confirm.
	builder::dyn_var<unsigned int> index = net_packet["sequence_number"]->get_integer(p) % REDELIVERY_BUFFER_SIZE;
	conn_layout.get(c, "redelivery_buffer")[index] = p;
	builder::dyn_var<runtime::timer_t*> t = runtime::alloc_timer();
	runtime::insert_timer(t, runtime::get_time_ms_now() + REDELIVERY_TIMEOUT_MS, redelivery_timer_callback, p);
	net_packet["redelivery_timer"]->set_integer(p, runtime::to_ull(t));
	// 0 marks a data packet (non-zero ack_sequence_number marks a pure ACK).
	net_packet["ack_sequence_number"]->set_integer(p, 0);
	return module::hook_status::HOOK_CONTINUE;
}

module::hook_status reliable_module::hook_ingress(packet_t p) {
	builder::dyn_var<unsigned int> ack_seq = net_packet["ack_sequence_number"]->get_integer(p);
	builder::dyn_var<connection_t*> c = runtime::to_void_ptr(net_packet["flow_identifier"]->get_integer(p));
	if (ack_seq != 0) {
		// Pure ACK: cancel the redelivery timer, free the slot, drop the packet.
		builder::dyn_var<unsigned int> index = ack_seq % REDELIVERY_BUFFER_SIZE;
		packet_t p_rem = conn_layout.get(c, "redelivery_buffer")[index];
		builder::dyn_var<runtime::timer_t*> t = runtime::to_void_ptr(net_packet["redelivery_timer"]->get_integer(p_rem));
		runtime::remove_timer(t);
		runtime::return_timer(t);
		conn_layout.get(c, "redelivery_buffer")[index] = 0;
		return module::hook_status::HOOK_DROP;
	}
	builder::dyn_var<unsigned int> seq = net_packet["sequence_number"]->get_integer(p);
	// This is a normal packet, send an ACK
	packet_t p_ack = runtime::request_send_buffer();
	// Invoke the send path from after this module
	builder::dyn_var<char[1]> buff = {0};
	builder::dyn_var<int> ret_len;
	builder::dyn_var<int*> ret_len_ptr = &ret_len;
	net_packet["ack_sequence_number"]->set_integer(p_ack, seq);
	// Ack packets have size = 1
	net_packet["total_len"]->set_integer(p_ack, net_packet.get_total_size() - get_headroom());
	// Re-run the send hook chain starting just past this module so the ACK
	// itself does not get a redelivery timer.
	for (builder::static_var<unsigned int> i = m_sequence + 1; i < framework::instance.m_registered_modules.size(); i++) {
		builder::static_var<int> s = (int)framework::instance.m_registered_modules[i]->hook_send(c, p_ack, buff, 1, ret_len_ptr);
		if (s == (int)module::hook_status::HOOK_DROP)
			break;
	}
	return module::hook_status::HOOK_CONTINUE;
}

}
/* * (C) Copyright IBM Corporation 2018, 2019. All rights reserved. * US Government Users Restricted Rights - Use, duplication or disclosure restricted by GSA ADP Schedule Contract with IBM Corp. */ /* * Hopscotch hash table - template Key, Value, and allocators */ #include <type_traits> /* remove_const */ #include <utility> /* move */ #include "perishable.h" /* * ===== content ===== */ #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Weffc++" template <typename Value> impl::content<Value>::content() : _state(FREE) , _v() #if TRACK_OWNER , _owner(owner_undefined) #endif {} #pragma GCC diagnostic pop template <typename Value> void impl::content<Value>::set_owner(owner_t #if TRACK_OWNER owner_ #endif ) { #if TRACK_OWNER _owner = owner_; #endif } template <typename Value> auto impl::content<Value>::erase() -> void { if ( _state != FREE ) { _v._value.~value_t(); _state = FREE; } set_owner(owner_undefined); } template <typename Value> auto impl::content<Value>::content_share( const content &sr_ , std::size_t bi_ ) -> content & { using k_t = typename value_t::first_type; using m_t = typename value_t::second_type; new (&const_cast<std::remove_const_t<k_t> &>(_v._value.first)) k_t(sr_._v._value.first) ; new (&_v._value.second) m_t(sr_._v._value.second); set_owner(bi_); return *this; } template <typename Value> auto impl::content<Value>::content_share( content &from_ ) -> content & { using k_t = typename value_t::first_type; using m_t = typename value_t::second_type; assert(_state == FREE); new (&const_cast<std::remove_const_t<k_t> &>(_v._value.first)) k_t(from_._v._value.first) ; new (&_v._value.second) m_t(from_._v._value.second); set_owner(from_.get_owner()); return *this; } template <typename Value> template <typename ... Args> auto impl::content<Value>::content_construct( std::size_t bi_ , Args && ... args_ ) -> content & { assert(_state == FREE); new (&_v._value) Value(std::forward<Args>(args_)...); set_owner(bi_); return *this; }
/*

Copyright (c) 2011, Arvid Norberg
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the distribution.
    * Neither the name of the author nor the names of
      its contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

*/

#include "libtorrent/session.hpp"
#include "libtorrent/session_settings.hpp"
#include "libtorrent/hasher.hpp"
#include "libtorrent/alert_types.hpp"
#include "libtorrent/ip_filter.hpp"
#include "libtorrent/thread.hpp"
#include <boost/tuple/tuple.hpp>
#include <iostream>

#include "test.hpp"
#include "setup_transfer.hpp"
#include "settings.hpp"

// Exercises the rate-based auto-unchoker: a 3-peer swarm where the seed
// (ses1) starts with one unchoke slot and the test waits for the choker
// to open at least a second slot.
void test_swarm()
{
	using namespace libtorrent;
	namespace lt = libtorrent;

	// these are declared before the session objects
	// so that they are destructed last. This enables
	// the sessions to destruct in parallel
	session_proxy p1;
	session_proxy p2;
	session_proxy p3;

	// this is to avoid everything finish from a single peer
	// immediately. To make the swarm actually connect all
	// three peers before finishing.
	float rate_limit = 50000;

	settings_pack pack = settings();
	// run the choker once per second, to make it more likely to actually trigger
	// during the test.
	pack.set_int(settings_pack::unchoke_interval, 1);

	pack.set_bool(settings_pack::allow_multiple_connections_per_ip, true);
	pack.set_int(settings_pack::choking_algorithm, settings_pack::rate_based_choker);
	pack.set_int(settings_pack::upload_rate_limit, rate_limit);
	pack.set_int(settings_pack::unchoke_slots_limit, 1);
	pack.set_int(settings_pack::max_retry_port_bind, 900);
	pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:48010");
	pack.set_bool(settings_pack::enable_natpmp, false);
	pack.set_bool(settings_pack::enable_upnp, false);
	pack.set_bool(settings_pack::enable_dht, false);
#ifndef TORRENT_NO_DEPRECATE
	pack.set_bool(settings_pack::rate_limit_utp, true);
#endif
	pack.set_int(settings_pack::out_enc_policy, settings_pack::pe_forced);
	pack.set_int(settings_pack::in_enc_policy, settings_pack::pe_forced);

	lt::session ses1(pack);

	// downloaders get lower limits and the fixed-slots choker.
	pack.set_int(settings_pack::upload_rate_limit, rate_limit / 10);
	pack.set_int(settings_pack::download_rate_limit, rate_limit / 5);
	pack.set_int(settings_pack::unchoke_slots_limit, 0);
	pack.set_int(settings_pack::choking_algorithm, settings_pack::fixed_slots_choker);
	pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:49010");

	lt::session ses2(pack);

	// NOTE(review): ses3 reuses ses2's port (49010); sibling tests use a
	// distinct port (e.g. 50010) here. This only works because
	// max_retry_port_bind lets ses3 rebind elsewhere — confirm intent.
	pack.set_str(settings_pack::listen_interfaces, "0.0.0.0:49010");

	lt::session ses3(pack);

	torrent_handle tor1;
	torrent_handle tor2;
	torrent_handle tor3;

	boost::tie(tor1, tor2, tor3) = setup_transfer(&ses1, &ses2, &ses3, true, false, true, "_unchoke");

	std::map<std::string, boost::int64_t> cnt = get_counters(ses1);
	fprintf(stderr, "allowed_upload_slots: %d\n", int(cnt["ses.num_unchoke_slots"]));
	TEST_EQUAL(cnt["ses.num_unchoke_slots"], 1);
	// Poll up to ~20s for the rate-based choker to grow the slot count.
	for (int i = 0; i < 200; ++i)
	{
		print_alerts(ses1, "ses1");
		print_alerts(ses2, "ses2");
		print_alerts(ses3, "ses3");

		cnt = get_counters(ses1);
		fprintf(stderr, "allowed unchoked: %d\n", int(cnt["ses.num_unchoke_slots"]));
		if (cnt["ses.num_unchoke_slots"] >= 2) break;

		torrent_status st1 = tor1.status();
		torrent_status st2 = tor2.status();
		torrent_status st3 = tor3.status();

		print_ses_rate(i / 10.f, &st1, &st2, &st3);

		test_sleep(100);
	}

	TEST_CHECK(cnt["ses.num_unchoke_slots"] >= 2);

	// make sure the files are deleted
	ses1.remove_torrent(tor1, lt::session::delete_files);
	ses2.remove_torrent(tor2, lt::session::delete_files);
	ses3.remove_torrent(tor3, lt::session::delete_files);

	// this allows shutting down the sessions in parallel
	p1 = ses1.abort();
	p2 = ses2.abort();
	p3 = ses3.abort();
}

// Test entry point: clean stale directories, run the swarm, verify the
// downloaded files were removed, then clean up again.
TORRENT_TEST(auto_unchoke)
{
	using namespace libtorrent;

	// in case the previous run was terminated
	error_code ec;
	remove_all("./tmp1_unchoke", ec);
	remove_all("./tmp2_unchoke", ec);
	remove_all("./tmp3_unchoke", ec);

	test_swarm();

	TEST_CHECK(!exists("./tmp1_unchoke/temporary"));
	TEST_CHECK(!exists("./tmp2_unchoke/temporary"));
	TEST_CHECK(!exists("./tmp3_unchoke/temporary"));

	remove_all("./tmp1_unchoke", ec);
	remove_all("./tmp2_unchoke", ec);
	remove_all("./tmp3_unchoke", ec);
}
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/pepper/pepper_platform_audio_input_impl.h"

#include "base/bind.h"
#include "base/logging.h"
#include "base/message_loop_proxy.h"
#include "build/build_config.h"
#include "content/common/child_process.h"
#include "content/common/media/audio_messages.h"
#include "content/renderer/media/audio_input_message_filter.h"
#include "content/renderer/pepper/pepper_plugin_delegate_impl.h"
#include "content/renderer/render_thread_impl.h"
#include "media/audio/audio_manager_base.h"

namespace content {

// Factory: constructs and initializes an instance on the main thread.
// On success an extra reference is taken; ownership is returned to the
// caller as a raw pointer. Returns NULL if initialization fails.
// static
PepperPlatformAudioInputImpl* PepperPlatformAudioInputImpl::Create(
    const base::WeakPtr<PepperPluginDelegateImpl>& plugin_delegate,
    const std::string& device_id,
    int sample_rate,
    int frames_per_buffer,
    webkit::ppapi::PluginDelegate::PlatformAudioInputClient* client) {
  scoped_refptr<PepperPlatformAudioInputImpl> audio_input(
      new PepperPlatformAudioInputImpl());
  if (audio_input->Initialize(plugin_delegate, device_id, sample_rate,
                              frames_per_buffer, client)) {
    // Balanced by Release invoked in
    // PepperPlatformAudioInputImpl::ShutDownOnIOThread().
    audio_input->AddRef();
    return audio_input.get();
  }
  return NULL;
}

// Main-thread entry point; the actual IPC work is bounced to the I/O thread.
void PepperPlatformAudioInputImpl::StartCapture() {
  DCHECK(main_message_loop_proxy_->BelongsToCurrentThread());

  ChildProcess::current()->io_message_loop()->PostTask(
      FROM_HERE,
      base::Bind(&PepperPlatformAudioInputImpl::StartCaptureOnIOThread, this));
}

// Main-thread entry point; the actual IPC work is bounced to the I/O thread.
void PepperPlatformAudioInputImpl::StopCapture() {
  DCHECK(main_message_loop_proxy_->BelongsToCurrentThread());

  ChildProcess::current()->io_message_loop()->PostTask(
      FROM_HERE,
      base::Bind(&PepperPlatformAudioInputImpl::StopCaptureOnIOThread, this));
}

void PepperPlatformAudioInputImpl::ShutDown() {
  DCHECK(main_message_loop_proxy_->BelongsToCurrentThread());

  // Called on the main thread to stop all audio callbacks. We must only change
  // the client on the main thread, and the delegates from the I/O thread.
  client_ = NULL;
  ChildProcess::current()->io_message_loop()->PostTask(
      FROM_HERE,
      base::Bind(&PepperPlatformAudioInputImpl::ShutDownOnIOThread, this));
}

// IPC callback: the browser created the shared-memory audio stream.
// May arrive on the I/O thread; in that case it re-posts itself to the main
// thread, where |client_| may safely be dereferenced.
void PepperPlatformAudioInputImpl::OnStreamCreated(
    base::SharedMemoryHandle handle,
    base::SyncSocket::Handle socket_handle,
    int length,
    int total_segments) {
#if defined(OS_WIN)
  DCHECK(handle);
  DCHECK(socket_handle);
#else
  DCHECK_NE(-1, handle.fd);
  DCHECK_NE(-1, socket_handle);
#endif
  DCHECK(length);
  // TODO(yzshen): Make use of circular buffer scheme. crbug.com/181449.
  DCHECK_EQ(1, total_segments);

  if (base::MessageLoopProxy::current() != main_message_loop_proxy_) {
    // No need to check |shutdown_called_| here. If shutdown has occurred,
    // |client_| will be NULL and the handles will be cleaned up on the main
    // thread.
    main_message_loop_proxy_->PostTask(
        FROM_HERE,
        base::Bind(&PepperPlatformAudioInputImpl::OnStreamCreated, this,
                   handle, socket_handle, length, total_segments));
  } else {
    // Must dereference the client only on the main thread. Shutdown may have
    // occurred while the request was in-flight, so we need to NULL check.
    if (client_) {
      client_->StreamCreated(handle, length, socket_handle);
    } else {
      // Clean up the handles. Wrapping them in RAII objects whose destructors
      // run immediately closes the socket and unmaps/closes the memory.
      base::SyncSocket temp_socket(socket_handle);
      base::SharedMemory temp_shared_memory(handle, false);
    }
  }
}

// Volume changes are not surfaced to the Pepper client; intentionally empty.
void PepperPlatformAudioInputImpl::OnVolume(double volume) {}

// State changes are not surfaced to the Pepper client; intentionally empty.
void PepperPlatformAudioInputImpl::OnStateChanged(
    media::AudioInputIPCDelegate::State state) {
}

// The IPC channel went away; drop the (non-owning) filter pointer.
void PepperPlatformAudioInputImpl::OnIPCClosed() {
  ipc_ = NULL;
}

PepperPlatformAudioInputImpl::~PepperPlatformAudioInputImpl() {
  // Make sure we have been shut down. Warning: this may happen on the I/O
  // thread!
  // Although these members should be accessed on a specific thread (either the
  // main thread or the I/O thread), it should be fine to examine their value
  // here.
  DCHECK_EQ(0, stream_id_);
  DCHECK(!client_);
  DCHECK(label_.empty());
  DCHECK(shutdown_called_);
}

// Private; construction happens only through Create().
PepperPlatformAudioInputImpl::PepperPlatformAudioInputImpl()
    : client_(NULL),
      stream_id_(0),
      render_view_id_(MSG_ROUTING_NONE),
      main_message_loop_proxy_(base::MessageLoopProxy::current()),
      shutdown_called_(false) {
  ipc_ = RenderThreadImpl::current()->audio_input_message_filter();
}

// Main-thread setup: records the delegate/client, fixes the audio parameters
// (mono 16-bit linear PCM) and kicks off the asynchronous device open.
// Returns false if either the delegate or the client is missing.
bool PepperPlatformAudioInputImpl::Initialize(
    const base::WeakPtr<PepperPluginDelegateImpl>& plugin_delegate,
    const std::string& device_id,
    int sample_rate,
    int frames_per_buffer,
    webkit::ppapi::PluginDelegate::PlatformAudioInputClient* client) {
  DCHECK(main_message_loop_proxy_->BelongsToCurrentThread());

  if (!plugin_delegate || !client)
    return false;

  plugin_delegate_ = plugin_delegate;
  render_view_id_ = plugin_delegate_->GetRoutingID();
  client_ = client;

  params_.Reset(media::AudioParameters::AUDIO_PCM_LINEAR,
                media::CHANNEL_LAYOUT_MONO, 1, 0, sample_rate, 16,
                frames_per_buffer);

  // We need to open the device and obtain the label and session ID before
  // initializing.
  plugin_delegate_->OpenDevice(
      PP_DEVICETYPE_DEV_AUDIOCAPTURE,
      device_id.empty() ? media::AudioManagerBase::kDefaultDeviceId :
          device_id,
      base::Bind(&PepperPlatformAudioInputImpl::OnDeviceOpened, this));

  return true;
}

// I/O thread: registers this object as an IPC delegate and asks the browser
// to create the audio stream. No-op if shutdown already happened.
void PepperPlatformAudioInputImpl::InitializeOnIOThread(int session_id) {
  DCHECK(ChildProcess::current()->io_message_loop_proxy()->
      BelongsToCurrentThread());

  if (shutdown_called_)
    return;

  // Make sure we don't call init more than once.
  DCHECK_EQ(0, stream_id_);
  stream_id_ = ipc_->AddDelegate(this);
  DCHECK_NE(0, stream_id_);

  // We will be notified by OnStreamCreated().
  ipc_->CreateStream(stream_id_, session_id, params_, false, 1);
}

void PepperPlatformAudioInputImpl::StartCaptureOnIOThread() {
  DCHECK(ChildProcess::current()->io_message_loop_proxy()->
      BelongsToCurrentThread());

  if (stream_id_) {
    ipc_->AssociateStreamWithConsumer(stream_id_, render_view_id_);
    ipc_->RecordStream(stream_id_);
  }
}

void PepperPlatformAudioInputImpl::StopCaptureOnIOThread() {
  DCHECK(ChildProcess::current()->io_message_loop_proxy()->
      BelongsToCurrentThread());

  // TODO(yzshen): We cannot re-start capturing if the stream is closed.
  if (stream_id_)
    ipc_->CloseStream(stream_id_);
}

// I/O thread half of ShutDown(): closes the stream, unregisters the IPC
// delegate, posts device closure back to the main thread, and drops the
// self-reference taken at creation time.
void PepperPlatformAudioInputImpl::ShutDownOnIOThread() {
  DCHECK(ChildProcess::current()->io_message_loop_proxy()->
      BelongsToCurrentThread());

  // Make sure we don't call shutdown more than once.
  if (shutdown_called_)
    return;
  shutdown_called_ = true;

  if (stream_id_) {
    ipc_->CloseStream(stream_id_);
    ipc_->RemoveDelegate(stream_id_);
    stream_id_ = 0;
  }

  main_message_loop_proxy_->PostTask(
      FROM_HERE,
      base::Bind(&PepperPlatformAudioInputImpl::CloseDevice, this));

  Release();  // Release for the delegate, balances out the reference taken in
              // PepperPluginDelegateImpl::CreateAudioInput.
}

// Main-thread callback for the asynchronous device open started in
// Initialize(). On success (and if no shutdown intervened) it forwards the
// session id to the I/O thread for stream creation.
void PepperPlatformAudioInputImpl::OnDeviceOpened(int request_id,
                                                  bool succeeded,
                                                  const std::string& label) {
  DCHECK(main_message_loop_proxy_->BelongsToCurrentThread());

  if (succeeded && plugin_delegate_) {
    DCHECK(!label.empty());
    label_ = label;

    if (client_) {
      int session_id = plugin_delegate_->GetSessionID(
          PP_DEVICETYPE_DEV_AUDIOCAPTURE, label);
      ChildProcess::current()->io_message_loop()->PostTask(
          FROM_HERE,
          base::Bind(&PepperPlatformAudioInputImpl::InitializeOnIOThread,
                     this, session_id));
    } else {
      // Shutdown has occurred.
      CloseDevice();
    }
  } else {
    NotifyStreamCreationFailed();
  }
}

// Main thread: releases the opened capture device, if any.
void PepperPlatformAudioInputImpl::CloseDevice() {
  DCHECK(main_message_loop_proxy_->BelongsToCurrentThread());

  if (plugin_delegate_ && !label_.empty()) {
    plugin_delegate_->CloseDevice(label_);
    label_.clear();
  }
}

// Main thread: tells the client (if still attached) that stream setup failed.
void PepperPlatformAudioInputImpl::NotifyStreamCreationFailed() {
  DCHECK(main_message_loop_proxy_->BelongsToCurrentThread());

  if (client_)
    client_->StreamCreationFailed();
}

}  // namespace content
// Copyright 2019 Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <inttypes.h> #include <algorithm> #include <cstdio> #include <cstdlib> #include <cstring> #include <ctime> #include "app_framework.h" // NOLINT #include "firebase/analytics.h" #include "firebase/analytics/event_names.h" #include "firebase/analytics/parameter_names.h" #include "firebase/analytics/user_property_names.h" #include "firebase/app.h" #include "firebase/util.h" #include "firebase_test_framework.h" // NOLINT // The TO_STRING macro is useful for command line defined strings as the quotes // get stripped. #define TO_STRING_EXPAND(X) #X #define TO_STRING(X) TO_STRING_EXPAND(X) // Path to the Firebase config file to load. 
#ifdef FIREBASE_CONFIG #define FIREBASE_CONFIG_STRING TO_STRING(FIREBASE_CONFIG) #else #define FIREBASE_CONFIG_STRING "" #endif // FIREBASE_CONFIG namespace firebase_testapp_automated { using app_framework::ProcessEvents; using firebase_test_framework::FirebaseTest; class FirebaseAnalyticsTest : public FirebaseTest { public: static void SetUpTestSuite(); static void TearDownTestSuite(); static firebase::App* shared_app_; }; firebase::App* FirebaseAnalyticsTest::shared_app_; void FirebaseAnalyticsTest::SetUpTestSuite() { #if defined(__ANDROID__) shared_app_ = firebase::App::Create(app_framework::GetJniEnv(), app_framework::GetActivity()); #else shared_app_ = firebase::App::Create(); #endif // defined(__ANDROID__) firebase::analytics::Initialize(*shared_app_); } void FirebaseAnalyticsTest::TearDownTestSuite() { firebase::analytics::Terminate(); delete shared_app_; shared_app_ = nullptr; // The Analytics integration test is too fast for FTL, so pause a few seconds // here. ProcessEvents(1000); ProcessEvents(1000); ProcessEvents(1000); ProcessEvents(1000); ProcessEvents(1000); } TEST_F(FirebaseAnalyticsTest, TestSetCollectionEnabled) { // Can't confirm that these do anything but just run them all to ensure the // app doesn't crash. 
firebase::analytics::SetAnalyticsCollectionEnabled(true); firebase::analytics::SetAnalyticsCollectionEnabled(false); firebase::analytics::SetAnalyticsCollectionEnabled(true); } TEST_F(FirebaseAnalyticsTest, TestSetSessionTimeoutDuraction) { firebase::analytics::SetSessionTimeoutDuration(1000 * 60 * 5); firebase::analytics::SetSessionTimeoutDuration(1000 * 60 * 15); firebase::analytics::SetSessionTimeoutDuration(1000 * 60 * 30); } TEST_F(FirebaseAnalyticsTest, TestGetAnalyticsInstanceID) { firebase::Future<std::string> future = firebase::analytics::GetAnalyticsInstanceId(); WaitForCompletion(future, "GetAnalyticsInstanceId"); EXPECT_FALSE(future.result()->empty()); } TEST_F(FirebaseAnalyticsTest, TestSetProperties) { // Set the user's sign up method. firebase::analytics::SetUserProperty( firebase::analytics::kUserPropertySignUpMethod, "Google"); // Set the user ID. firebase::analytics::SetUserId("my_integration_test_user"); } TEST_F(FirebaseAnalyticsTest, TestLogEvents) { // Log an event with no parameters. firebase::analytics::LogEvent(firebase::analytics::kEventLogin); // Log an event with a floating point parameter. firebase::analytics::LogEvent("progress", "percent", 0.4f); // Log an event with an integer parameter. firebase::analytics::LogEvent(firebase::analytics::kEventPostScore, firebase::analytics::kParameterScore, 42); // Log an event with a string parameter. 
firebase::analytics::LogEvent(firebase::analytics::kEventJoinGroup, firebase::analytics::kParameterGroupID, "spoon_welders"); } TEST_F(FirebaseAnalyticsTest, TestLogEventWithMultipleParameters) { const firebase::analytics::Parameter kLevelUpParameters[] = { firebase::analytics::Parameter(firebase::analytics::kParameterLevel, 5), firebase::analytics::Parameter(firebase::analytics::kParameterCharacter, "mrspoon"), firebase::analytics::Parameter("hit_accuracy", 3.14f), }; firebase::analytics::LogEvent( firebase::analytics::kEventLevelUp, kLevelUpParameters, sizeof(kLevelUpParameters) / sizeof(kLevelUpParameters[0])); } TEST_F(FirebaseAnalyticsTest, TestResettingGivesNewInstanceId) { // Test is flaky on iPhone due to a known issue in iOS. See b/143656277. #if TARGET_OS_IPHONE FLAKY_TEST_SECTION_BEGIN(); #endif // TARGET_OS_IPHONE firebase::Future<std::string> future = firebase::analytics::GetAnalyticsInstanceId(); WaitForCompletion(future, "GetAnalyticsInstanceId"); EXPECT_FALSE(future.result()->empty()); std::string instance_id = *future.result(); firebase::analytics::ResetAnalyticsData(); future = firebase::analytics::GetAnalyticsInstanceId(); WaitForCompletion(future, "GetAnalyticsInstanceId after ResetAnalyticsData"); std::string new_instance_id = *future.result(); EXPECT_FALSE(future.result()->empty()); EXPECT_NE(instance_id, new_instance_id); #if TARGET_OS_IPHONE FLAKY_TEST_SECTION_END(); #endif // TARGET_OS_IPHONE } } // namespace firebase_testapp_automated
// Copyright (c) 2012-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <util.h>
#include <test/test_zenx.h>

#include <string>
#include <vector>

#include <boost/algorithm/string.hpp>
#include <boost/test/unit_test.hpp>

// Unit tests for command-line argument parsing (gArgs): boolean flags,
// string/integer values, the -nofoo negation feature and the --foo form.
BOOST_FIXTURE_TEST_SUITE(getarg_tests, BasicTestingSetup)

// Re-parse the global argument state from a space-separated argument string.
// A dummy executable name is prepended because ParseParameters skips argv[0].
static void ResetArgs(const std::string& strArg)
{
    std::vector<std::string> vecArg;
    if (strArg.size())
        boost::split(vecArg, strArg, boost::is_space(), boost::token_compress_on);

    // Insert dummy executable name:
    vecArg.insert(vecArg.begin(), "testzenx");

    // Convert to char*:
    // (the c_str() pointers stay valid because vecArg owns the strings for
    // the duration of the ParseParameters call)
    std::vector<const char*> vecChar;
    for (std::string& s : vecArg)
        vecChar.push_back(s.c_str());

    gArgs.ParseParameters(vecChar.size(), vecChar.data());
}

BOOST_AUTO_TEST_CASE(boolarg)
{
    // A bare flag is true regardless of the supplied default.
    ResetArgs("-foo");
    BOOST_CHECK(gArgs.GetBoolArg("-foo", false));
    BOOST_CHECK(gArgs.GetBoolArg("-foo", true));

    // Prefixes and extensions of the flag name fall back to the default.
    BOOST_CHECK(!gArgs.GetBoolArg("-fo", false));
    BOOST_CHECK(gArgs.GetBoolArg("-fo", true));

    BOOST_CHECK(!gArgs.GetBoolArg("-fooo", false));
    BOOST_CHECK(gArgs.GetBoolArg("-fooo", true));

    ResetArgs("-foo=0");
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", false));
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", true));

    ResetArgs("-foo=1");
    BOOST_CHECK(gArgs.GetBoolArg("-foo", false));
    BOOST_CHECK(gArgs.GetBoolArg("-foo", true));

    // New 0.6 feature: auto-map -nosomething to !-something:
    ResetArgs("-nofoo");
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", false));
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", true));

    ResetArgs("-nofoo=1");
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", false));
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", true));

    ResetArgs("-foo -nofoo");  // -nofoo should win
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", false));
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", true));

    ResetArgs("-foo=1 -nofoo=1");  // -nofoo should win
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", false));
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", true));

    ResetArgs("-foo=0 -nofoo=0");  // -nofoo=0 should win
    BOOST_CHECK(gArgs.GetBoolArg("-foo", false));
    BOOST_CHECK(gArgs.GetBoolArg("-foo", true));

    // New 0.6 feature: treat -- same as -:
    ResetArgs("--foo=1");
    BOOST_CHECK(gArgs.GetBoolArg("-foo", false));
    BOOST_CHECK(gArgs.GetBoolArg("-foo", true));

    ResetArgs("--nofoo=1");
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", false));
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", true));
}

BOOST_AUTO_TEST_CASE(stringarg)
{
    // Absent argument returns the supplied default.
    ResetArgs("");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", ""), "");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", "eleven"), "eleven");

    // A bare flag reads as the empty string, overriding the default.
    ResetArgs("-foo -bar");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", ""), "");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", "eleven"), "");

    ResetArgs("-foo=");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", ""), "");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", "eleven"), "");

    ResetArgs("-foo=11");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", ""), "11");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", "eleven"), "11");

    ResetArgs("-foo=eleven");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", ""), "eleven");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", "eleven"), "eleven");
}

BOOST_AUTO_TEST_CASE(intarg)
{
    ResetArgs("");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", 11), 11);
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", 0), 0);

    // A valueless flag parses as integer 0.
    ResetArgs("-foo -bar");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", 11), 0);
    BOOST_CHECK_EQUAL(gArgs.GetArg("-bar", 11), 0);

    ResetArgs("-foo=11 -bar=12");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", 0), 11);
    BOOST_CHECK_EQUAL(gArgs.GetArg("-bar", 11), 12);

    // Non-numeric values parse as 0, not as the default.
    ResetArgs("-foo=NaN -bar=NotANumber");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", 1), 0);
    BOOST_CHECK_EQUAL(gArgs.GetArg("-bar", 11), 0);
}

BOOST_AUTO_TEST_CASE(doublezenx)
{
    // --foo behaves exactly like -foo.
    ResetArgs("--foo");
    BOOST_CHECK_EQUAL(gArgs.GetBoolArg("-foo", false), true);

    ResetArgs("--foo=verbose --bar=1");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-foo", ""), "verbose");
    BOOST_CHECK_EQUAL(gArgs.GetArg("-bar", 0), 1);
}

BOOST_AUTO_TEST_CASE(boolargno)
{
    ResetArgs("-nofoo");
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", true));
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", false));

    ResetArgs("-nofoo=1");
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", true));
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", false));

    // -nofoo=0 double-negates back to true.
    ResetArgs("-nofoo=0");
    BOOST_CHECK(gArgs.GetBoolArg("-foo", true));
    BOOST_CHECK(gArgs.GetBoolArg("-foo", false));

    ResetArgs("-foo --nofoo");  // --nofoo should win
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", true));
    BOOST_CHECK(!gArgs.GetBoolArg("-foo", false));

    ResetArgs("-nofoo -foo");  // foo always wins:
    BOOST_CHECK(gArgs.GetBoolArg("-foo", true));
    BOOST_CHECK(gArgs.GetBoolArg("-foo", false));
}

BOOST_AUTO_TEST_SUITE_END()
#include "mainwindow.h"
#include <QApplication>

// Application entry point.
//
// Exit code 207 acts as a "restart requested" sentinel: when the event loop
// returns it, the main window is torn down, recreated, and the event loop is
// entered again. Any other exit code is propagated to the OS.
int main(int argc, char *argv[])
{
    QApplication application(argc, argv);

    const int kRestartCode = 207;
    int exitCode = kRestartCode;  // force at least one pass through the loop
    while (exitCode == kRestartCode) {
        MainWindow window;
        window.show();
        exitCode = application.exec();
    }
    return exitCode;
}
#include <cvd/videosource.h>

namespace CVD{

	// Stub specializations used when libcvd is built WITHOUT Video4Linux
	// support. Each specialization unconditionally throws so that callers
	// requesting a V4L source get a clear runtime error instead of a link
	// failure. One stub is provided per pixel format the real factory
	// supports.

	template <> VideoBuffer<byte>* makeV4LBuffer(const std::string&, const ImageRef&, int, bool, bool)
	{
		throw VideoSourceException("V4LBuffer is not compiled in to libcvd.");
	}

	template <> VideoBuffer<bayer_grbg>* makeV4LBuffer(const std::string&, const ImageRef&, int, bool, bool)
	{
		throw VideoSourceException("V4LBuffer is not compiled in to libcvd.");
	}

	template <> VideoBuffer<yuv422>* makeV4LBuffer(const std::string&, const ImageRef&, int, bool, bool)
	{
		throw VideoSourceException("V4LBuffer is not compiled in to libcvd.");
	}

	template <> VideoBuffer<vuy422>* makeV4LBuffer(const std::string&, const ImageRef&, int, bool, bool)
	{
		throw VideoSourceException("V4LBuffer is not compiled in to libcvd.");
	}

	template <> VideoBuffer<Rgb<byte> >* makeV4LBuffer(const std::string&, const ImageRef&, int, bool, bool)
	{
		throw VideoSourceException("V4LBuffer is not compiled in to libcvd.");
	}

	template <> VideoBuffer<Rgb8>* makeV4LBuffer(const std::string&, const ImageRef&, int, bool, bool)
	{
		throw VideoSourceException("V4LBuffer is not compiled in to libcvd.");
	}

	template <> VideoBuffer<yuv420p>* makeV4LBuffer(const std::string&, const ImageRef&, int, bool, bool)
	{
		throw VideoSourceException("V4LBuffer is not compiled in to libcvd.");
	}

}
#pragma once

namespace mu
{
    namespace llvmc
    {
        // Integer-comparison predicate selector. The names mirror LLVM's
        // ICmp predicate mnemonics (eq/ne, then unsigned and signed ordered
        // comparisons) — presumably mapped one-to-one onto llvm::CmpInst
        // predicates at the use site; TODO confirm there.
        enum class predicates
        {
            icmp_eq,   // equal
            icmp_ne,   // not equal
            icmp_ugt,  // unsigned greater than
            icmp_uge,  // unsigned greater than or equal
            icmp_ult,  // unsigned less than
            icmp_ule,  // unsigned less than or equal
            icmp_sgt,  // signed greater than
            icmp_sge,  // signed greater than or equal
            icmp_slt,  // signed less than
            icmp_sle   // signed less than or equal
        };
    }
}
//@COPYRIGHT@//
//
// Copyright (c) 2011, Tomoto S. Washio
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above copyright
//       notice, this list of conditions and the following disclaimer in the
//       documentation and/or other materials provided with the distribution.
//     * Neither the name of the Tomoto S. Washio nor the names of his
//       contributors may be used to endorse or promote products derived from
//       this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
//@COPYRIGHT@//

#include "TimeTicker.h"

// A ticker starts "never ticked" (m_lastTime == 0) and unlocked.
TimeTicker::TimeTicker()
{
	m_lastTime = 0;
	m_isLocked = false;
}

// Returns the elapsed time in seconds since the previous tick, or 0.0f on the
// very first call. When the ticker is unlocked, the reference time advances
// to "now"; when locked, the reference time is frozen so successive ticks
// measure against the same origin.
// NOTE(review): GetTickCount() wraps around after ~49.7 days of uptime —
// the unsigned subtraction handles a single wrap, but a dt spanning more
// than one wrap would be wrong; presumably irrelevant for this app.
float TimeTicker::tick()
{
	DWORD currentTime = GetTickCount();
	float dt = (m_lastTime == 0) ? 0.0f : (currentTime - m_lastTime) / 1000.0f;
	if (!m_isLocked) {
		m_lastTime = currentTime;
	}
	return dt;
}

// Takes one normal tick (advancing the reference time), then freezes the
// reference time for subsequent tick() calls. Returns that tick's dt.
float TimeTicker::lock()
{
	m_isLocked = false;
	float dt = tick();
	m_isLocked = true;
	return dt;
}

// Unfreezes the ticker and immediately takes a tick, returning the time
// elapsed since the locked reference point.
float TimeTicker::unlock()
{
	m_isLocked = false;
	return tick();
}
//---------------------------------------------------------------------------- // // TSDuck - The MPEG Transport Stream Toolkit // Copyright (c) 2005-2021, Thierry Lelegard // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF // THE POSSIBILITY OF SUCH DAMAGE. // //---------------------------------------------------------------------------- #include "tstsmuxOutputExecutor.h" #include "tsGuardCondition.h" #include "tsGuard.h" TSDUCK_SOURCE; //---------------------------------------------------------------------------- // Constructor and destructor. 
//----------------------------------------------------------------------------

// Builds the executor for the single output plugin of the mux. The base
// PluginExecutor owns the generic plugin machinery; _output caches the
// downcast pointer to the output-specific interface.
ts::tsmux::OutputExecutor::OutputExecutor(const MuxerArgs& opt, const PluginEventHandlerRegistry& handlers, Report& log) :
    PluginExecutor(opt, handlers, PluginType::OUTPUT, opt.output, ThreadAttributes(), log),
    _output(dynamic_cast<OutputPlugin*>(plugin()))
{
}

ts::tsmux::OutputExecutor::~OutputExecutor()
{
    // Block until the plugin thread has fully exited before destroying state.
    waitForTermination();
}


//----------------------------------------------------------------------------
// Implementation of TSP.
//----------------------------------------------------------------------------

size_t ts::tsmux::OutputExecutor::pluginIndex() const
{
    // The output plugin comes last.
    return _opt.inputs.size();
}


//----------------------------------------------------------------------------
// Copy packets in the output buffer.
//----------------------------------------------------------------------------

// Producer side of the circular buffer: called by the muxer core to enqueue
// packets for the output thread. Blocks while the buffer is full. Returns
// false when the executor is terminating (packets possibly not all copied).
bool ts::tsmux::OutputExecutor::send(const TSPacket* pkt, const TSPacketMetadata* mdata, size_t count)
{
    // Loop until everything is copied in the buffer or termination.
    while (!_terminate && count > 0) {

        // Loop until there is some free space in the buffer.
        GuardCondition lock(_mutex, _got_freespace);
        while (!_terminate && _packets_count >= _buffer_size) {
            lock.waitCondition();
        }

        // Fill what can be filled in the buffer. We are still under the mutex protection.
        if (!_terminate) {
            assert(_packets_count <= _buffer_size);
            // Number of free packets in the buffer:
            const size_t free_size = _buffer_size - _packets_count;
            // End of output area, where to copy packets:
            const size_t copy_first = (_packets_first + _packets_count) % _buffer_size;
            // Number of contiguous packets which can be copied:
            // (limited by the request, by free space, and by the wrap point)
            const size_t fill_count = std::min(std::min(count, free_size), _buffer_size - copy_first);

            // Copy packets.
            TSPacket::Copy(&_packets[copy_first], pkt, fill_count);
            TSPacketMetadata::Copy(&_metadata[copy_first], mdata, fill_count);
            count -= fill_count;
            _packets_count += fill_count;
            pkt += fill_count;
            mdata += fill_count;

            // Signal that there are some packets to send.
            // The mutex was initially locked for the _got_freespace condition because we needed to wait
            // for that condition but we can also use it to signal the _got_packets condition.
            _got_packets.signal();
        }
    }
    return !_terminate;
}


//----------------------------------------------------------------------------
// Invoked in the context of the output plugin thread.
//----------------------------------------------------------------------------

// Consumer side: drains the circular buffer into the output plugin until
// termination. On plugin failure, either terminates (--output-once) or
// restarts the plugin after a delay.
void ts::tsmux::OutputExecutor::main()
{
    debug(u"output thread started");

    // Loop until we are instructed to stop.
    while (!_terminate) {
        // Wait for packets to be available in the output buffer.
        size_t first = 0;
        size_t count = 0;
        {
            GuardCondition lock(_mutex, _got_packets);
            while (_packets_count == 0 && !_terminate) {
                lock.waitCondition();
            }
            // We can output these packets.
            // Snapshot taken under the lock; only this thread removes packets,
            // so the snapshot can only under-estimate what is available.
            first = _packets_first;
            count = _packets_count;
        }

        // Output available packets.
        while (count > 0 && !_terminate) {

            // Output some packets. Not more that --max-output-packets, not more than up to end of circular buffer.
            // NOTE(review): the wrap limit reads _packets_first (shared) rather
            // than the local snapshot 'first' — presumably safe because both are
            // updated in lock-step below, but worth confirming.
            const size_t send_count = std::min(std::min(count, _opt.maxOutputPackets), _buffer_size - _packets_first);
            if (_output->send(&_packets[first], &_metadata[first], send_count)) {
                // Packets successfully sent.
                GuardCondition lock(_mutex, _got_freespace);
                _packets_count -= send_count;
                _packets_first = (_packets_first + send_count) % _buffer_size;
                count -= send_count;
                first = (first + send_count) % _buffer_size;
                // Signal that there are some free space in the buffer.
                lock.signal();
            }
            else if (_opt.outputOnce) {
                // Terminates when the output plugin fails.
                _terminate = true;
            }
            else {
                // Restart when the plugin fails.
                verbose(u"restarting output plugin '%s' after failure", {pluginName()});
                _output->stop();
                while (!_terminate && !_output->start()) {
                    SleepThread(_opt.outputRestartDelay);
                }
            }
        }
    }

    // Stop the plugin.
    _output->stop();
    debug(u"output thread terminated");
}
#include <opencv\highgui.h>
#include <opencv\cv.h>
#include <iostream>
#include <sstream>  // std::stringstream (previously pulled in transitively)

using namespace cv;
using namespace std;

// Convert an integer to its decimal string representation.
string intToString(int number){

	std::stringstream ss;
	ss << number;
	return ss.str();
}

// Captures frames from the default webcam, records them to an AVI file with
// the DIV3 codec, and shows a live preview. ESC exits. Returns -1 when the
// camera or the output file cannot be opened.
int main(int argc, char* argv[])
{
	VideoCapture cap(0); // open the video camera no. 0
	cv::VideoWriter writer;

	if (!cap.isOpened())  // if not success, exit program
	{
		cout << "ERROR INITIALIZING VIDEO CAPTURE" << endl;
		return -1;
	}

	// String literals are const; 'const char*' avoids the deprecated
	// (and in modern C++ ill-formed) literal-to-'char*' conversion.
	const char* windowName = "Webcam Feed";
	namedWindow(windowName, CV_WINDOW_AUTOSIZE); // create a window to display our webcam feed

	// BUG FIX: the original "D:\myVideo.avi" contains the invalid escape
	// sequence '\m'; the backslash was silently dropped and the file ended up
	// at the drive-relative path "D:myVideo.avi". Escape the backslash.
	string filename = "D:\\myVideo.avi";

	// fourcc integer (DIV3 = DivX MPEG-4 v3)
	int fcc = CV_FOURCC('D', 'I', 'V', '3');

	// frames per second of the recorded file
	int fps = 10;

	// Frame size must match the capture size or the writer produces a broken
	// file. cap.get() returns double, so cast explicitly to int.
	cv::Size frameSize(static_cast<int>(cap.get(CV_CAP_PROP_FRAME_WIDTH)),
	                   static_cast<int>(cap.get(CV_CAP_PROP_FRAME_HEIGHT)));

	writer = VideoWriter(filename, fcc, fps, frameSize);

	if (!writer.isOpened()) {
		cout << "ERROR OPENING FILE FOR WRITE" << endl;
		getchar();  // keep the console open so the message can be read
		return -1;
	}

	Mat frame;

	while (1) {

		bool bSuccess = cap.read(frame); // read a new frame from camera feed

		if (!bSuccess) // test if frame successfully read
		{
			cout << "ERROR READING FRAME FROM CAMERA FEED" << endl;
			break;
		}

		writer.write(frame);          // append the frame to the output file
		imshow(windowName, frame);    // show the frame in "MyVideo" window

		// Listen for 10 ms for a key press; waitKey also services the HighGUI
		// event loop, so it must be called every iteration.
		if (waitKey(10) == 27) {
			// 'esc' has been pressed (ASCII value for 'esc' is 27): exit.
			return 0;
		}
	}

	return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////