text
stringlengths
5
1.04M
#include "text.h"

// Draws this Text object's string onto its device context at the stored
// anchor point.
// NOTE(review): assumes DrawText takes (string, x, y) in that order -
// confirm against the device-context API declared in text.h.
void Text::draw() { device_context->DrawText(text, point.x, point.y); }
///////////////////////////////////////////////////////////////////////
// File:        unicharset_training_utils.cpp
// Description: Training utilities for UNICHARSET.
// Author:      Ray Smith
// Created:     Fri Oct 17 17:09:01 PDT 2014
//
// (C) Copyright 2014, Google Inc.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
///////////////////////////////////////////////////////////////////////

#include "unicharset_training_utils.h"

#include <stdlib.h>
#include <string.h>
#include <string>

#include "fileio.h"
#include BOSS_TESSERACT_U_genericvector_h //original-code:"genericvector.h"
#include "icuerrorcode.h"
#include "normstrngs.h"
#include BOSS_TESSERACT_U_statistc_h //original-code:"statistc.h"
#include BOSS_TESSERACT_U_strngs_h //original-code:"strngs.h"
#include BOSS_TESSERACT_U_unicharset_h //original-code:"unicharset.h"
#include "unicode/uchar.h"    // from libicu
#include "unicode/uscript.h"  // from libicu

namespace tesseract {

// Helper sets the character attribute properties and sets up the script table.
// Does not set tops and bottoms.
//
// For each unichar in the set this fills in, via ICU: the
// alpha/lower/upper/digit/punct flags, the script name, the other-case and
// mirror unichar ids, the directionality of the first code point, and the
// normalized form (via NormalizeUTF8String).  Ends with post_load_setup()
// so the unicharset's derived data is rebuilt.
void SetupBasicProperties(bool report_errors, UNICHARSET* unicharset) {
  for (int unichar_id = 0; unichar_id < unicharset->size(); ++unichar_id) {
    // Convert any custom ligatures to their underlying component string
    // before querying ICU, so properties come from real code points.
    const char* unichar_str = unicharset->id_to_unichar(unichar_id);
    for (int i = 0; UNICHARSET::kCustomLigatures[i][0] != NULL; ++i) {
      if (!strcmp(UNICHARSET::kCustomLigatures[i][1], unichar_str)) {
        unichar_str = UNICHARSET::kCustomLigatures[i][0];
        break;
      }
    }

    // Convert the unichar to UTF32 representation.
    GenericVector<char32> uni_vector;
    tesseract::UTF8ToUTF32(unichar_str, &uni_vector);

    // Assume that if the property is true for any character in the string,
    // then it holds for the whole "character" (a unichar may be multiple
    // code points, e.g. a ligature or a base plus combining marks).
    bool unichar_isalpha = false;
    bool unichar_islower = false;
    bool unichar_isupper = false;
    bool unichar_isdigit = false;
    bool unichar_ispunct = false;
    for (int i = 0; i < uni_vector.size(); ++i) {
      if (u_isalpha(uni_vector[i])) unichar_isalpha = true;
      if (u_islower(uni_vector[i])) unichar_islower = true;
      if (u_isupper(uni_vector[i])) unichar_isupper = true;
      if (u_isdigit(uni_vector[i])) unichar_isdigit = true;
      if (u_ispunct(uni_vector[i])) unichar_ispunct = true;
    }
    unicharset->set_isalpha(unichar_id, unichar_isalpha);
    unicharset->set_islower(unichar_id, unichar_islower);
    unicharset->set_isupper(unichar_id, unichar_isupper);
    unicharset->set_isdigit(unichar_id, unichar_isdigit);
    unicharset->set_ispunctuation(unichar_id, unichar_ispunct);

    // Script is taken from the first code point only.
    // NOTE(review): uni_vector[0] assumes every unicharset entry decodes to
    // at least one code point - confirm entries are never empty strings.
    tesseract::IcuErrorCode err;
    unicharset->set_script(unichar_id, uscript_getName(
        uscript_getScript(uni_vector[0], err)));

    const int num_code_points = uni_vector.size();
    // Obtain the lower/upper case if needed and record it in the properties.
    // Default other_case to self so cased lookups are always valid.
    unicharset->set_other_case(unichar_id, unichar_id);
    if (unichar_islower || unichar_isupper) {
      GenericVector<char32> other_case(num_code_points, 0);
      for (int i = 0; i < num_code_points; ++i) {
        // TODO(daria): Ideally u_strToLower()/ustrToUpper() should be used.
        // However since they deal with UChars (so need a conversion function
        // from char32 or UTF8string) and require a meaningful locale string,
        // for now u_tolower()/u_toupper() are used.
        other_case[i] = unichar_islower ?
            u_toupper(uni_vector[i]) : u_tolower(uni_vector[i]);
      }
      STRING other_case_uch;
      tesseract::UTF32ToUTF8(other_case, &other_case_uch);
      UNICHAR_ID other_case_id =
          unicharset->unichar_to_id(other_case_uch.c_str());
      if (other_case_id != INVALID_UNICHAR_ID) {
        unicharset->set_other_case(unichar_id, other_case_id);
      } else if (unichar_id >= SPECIAL_UNICHAR_CODES_COUNT && report_errors) {
        // Special codes (null/space etc.) are exempt from this warning.
        tprintf("Other case %s of %s is not in unicharset\n",
                other_case_uch.c_str(), unichar_str);
      }
    }

    // Set RTL property and obtain mirror unichar ID from ICU.
    GenericVector<char32> mirrors(num_code_points, 0);
    for (int i = 0; i < num_code_points; ++i) {
      mirrors[i] = u_charMirror(uni_vector[i]);
      if (i == 0) {  // set directionality to that of the 1st code point
        unicharset->set_direction(unichar_id,
            static_cast<UNICHARSET::Direction>(
                u_charDirection(uni_vector[i])));
      }
    }
    STRING mirror_uch;
    tesseract::UTF32ToUTF8(mirrors, &mirror_uch);
    UNICHAR_ID mirror_uch_id = unicharset->unichar_to_id(mirror_uch.c_str());
    if (mirror_uch_id != INVALID_UNICHAR_ID) {
      unicharset->set_mirror(unichar_id, mirror_uch_id);
    } else if (report_errors) {
      tprintf("Mirror %s of %s is not in unicharset\n",
              mirror_uch.c_str(), unichar_str);
    }

    // Record normalized version of this unichar.  Unichar 0 (and entries
    // that normalize to empty) keep their raw string as the normed form.
    STRING normed_str = tesseract::NormalizeUTF8String(unichar_str);
    if (unichar_id != 0 && normed_str.length() > 0) {
      unicharset->set_normed(unichar_id, normed_str.c_str());
    } else {
      unicharset->set_normed(unichar_id, unichar_str);
    }
    // Sanity check: other_case must reference a valid id.
    ASSERT_HOST(unicharset->get_other_case(unichar_id) < unicharset->size());
  }
  unicharset->post_load_setup();
}

// Helper to set the properties for an input unicharset file, writes to the
// output file. If an appropriate script unicharset can be found in the
// script_dir directory, then the tops and bottoms are expanded using the
// script unicharset.
// If non-empty, xheight data for the fonts are written to the xheights_file.
void SetPropertiesForInputFile(const string& script_dir, const string& input_unicharset_file, const string& output_unicharset_file, const string& output_xheights_file) { UNICHARSET unicharset; // Load the input unicharset unicharset.load_from_file(input_unicharset_file.c_str()); tprintf("Loaded unicharset of size %d from file %s\n", unicharset.size(), input_unicharset_file.c_str()); // Set unichar properties tprintf("Setting unichar properties\n"); SetupBasicProperties(true, &unicharset); string xheights_str; for (int s = 0; s < unicharset.get_script_table_size(); ++s) { // Load the unicharset for the script if available. string filename = script_dir + "/" + unicharset.get_script_from_script_id(s) + ".unicharset"; UNICHARSET script_set; if (script_set.load_from_file(filename.c_str())) { unicharset.SetPropertiesFromOther(script_set); } // Load the xheights for the script if available. filename = script_dir + "/" + unicharset.get_script_from_script_id(s) + ".xheights"; string script_heights; if (File::ReadFileToString(filename, &script_heights)) xheights_str += script_heights; } if (!output_xheights_file.empty()) File::WriteStringToFileOrDie(xheights_str, output_xheights_file); for (int c = SPECIAL_UNICHAR_CODES_COUNT; c < unicharset.size(); ++c) { if (unicharset.PropertiesIncomplete(c)) { tprintf("Warning: properties incomplete for index %d = %s\n", c, unicharset.id_to_unichar(c)); } } // Write the output unicharset tprintf("Writing unicharset to file %s\n", output_unicharset_file.c_str()); unicharset.save_to_file(output_unicharset_file.c_str()); } } // namespace tesseract
/*========================================================================= Program: Visualization Toolkit Module: ImagePlaneWidget.cxx Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen All rights reserved. See Copyright.txt or http://www.kitware.com/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notice for more information. =========================================================================*/ #include "vtkSmartPointer.h" #include "vtkActor.h" #include "vtkCamera.h" #include "vtkCellPicker.h" #include "vtkCommand.h" #include "vtkImageActor.h" #include "vtkImageMapper3D.h" #include "vtkImageMapToColors.h" #include "vtkImagePlaneWidget.h" #include "vtkImageReader.h" #include "vtkInteractorEventRecorder.h" #include "vtkLookupTable.h" #include "vtkOutlineFilter.h" #include "vtkPolyDataMapper.h" #include "vtkProperty.h" #include "vtkRenderWindow.h" #include "vtkRenderWindowInteractor.h" #include "vtkRenderer.h" #include "vtkVolume16Reader.h" #include "vtkImageData.h" #include "vtkTestUtilities.h" static char IPWeventLog[] = "# StreamVersion 1\n" "CharEvent 179 195 0 0 98 1 i\n" "MiddleButtonPressEvent 179 195 0 0 0 0 i\n" "MouseMoveEvent 179 190 0 0 0 0 i\n" "MouseMoveEvent 179 185 0 0 0 0 i\n" "MouseMoveEvent 179 180 0 0 0 0 i\n" "MouseMoveEvent 179 175 0 0 0 0 i\n" "MouseMoveEvent 179 170 0 0 0 0 i\n" "MouseMoveEvent 179 165 0 0 0 0 i\n" "MouseMoveEvent 179 160 0 0 0 0 i\n" "MouseMoveEvent 179 155 0 0 0 0 i\n" "MouseMoveEvent 179 150 0 0 0 0 i\n" "MouseMoveEvent 179 145 0 0 0 0 i\n" "MouseMoveEvent 179 140 0 0 0 0 i\n" "MouseMoveEvent 179 135 0 0 0 0 i\n" "MiddleButtonReleaseEvent 179 135 0 0 0 0 i\n" "RightButtonPressEvent 179 135 0 0 0 0 i\n" "MouseMoveEvent 180 135 0 0 0 0 i\n" "MouseMoveEvent 181 136 0 0 0 0 i\n" "MouseMoveEvent 181 137 0 0 0 0 i\n" "MouseMoveEvent 181 138 0 0 0 0 i\n" "MouseMoveEvent 181 139 
0 0 0 0 i\n" "MouseMoveEvent 181 140 0 0 0 0 i\n" "MouseMoveEvent 180 140 0 0 0 0 i\n" "MouseMoveEvent 175 135 0 0 0 0 i\n" "MouseMoveEvent 170 130 0 0 0 0 i\n" "MouseMoveEvent 165 130 0 0 0 0 i\n" "MouseMoveEvent 160 130 0 0 0 0 i\n" "MouseMoveEvent 155 125 0 0 0 0 i\n" "MouseMoveEvent 150 120 0 0 0 0 i\n" "MouseMoveEvent 145 115 0 0 0 0 i\n" "MouseMoveEvent 140 110 0 0 0 0 i\n" "RightButtonReleaseEvent 140 110 0 0 0 0 i\n" "MouseMoveEvent 135 120 0 0 0 0 i\n" "MouseMoveEvent 130 135 0 0 0 0 i\n" "MouseMoveEvent 125 170 0 0 0 0 i\n" "MouseMoveEvent 120 180 0 0 0 0 i\n" "MouseMoveEvent 115 190 0 0 0 0 i\n" "MouseMoveEvent 110 200 0 0 0 0 i\n" "MouseMoveEvent 106 218 0 0 0 0 i\n" "LeftButtonPressEvent 106 218 0 0 0 0 i\n" "MouseMoveEvent 107 219 0 0 0 0 i\n" "MouseMoveEvent 110 218 0 0 0 0 i\n" "MouseMoveEvent 114 216 0 0 0 0 i\n" "MouseMoveEvent 118 214 0 0 0 0 i\n" "MouseMoveEvent 123 213 0 0 0 0 i\n" "MouseMoveEvent 128 212 0 0 0 0 i\n" "MouseMoveEvent 132 210 0 0 0 0 i\n" "MouseMoveEvent 138 207 0 0 0 0 i\n" "MouseMoveEvent 144 205 0 0 0 0 i\n" "MouseMoveEvent 150 203 0 0 0 0 i\n" "MouseMoveEvent 157 201 0 0 0 0 i\n" "MouseMoveEvent 164 200 0 0 0 0 i\n" "MouseMoveEvent 168 198 0 0 0 0 i\n" "MouseMoveEvent 176 196 0 0 0 0 i\n" "MouseMoveEvent 183 194 0 0 0 0 i\n" "MouseMoveEvent 190 192 0 0 0 0 i\n" "MouseMoveEvent 197 190 0 0 0 0 i\n" "MouseMoveEvent 199 189 0 0 0 0 i\n" "MouseMoveEvent 204 189 0 0 0 0 i\n" "MouseMoveEvent 206 189 0 0 0 0 i\n" "MouseMoveEvent 209 188 0 0 0 0 i\n" "MouseMoveEvent 211 187 0 0 0 0 i\n" "LeftButtonReleaseEvent 211 187 0 0 0 0 i\n" "MouseMoveEvent 259 183 0 0 0 0 i\n" "KeyPressEvent 259 183 -128 0 0 1 Control_L\n" "MiddleButtonPressEvent 259 183 8 0 0 0 Control_L\n" "MouseMoveEvent 261 183 8 0 0 0 Control_L\n" "MouseMoveEvent 263 182 8 0 0 0 Control_L\n" "MouseMoveEvent 266 181 8 0 0 0 Control_L\n" "MouseMoveEvent 268 180 8 0 0 0 Control_L\n" "MouseMoveEvent 270 179 8 0 0 0 Control_L\n" "MouseMoveEvent 273 178 8 0 0 0 Control_L\n" 
"MouseMoveEvent 276 177 8 0 0 0 Control_L\n" "MouseMoveEvent 279 176 8 0 0 0 Control_L\n" "MouseMoveEvent 282 175 8 0 0 0 Control_L\n" "MouseMoveEvent 287 174 8 0 0 0 Control_L\n" "MouseMoveEvent 286 173 8 0 0 0 Control_L\n" "MouseMoveEvent 284 173 8 0 0 0 Control_L\n" "MouseMoveEvent 281 174 8 0 0 0 Control_L\n" "MouseMoveEvent 277 175 8 0 0 0 Control_L\n" "MouseMoveEvent 274 176 8 0 0 0 Control_L\n" "MouseMoveEvent 269 177 8 0 0 0 Control_L\n" "MouseMoveEvent 267 177 8 0 0 0 Control_L\n" "KeyReleaseEvent 267 177 0 0 0 1 Control_L\n" "MiddleButtonReleaseEvent 267 177 0 0 0 0 Control_L\n" "MouseMoveEvent 240 229 0 0 0 0 Control_L\n" "KeyPressEvent 240 229 -128 0 0 1 Control_L\n" "MiddleButtonPressEvent 240 229 8 0 0 0 Control_L\n" "MouseMoveEvent 240 230 8 0 0 0 Control_L\n" "MouseMoveEvent 240 235 8 0 0 0 Control_L\n" "MouseMoveEvent 240 240 8 0 0 0 Control_L\n" "MouseMoveEvent 240 245 8 0 0 0 Control_L\n" "MouseMoveEvent 240 250 8 0 0 0 Control_L\n" "MouseMoveEvent 241 255 8 0 0 0 Control_L\n" "MouseMoveEvent 242 260 8 0 0 0 Control_L\n" "MouseMoveEvent 242 265 8 0 0 0 Control_L\n" "MouseMoveEvent 242 260 8 0 0 0 Control_L\n" "MouseMoveEvent 242 255 8 0 0 0 Control_L\n" "MouseMoveEvent 242 250 8 0 0 0 Control_L\n" "MouseMoveEvent 242 245 8 0 0 0 Control_L\n" "MouseMoveEvent 242 240 8 0 0 0 Control_L\n" "MouseMoveEvent 241 238 8 0 0 0 Control_L\n" "KeyReleaseEvent 241 238 0 0 0 1 Control_L\n" "MiddleButtonReleaseEvent 241 238 0 0 0 0 Control_L\n" "MouseMoveEvent 103 250 0 0 0 0 Control_L\n" "KeyPressEvent 103 250 -128 0 0 1 Control_L\n" "MiddleButtonPressEvent 103 250 8 0 0 0 Control_L\n" "MouseMoveEvent 100 250 8 0 0 0 Control_L\n" "MouseMoveEvent 97 251 8 0 0 0 Control_L\n" "MouseMoveEvent 94 251 8 0 0 0 Control_L\n" "MouseMoveEvent 91 252 8 0 0 0 Control_L\n" "MouseMoveEvent 90 253 8 0 0 0 Control_L\n" "MouseMoveEvent 85 253 8 0 0 0 Control_L\n" "MouseMoveEvent 80 253 8 0 0 0 Control_L\n" "MouseMoveEvent 85 253 8 0 0 0 Control_L\n" "KeyReleaseEvent 85 253 0 0 0 
1 Control_L\n" "MiddleButtonReleaseEvent 85 253 0 0 0 0 Control_L\n" "MouseMoveEvent 228 88 0 0 0 0 Control_L\n" "KeyPressEvent 228 88 -128 0 0 1 Control_L\n" "MiddleButtonPressEvent 228 88 8 0 0 0 Control_L\n" "MouseMoveEvent 228 86 8 0 0 0 Control_L\n" "MouseMoveEvent 227 83 8 0 0 0 Control_L\n" "MouseMoveEvent 226 83 8 0 0 0 Control_L\n" "MouseMoveEvent 225 80 8 0 0 0 Control_L\n" "MouseMoveEvent 225 75 8 0 0 0 Control_L\n" "MouseMoveEvent 224 70 8 0 0 0 Control_L\n" "MouseMoveEvent 223 70 8 0 0 0 Control_L\n" "MouseMoveEvent 223 75 8 0 0 0 Control_L\n" "MouseMoveEvent 222 80 8 0 0 0 Control_L\n" "MouseMoveEvent 222 85 8 0 0 0 Control_L\n" "MouseMoveEvent 222 90 8 0 0 0 Control_L\n" "KeyReleaseEvent 222 93 0 0 0 1 Control_L\n" "MiddleButtonReleaseEvent 222 93 0 0 0 0 Control_L\n" "MouseMoveEvent 260 76 0 0 0 0 Control_L\n" "KeyPressEvent 260 76 -128 0 0 1 Control_L\n" "MiddleButtonPressEvent 260 76 8 0 0 0 Control_L\n" "MouseMoveEvent 260 75 8 0 0 0 Control_L\n" "MouseMoveEvent 261 72 8 0 0 0 Control_L\n" "MouseMoveEvent 262 69 8 0 0 0 Control_L\n" "MouseMoveEvent 263 67 8 0 0 0 Control_L\n" "MouseMoveEvent 263 65 8 0 0 0 Control_L\n" "MouseMoveEvent 264 63 8 0 0 0 Control_L\n" "MouseMoveEvent 265 61 8 0 0 0 Control_L\n" "MouseMoveEvent 266 60 8 0 0 0 Control_L\n" "MouseMoveEvent 266 55 8 0 0 0 Control_L\n" "MouseMoveEvent 267 53 8 0 0 0 Control_L\n" "KeyReleaseEvent 267 53 0 0 0 1 Control_L\n" "MiddleButtonReleaseEvent 267 53 0 0 0 0 Control_L\n" "MouseMoveEvent 278 226 0 0 0 0 Control_L\n" "KeyPressEvent 278 226 -128 0 0 1 Control_L\n" "MiddleButtonPressEvent 278 226 8 0 0 0 Control_L\n" "MouseMoveEvent 278 227 8 0 0 0 Control_L\n" "MouseMoveEvent 278 230 8 0 0 0 Control_L\n" "MouseMoveEvent 280 232 8 0 0 0 Control_L\n" "MouseMoveEvent 282 234 8 0 0 0 Control_L\n" "MouseMoveEvent 284 237 8 0 0 0 Control_L\n" "MouseMoveEvent 286 239 8 0 0 0 Control_L\n" "MouseMoveEvent 287 242 8 0 0 0 Control_L\n" "MouseMoveEvent 290 245 8 0 0 0 Control_L\n" "MouseMoveEvent 292 
247 8 0 0 0 Control_L\n" "MouseMoveEvent 293 249 8 0 0 0 Control_L\n" "KeyReleaseEvent 283 249 0 0 0 1 Control_L\n" "MiddleButtonReleaseEvent 293 249 0 0 0 0 Control_L\n" "MouseMoveEvent 93 286 0 0 0 0 Control_L\n" "KeyPressEvent 93 286 -128 0 0 1 Control_L\n" "MiddleButtonPressEvent 93 286 8 0 0 0 Control_L\n" "MouseMoveEvent 92 288 8 0 0 0 Control_L\n" "MouseMoveEvent 90 290 8 0 0 0 Control_L\n" "MouseMoveEvent 87 292 8 0 0 0 Control_L\n" "MouseMoveEvent 84 295 8 0 0 0 Control_L\n" "MouseMoveEvent 82 297 8 0 0 0 Control_L\n" "MouseMoveEvent 80 298 8 0 0 0 Control_L\n" "MouseMoveEvent 78 300 8 0 0 0 Control_L\n" "KeyReleaseEvent 78 300 0 0 0 1 Control_L\n" "MiddleButtonReleaseEvent 78 300 0 0 0 0 Control_L\n" "MouseMoveEvent 198 194 0 0 0 0 Control_L\n" "KeyPressEvent 198 194 -128 0 0 1 Control_L\n" "MiddleButtonPressEvent 198 194 8 0 0 0 Control_L\n" "MouseMoveEvent 196 194 8 0 0 0 Control_L\n" "MouseMoveEvent 191 192 8 0 0 0 Control_L\n" "MouseMoveEvent 185 189 8 0 0 0 Control_L\n" "MouseMoveEvent 182 187 8 0 0 0 Control_L\n" "MouseMoveEvent 180 186 8 0 0 0 Control_L\n" "MouseMoveEvent 178 185 8 0 0 0 Control_L\n" "MouseMoveEvent 177 180 8 0 0 0 Control_L\n" "MouseMoveEvent 178 179 8 0 0 0 Control_L\n" "MouseMoveEvent 179 178 8 0 0 0 Control_L\n" "MouseMoveEvent 179 177 8 0 0 0 Control_L\n" "MouseMoveEvent 182 176 8 0 0 0 Control_L\n" "MouseMoveEvent 187 175 8 0 0 0 Control_L\n" "MouseMoveEvent 190 177 8 0 0 0 Control_L\n" "MouseMoveEvent 190 179 8 0 0 0 Control_L\n" "KeyReleaseEvent 190 179 0 0 0 1 Control_L\n" "MiddleButtonReleaseEvent 190 179 0 0 0 0 Control_L\n" "KeyPressEvent 190 179 0 -128 0 1 Shift_L\n" "MiddleButtonPressEvent 190 179 0 4 0 0 Shift_L\n" "MouseMoveEvent 190 180 0 4 0 0 Shift_L\n" "MouseMoveEvent 190 185 0 4 0 0 Shift_L\n" "MouseMoveEvent 190 190 0 4 0 0 Shift_L\n" "MouseMoveEvent 191 194 0 4 0 0 Shift_L\n" "MouseMoveEvent 192 200 0 4 0 0 Shift_L\n" "MouseMoveEvent 192 206 0 4 0 0 Shift_L\n" "MouseMoveEvent 193 213 0 4 0 0 Shift_L\n" 
"MouseMoveEvent 193 209 0 4 0 0 Shift_L\n" "MouseMoveEvent 193 206 0 4 0 0 Shift_L\n" "MouseMoveEvent 193 200 0 4 0 0 Shift_L\n" "MouseMoveEvent 193 196 0 4 0 0 Shift_L\n" "MouseMoveEvent 193 190 0 4 0 0 Shift_L\n" "MouseMoveEvent 194 185 0 4 0 0 Shift_L\n" "MouseMoveEvent 196 180 0 4 0 0 Shift_L\n" "MouseMoveEvent 197 175 0 4 0 0 Shift_L\n" "MouseMoveEvent 198 172 0 4 0 0 Shift_L\n" "KeyReleaseEvent 198 172 0 0 0 1 Shift_L\n" "MiddleButtonReleaseEvent 198 172 0 0 0 0 Shift_L\n" "MouseMoveEvent 267 172 0 0 0 0 Shift_L\n" "MiddleButtonPressEvent 267 172 0 0 0 0 Shift_L\n" "MouseMoveEvent 264 171 0 0 0 0 Shift_L\n" "MouseMoveEvent 260 171 0 0 0 0 Shift_L\n" "MouseMoveEvent 255 171 0 0 0 0 Shift_L\n" "MouseMoveEvent 250 171 0 0 0 0 Shift_L\n" "MouseMoveEvent 245 172 0 0 0 0 Shift_L\n" "MiddleButtonReleaseEvent 245 172 0 0 0 0 Shift_L\n" "MouseMoveEvent 203 65 0 0 0 0 Shift_L\n" "MiddleButtonPressEvent 203 65 0 0 0 0 Shift_L\n" "MouseMoveEvent 200 65 0 0 0 0 Shift_L\n" "MouseMoveEvent 195 66 0 0 0 0 Shift_L\n" "MouseMoveEvent 193 67 0 0 0 0 Shift_L\n" "MouseMoveEvent 190 68 0 0 0 0 Shift_L\n" "MouseMoveEvent 184 71 0 0 0 0 Shift_L\n" "MouseMoveEvent 180 73 0 0 0 0 Shift_L\n" "MouseMoveEvent 178 74 0 0 0 0 Shift_L\n" "MouseMoveEvent 176 75 0 0 0 0 Shift_L\n" "MouseMoveEvent 175 76 0 0 0 0 Shift_L\n" "MouseMoveEvent 174 77 0 0 0 0 Shift_L\n" "MouseMoveEvent 173 78 0 0 0 0 Shift_L\n" "MouseMoveEvent 172 79 0 0 0 0 Shift_L\n" "MouseMoveEvent 170 80 0 0 0 0 Shift_L\n" "MouseMoveEvent 169 81 0 0 0 0 Shift_L\n" "MouseMoveEvent 168 82 0 0 0 0 Shift_L\n" "MouseMoveEvent 167 83 0 0 0 0 Shift_L\n" "MouseMoveEvent 166 84 0 0 0 0 Shift_L\n" "MouseMoveEvent 164 84 0 0 0 0 Shift_L\n" "MouseMoveEvent 163 85 0 0 0 0 Shift_L\n" "MouseMoveEvent 162 86 0 0 0 0 Shift_L\n" "MouseMoveEvent 160 86 0 0 0 0 Shift_L\n" "MouseMoveEvent 158 87 0 0 0 0 Shift_L\n" "MiddleButtonReleaseEvent 158 87 0 0 0 0 Shift_L\n" "MouseMoveEvent 95 251 0 0 0 0 Shift_L\n" "MiddleButtonPressEvent 95 251 0 0 0 0 
Shift_L\n" "MouseMoveEvent 90 251 0 0 0 0 Shift_L\n" "MouseMoveEvent 85 252 0 0 0 0 Shift_L\n" "MouseMoveEvent 80 252 0 0 0 0 Shift_L\n" "MouseMoveEvent 75 252 0 0 0 0 Shift_L\n" "MouseMoveEvent 70 252 0 0 0 0 Shift_L\n" "MouseMoveEvent 65 251 0 0 0 0 Shift_L\n" "MiddleButtonReleaseEvent 65 251 0 0 0 0 Shift_L\n" "MouseMoveEvent 133 281 0 0 0 0 Shift_L\n" "MiddleButtonPressEvent 133 281 0 0 0 0 Shift_L\n" "MouseMoveEvent 130 280 0 0 0 0 Shift_L\n" "MouseMoveEvent 125 277 0 0 0 0 Shift_L\n" "MouseMoveEvent 120 274 0 0 0 0 Shift_L\n" "MouseMoveEvent 115 270 0 0 0 0 Shift_L\n" "MouseMoveEvent 113 267 0 0 0 0 Shift_L\n" "MouseMoveEvent 110 265 0 0 0 0 Shift_L\n" "MiddleButtonReleaseEvent 110 265 0 0 0 0 Shift_L\n" "MouseMoveEvent 99 286 0 0 0 0 Shift_L\n" "MiddleButtonPressEvent 99 286 0 0 0 0 Shift_L\n" "MouseMoveEvent 100 287 0 0 0 0 Shift_L\n" "MouseMoveEvent 105 289 0 0 0 0 Shift_L\n" "MouseMoveEvent 110 290 0 0 0 0 Shift_L\n" "MouseMoveEvent 115 290 0 0 0 0 Shift_L\n" "MouseMoveEvent 120 290 0 0 0 0 Shift_L\n" "MouseMoveEvent 125 285 0 0 0 0 Shift_L\n" "MouseMoveEvent 129 281 0 0 0 0 Shift_L\n" "MouseMoveEvent 130 279 0 0 0 0 Shift_L\n" "MouseMoveEvent 128 281 0 0 0 0 Shift_L\n" "MouseMoveEvent 126 282 0 0 0 0 Shift_L\n" "MouseMoveEvent 123 283 0 0 0 0 Shift_L\n" "MouseMoveEvent 120 284 0 0 0 0 Shift_L\n" "MouseMoveEvent 115 285 0 0 0 0 Shift_L\n" "MouseMoveEvent 110 286 0 0 0 0 Shift_L\n" "MouseMoveEvent 106 286 0 0 0 0 Shift_L\n" "MouseMoveEvent 102 286 0 0 0 0 Shift_L\n" "MouseMoveEvent 99 285 0 0 0 0 Shift_L\n" "MouseMoveEvent 95 283 0 0 0 0 Shift_L\n" "MouseMoveEvent 92 281 0 0 0 0 Shift_L\n" "MouseMoveEvent 89 279 0 0 0 0 Shift_L\n" "MouseMoveEvent 88 276 0 0 0 0 Shift_L\n" "MouseMoveEvent 86 274 0 0 0 0 Shift_L\n" "MiddleButtonReleaseEvent 86 274 0 0 0 0 Shift_L\n" ; //---------------------------------------------------------------------------- class vtkWidgetWindowLevelCallback : public vtkCommand { public: static vtkWidgetWindowLevelCallback *New() { 
return new vtkWidgetWindowLevelCallback; }

  // Observer for EndWindowLevelEvent: copies the finished widget's
  // window/level values to the other two plane widgets so all three planes
  // display with the same contrast.  callData carries {window, level}.
  void Execute( vtkObject *caller, unsigned long vtkNotUsed( event ),
                void *callData ) VTK_OVERRIDE
  {
    vtkImagePlaneWidget* self =
      reinterpret_cast< vtkImagePlaneWidget* >( caller );
    if(!self) return;

    double* wl = static_cast<double*>( callData );

    if ( self == this->WidgetX )
    {
      this->WidgetY->SetWindowLevel(wl[0],wl[1],1);
      this->WidgetZ->SetWindowLevel(wl[0],wl[1],1);
    }
    else if( self == this->WidgetY )
    {
      this->WidgetX->SetWindowLevel(wl[0],wl[1],1);
      this->WidgetZ->SetWindowLevel(wl[0],wl[1],1);
    }
    else if (self == this->WidgetZ)
    {
      this->WidgetX->SetWindowLevel(wl[0],wl[1],1);
      this->WidgetY->SetWindowLevel(wl[0],wl[1],1);
    }
  }

  vtkWidgetWindowLevelCallback():WidgetX( 0 ), WidgetY( 0 ), WidgetZ ( 0 ) {}

  // Non-owning pointers to the three orthogonal plane widgets; set by the
  // test driver before the callback is registered.
  vtkImagePlaneWidget* WidgetX;
  vtkImagePlaneWidget* WidgetY;
  vtkImagePlaneWidget* WidgetZ;
};

// Regression test driver: builds a head-volume pipeline with three
// orthogonal vtkImagePlaneWidgets (nearest/linear/cubic reslicing), links
// their window/level via the callback above, and replays the recorded
// interaction log IPWeventLog.
int ImagePlaneWidget( int argc, char *argv[] )
{
  char* fname =
    vtkTestUtilities::ExpandDataFileName(argc, argv, "Data/headsq/quarter");

  // Read the 16-bit test volume.
  vtkSmartPointer<vtkVolume16Reader> v16 =
    vtkSmartPointer<vtkVolume16Reader>::New();
  v16->SetDataDimensions( 64, 64);
  v16->SetDataByteOrderToLittleEndian();
  v16->SetImageRange( 1, 93);
  v16->SetDataSpacing( 3.2, 3.2, 1.5);
  v16->SetFilePrefix( fname );
  v16->SetDataMask( 0x7fff);
  v16->Update();

  delete[] fname;

  // Outline of the volume bounds for spatial reference.
  vtkSmartPointer<vtkOutlineFilter> outline =
    vtkSmartPointer<vtkOutlineFilter>::New();
  outline->SetInputConnection(v16->GetOutputPort());

  vtkSmartPointer<vtkPolyDataMapper> outlineMapper =
    vtkSmartPointer<vtkPolyDataMapper>::New();
  outlineMapper->SetInputConnection(outline->GetOutputPort());

  vtkSmartPointer<vtkActor> outlineActor =
    vtkSmartPointer<vtkActor>::New();
  outlineActor->SetMapper( outlineMapper);

  // Two renderers share one window: ren1 for the 3D scene, ren2 for the
  // 2D resliced image.
  vtkSmartPointer<vtkRenderer> ren1 =
    vtkSmartPointer<vtkRenderer>::New();
  vtkSmartPointer<vtkRenderer> ren2 =
    vtkSmartPointer<vtkRenderer>::New();

  vtkSmartPointer<vtkRenderWindow> renWin =
    vtkSmartPointer<vtkRenderWindow>::New();
  renWin->SetMultiSamples(0);
  renWin->AddRenderer(ren2);
  renWin->AddRenderer(ren1);

  vtkSmartPointer<vtkRenderWindowInteractor> iren =
    vtkSmartPointer<vtkRenderWindowInteractor>::New();
  iren->SetRenderWindow(renWin);

  // Shared picker so the three widgets agree on what is picked.
  vtkSmartPointer<vtkCellPicker> picker =
    vtkSmartPointer<vtkCellPicker>::New();
  picker->SetTolerance(0.005);

  //assign default props to the ipw's texture plane actor
  vtkSmartPointer<vtkProperty> ipwProp =
    vtkSmartPointer<vtkProperty>::New();

  // X plane: nearest-neighbour reslice, restricted to the volume.
  vtkSmartPointer<vtkImagePlaneWidget> planeWidgetX =
    vtkSmartPointer<vtkImagePlaneWidget>::New();
  planeWidgetX->SetInteractor( iren);
  planeWidgetX->SetKeyPressActivationValue('x');
  planeWidgetX->SetPicker(picker);
  planeWidgetX->RestrictPlaneToVolumeOn();
  planeWidgetX->GetPlaneProperty()->SetColor(1,0,0);
  planeWidgetX->SetTexturePlaneProperty(ipwProp);
  planeWidgetX->TextureInterpolateOff();
  planeWidgetX->SetResliceInterpolateToNearestNeighbour();
  planeWidgetX->SetInputConnection(v16->GetOutputPort());
  planeWidgetX->SetPlaneOrientationToXAxes();
  planeWidgetX->SetSliceIndex(32);
  planeWidgetX->DisplayTextOn();
  planeWidgetX->On();
  // Exercise the InteractionOff/On toggle path.
  planeWidgetX->InteractionOff();
  planeWidgetX->InteractionOn();

  // Y plane: linear reslice, positioned by world coordinate, shares X's LUT.
  vtkSmartPointer<vtkImagePlaneWidget> planeWidgetY =
    vtkSmartPointer<vtkImagePlaneWidget>::New();
  planeWidgetY->SetInteractor( iren);
  planeWidgetY->SetKeyPressActivationValue('y');
  planeWidgetY->SetPicker(picker);
  planeWidgetY->GetPlaneProperty()->SetColor(1,1,0);
  planeWidgetY->SetTexturePlaneProperty(ipwProp);
  planeWidgetY->TextureInterpolateOn();
  planeWidgetY->SetResliceInterpolateToLinear();
  planeWidgetY->SetInputConnection(v16->GetOutputPort());
  planeWidgetY->SetPlaneOrientationToYAxes();
  planeWidgetY->SetSlicePosition(102.4);
  planeWidgetY->SetLookupTable( planeWidgetX->GetLookupTable());
  planeWidgetY->DisplayTextOff();
  planeWidgetY->UpdatePlacement();
  planeWidgetY->On();

  // Z plane: cubic reslice, also sharing X's lookup table.
  vtkSmartPointer<vtkImagePlaneWidget> planeWidgetZ =
    vtkSmartPointer<vtkImagePlaneWidget>::New();
  planeWidgetZ->SetInteractor( iren);
  planeWidgetZ->SetKeyPressActivationValue('z');
  planeWidgetZ->SetPicker(picker);
  planeWidgetZ->GetPlaneProperty()->SetColor(0,0,1);
  planeWidgetZ->SetTexturePlaneProperty(ipwProp);
  planeWidgetZ->TextureInterpolateOn();
  planeWidgetZ->SetResliceInterpolateToCubic();
  planeWidgetZ->SetInputConnection(v16->GetOutputPort());
  planeWidgetZ->SetPlaneOrientationToZAxes();
  planeWidgetZ->SetSliceIndex(25);
  planeWidgetZ->SetLookupTable( planeWidgetX->GetLookupTable());
  planeWidgetZ->DisplayTextOn();
  planeWidgetZ->On();

  // Link the three widgets' window/level through the callback.
  vtkSmartPointer<vtkWidgetWindowLevelCallback> cbk =
    vtkSmartPointer<vtkWidgetWindowLevelCallback>::New();
  cbk->WidgetX = planeWidgetX;
  cbk->WidgetY = planeWidgetY;
  cbk->WidgetZ = planeWidgetZ;
  planeWidgetX->AddObserver( vtkCommand::EndWindowLevelEvent, cbk );
  planeWidgetY->AddObserver( vtkCommand::EndWindowLevelEvent, cbk );
  planeWidgetZ->AddObserver( vtkCommand::EndWindowLevelEvent, cbk );

  double wl[2];
  planeWidgetZ->GetWindowLevel(wl);

  // Add a 2D image to test the GetReslice method
  //
  vtkSmartPointer<vtkImageMapToColors> colorMap =
    vtkSmartPointer<vtkImageMapToColors>::New();
  colorMap->PassAlphaToOutputOff();
  colorMap->SetActiveComponent(0);
  colorMap->SetOutputFormatToLuminance();
  colorMap->SetInputData(planeWidgetZ->GetResliceOutput());
  colorMap->SetLookupTable(planeWidgetX->GetLookupTable());

  vtkSmartPointer<vtkImageActor> imageActor =
    vtkSmartPointer<vtkImageActor>::New();
  imageActor->PickableOff();
  imageActor->GetMapper()->SetInputConnection(colorMap->GetOutputPort());

  // Add the actors
  //
  ren1->AddActor( outlineActor);
  ren2->AddActor( imageActor);

  ren1->SetBackground( 0.1, 0.1, 0.2);
  ren2->SetBackground( 0.2, 0.1, 0.2);

  renWin->SetSize( 600, 350);

  // Split the window: 3D view left, resliced image right.
  ren1->SetViewport(0,0,0.58333,1);
  ren2->SetViewport(0.58333,0,1,1);

  // Set the actors' positions
  //
  renWin->Render();

  // Reset both cameras via the 'r' keypress in each viewport.
  iren->SetEventPosition( 175,175);
  iren->SetKeyCode('r');
  iren->InvokeEvent(vtkCommand::CharEvent,NULL);
  iren->SetEventPosition( 475,175);
  iren->SetKeyCode('r');
  iren->InvokeEvent(vtkCommand::CharEvent,NULL);
  renWin->Render();

  ren1->GetActiveCamera()->Elevation(110);
  ren1->GetActiveCamera()->SetViewUp(0, 0, -1);
  ren1->GetActiveCamera()->Azimuth(45);
  ren1->GetActiveCamera()->Dolly(1.15);
  ren1->ResetCameraClippingRange();

  // Playback recorded events
  //
  vtkSmartPointer<vtkInteractorEventRecorder> recorder =
    vtkSmartPointer<vtkInteractorEventRecorder>::New();
  recorder->SetInteractor(iren);
  recorder->ReadFromInputStringOn();
  recorder->SetInputString(IPWeventLog);

  // Test SetKeyPressActivationValue for one of the widgets
  //
  iren->SetKeyCode('z');
  iren->InvokeEvent(vtkCommand::CharEvent,NULL);
  iren->SetKeyCode('z');
  iren->InvokeEvent(vtkCommand::CharEvent,NULL);

  recorder->Play();

  // Remove the observers so we can go interactive. Without this the "-I"
  // testing option fails.
  recorder->Off();

  iren->Initialize();
  iren->Start();

  return EXIT_SUCCESS;
}
// Copyright (c) 2011-2013 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "optionsdialog.h"
#include "ui_optionsdialog.h"

#include "bitcoinunits.h"
#include "monitoreddatamapper.h"
#include "netbase.h"
#include "optionsmodel.h"

#include <QDir>
#include <QIntValidator>
#include <QLocale>
#include <QMessageBox>

/* Preferences dialog: wires UI widgets to OptionsModel rows via a
   MonitoredDataMapper so edits are only persisted on OK/Apply. */
OptionsDialog::OptionsDialog(QWidget *parent) :
    QDialog(parent),
    ui(new Ui::OptionsDialog),
    model(0),
    mapper(0),
    fRestartWarningDisplayed_Proxy(false),
    fRestartWarningDisplayed_Lang(false),
    fProxyIpValid(true)
{
    ui->setupUi(this);

    /* Network elements init */
#ifndef USE_UPNP
    ui->mapPortUpnp->setEnabled(false);
#endif

    /* proxy fields start disabled; the connectSocks checkbox toggles them */
    ui->proxyIp->setEnabled(false);
    ui->proxyPort->setEnabled(false);
    ui->proxyPort->setValidator(new QIntValidator(1, 65535, this));

    ui->socksVersion->setEnabled(false);
    ui->socksVersion->addItem("5", 5);
    ui->socksVersion->addItem("4", 4);
    ui->socksVersion->setCurrentIndex(0);

    connect(ui->connectSocks, SIGNAL(toggled(bool)), ui->proxyIp, SLOT(setEnabled(bool)));
    connect(ui->connectSocks, SIGNAL(toggled(bool)), ui->proxyPort, SLOT(setEnabled(bool)));
    connect(ui->connectSocks, SIGNAL(toggled(bool)), ui->socksVersion, SLOT(setEnabled(bool)));
    /* clicked (user action) rather than toggled, so programmatic changes
       don't pop the restart warning */
    connect(ui->connectSocks, SIGNAL(clicked(bool)), this, SLOT(showRestartWarning_Proxy()));

    /* eventFilter() below validates the proxy IP on focus-out */
    ui->proxyIp->installEventFilter(this);

    /* Window elements init */
#ifdef Q_OS_MAC
    /* remove Window tab on Mac */
    ui->tabWidget->removeTab(ui->tabWidget->indexOf(ui->tabWindow));
#endif

    /* Display elements init: populate the language selector from the
       bundled translation resources */
    QDir translations(":translations");
    ui->lang->addItem(QString("(") + tr("default") + QString(")"), QVariant(""));
    foreach(const QString &langStr, translations.entryList())
    {
        QLocale locale(langStr);

        /** check if the locale name consists of 2 parts (language_country) */
        if(langStr.contains("_"))
        {
#if QT_VERSION >= 0x040800
            /** display language strings as "native language - native country (locale name)", e.g. "Deutsch - Deutschland (de)" */
            ui->lang->addItem(locale.nativeLanguageName() + QString(" - ") + locale.nativeCountryName() + QString(" (") + langStr + QString(")"), QVariant(langStr));
#else
            /** display language strings as "language - country (locale name)", e.g. "German - Germany (de)" */
            ui->lang->addItem(QLocale::languageToString(locale.language()) + QString(" - ") + QLocale::countryToString(locale.country()) + QString(" (") + langStr + QString(")"), QVariant(langStr));
#endif
        }
        else
        {
#if QT_VERSION >= 0x040800
            /** display language strings as "native language (locale name)", e.g. "Deutsch (de)" */
            ui->lang->addItem(locale.nativeLanguageName() + QString(" (") + langStr + QString(")"), QVariant(langStr));
#else
            /** display language strings as "language (locale name)", e.g. "German (de)" */
            ui->lang->addItem(QLocale::languageToString(locale.language()) + QString(" (") + langStr + QString(")"), QVariant(langStr));
#endif
        }
    }

    ui->unit->setModel(new BitcoinUnits(this));

    /* Widget-to-option mapper */
    mapper = new MonitoredDataMapper(this);
    mapper->setSubmitPolicy(QDataWidgetMapper::ManualSubmit);
    mapper->setOrientation(Qt::Vertical);

    /* enable apply button when data modified */
    connect(mapper, SIGNAL(viewModified()), this, SLOT(enableApplyButton()));
    /* disable apply button when new data loaded */
    connect(mapper, SIGNAL(currentIndexChanged(int)), this, SLOT(disableApplyButton()));
    /* setup/change UI elements when proxy IP is invalid/valid */
    connect(this, SIGNAL(proxyIpValid(QValidatedLineEdit *, bool)), this, SLOT(handleProxyIpValid(QValidatedLineEdit *, bool)));
}

OptionsDialog::~OptionsDialog()
{
    delete ui;
}

/* Attach the options model and load current settings into the widgets.
   Called once after construction; a null model leaves the dialog inert. */
void OptionsDialog::setModel(OptionsModel *model)
{
    this->model = model;

    if(model)
    {
        connect(model, SIGNAL(displayUnitChanged(int)), this, SLOT(updateDisplayUnit()));

        mapper->setModel(model);
        setMapper();
        mapper->toFirst();
    }

    /* update the display unit, to not use the default ("BTC") */
    updateDisplayUnit();

    /* warn only when language selection changes by user action (placed here so init via mapper doesn't trigger this) */
    connect(ui->lang, SIGNAL(valueChanged()), this, SLOT(showRestartWarning_Lang()));

    /* disable apply button after settings are loaded as there is nothing to save */
    disableApplyButton();
}

/* Map each widget to its OptionsModel row (one mapping per option). */
void OptionsDialog::setMapper()
{
    /* Main */
    mapper->addMapping(ui->bitcoinAtStartup, OptionsModel::StartAtStartup);

    /* Wallet */
    mapper->addMapping(ui->transactionFee, OptionsModel::Fee);
    mapper->addMapping(ui->spendZeroConfChange, OptionsModel::SpendZeroConfChange);

    /* Network */
    mapper->addMapping(ui->mapPortUpnp, OptionsModel::MapPortUPnP);

    mapper->addMapping(ui->connectSocks, OptionsModel::ProxyUse);
    mapper->addMapping(ui->proxyIp, OptionsModel::ProxyIP);
    mapper->addMapping(ui->proxyPort, OptionsModel::ProxyPort);
    mapper->addMapping(ui->socksVersion, OptionsModel::ProxySocksVersion);

    /* Window */
#ifndef Q_OS_MAC
    mapper->addMapping(ui->minimizeToTray, OptionsModel::MinimizeToTray);
    mapper->addMapping(ui->minimizeOnClose, OptionsModel::MinimizeOnClose);
#endif

    /* Display */
    mapper->addMapping(ui->lang, OptionsModel::Language);
    mapper->addMapping(ui->unit, OptionsModel::DisplayUnit);
    mapper->addMapping(ui->displayAddresses, OptionsModel::DisplayAddresses);
    mapper->addMapping(ui->coinControlFeatures, OptionsModel::CoinControlFeatures);
}

void OptionsDialog::enableApplyButton()
{
    ui->applyButton->setEnabled(true);
}

void OptionsDialog::disableApplyButton()
{
    ui->applyButton->setEnabled(false);
}

void OptionsDialog::enableSaveButtons()
{
    /* prevent enabling of the save buttons when data modified, if there is an invalid proxy address present */
    if(fProxyIpValid)
        setSaveButtonState(true);
}

void OptionsDialog::disableSaveButtons()
{
    setSaveButtonState(false);
}

/* Toggle both OK and Apply together (they share the same validity gate). */
void OptionsDialog::setSaveButtonState(bool fState)
{
    ui->applyButton->setEnabled(fState);
    ui->okButton->setEnabled(fState);
}

void OptionsDialog::on_resetButton_clicked()
{
    if(model)
    {
        // confirmation dialog
        QMessageBox::StandardButton btnRetVal = QMessageBox::question(this, tr("Confirm options reset"),
            tr("Some settings may require a client restart to take effect.") + "<br><br>" + tr("Do you want to proceed?"),
            QMessageBox::Yes | QMessageBox::Cancel, QMessageBox::Cancel);

        if(btnRetVal == QMessageBox::Cancel)
            return;

        disableApplyButton();

        /* disable restart warning messages display */
        fRestartWarningDisplayed_Lang = fRestartWarningDisplayed_Proxy = true;

        /* reset all options and save the default values (QSettings) */
        model->Reset();
        mapper->toFirst();
        mapper->submit();

        /* re-enable restart warning messages display */
        fRestartWarningDisplayed_Lang = fRestartWarningDisplayed_Proxy = false;
    }
}

void OptionsDialog::on_okButton_clicked()
{
    mapper->submit();
    accept();
}

void OptionsDialog::on_cancelButton_clicked()
{
    reject();
}

void OptionsDialog::on_applyButton_clicked()
{
    mapper->submit();
    disableApplyButton();
}

/* Each restart warning is shown at most once per dialog lifetime
   (flag is reset only by a model reset, see on_resetButton_clicked). */
void OptionsDialog::showRestartWarning_Proxy()
{
    if(!fRestartWarningDisplayed_Proxy)
    {
        QMessageBox::warning(this, tr("Warning"), tr("This setting will take effect after restarting Camelcoin."), QMessageBox::Ok);
        fRestartWarningDisplayed_Proxy = true;
    }
}

void OptionsDialog::showRestartWarning_Lang()
{
    if(!fRestartWarningDisplayed_Lang)
    {
        QMessageBox::warning(this, tr("Warning"), tr("This setting will take effect after restarting Camelcoin."), QMessageBox::Ok);
        fRestartWarningDisplayed_Lang = true;
    }
}

void OptionsDialog::updateDisplayUnit()
{
    if(model)
    {
        /* Update transactionFee with the current unit */
        ui->transactionFee->setDisplayUnit(model->getDisplayUnit());
    }
}

/* Slot for the proxyIpValid signal: gates the save buttons and shows or
   clears the inline validation error. */
void OptionsDialog::handleProxyIpValid(QValidatedLineEdit *object, bool fState)
{
    // this is used in a check before re-enabling the save buttons
    fProxyIpValid = fState;

    if(fProxyIpValid)
    {
        enableSaveButtons();
        ui->statusLabel->clear();
    }
    else
    {
        disableSaveButtons();
        object->setValid(fProxyIpValid);
        ui->statusLabel->setStyleSheet("QLabel { color: red; }");
        ui->statusLabel->setText(tr("The supplied proxy address is invalid."));
    }
}

bool OptionsDialog::eventFilter(QObject *object, QEvent *event)
{
    if(event->type() == QEvent::FocusOut)
    {
        if(object == ui->proxyIp)
        {
            CService addr;
            /* Check proxyIp for a valid IPv4/IPv6 address and emit the proxyIpValid signal */
            emit proxyIpValid(ui->proxyIp, LookupNumeric(ui->proxyIp->text().toStdString().c_str(), addr));
        }
    }

    return QDialog::eventFilter(object, event);
}
//=========================================================================
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//=========================================================================
#include "smtk/attribute/Attribute.h"
#include "smtk/attribute/Definition.h"
#include "smtk/attribute/FileItem.h"
#include "smtk/attribute/Resource.h"
#include "smtk/attribute/StringItem.h"

#include "smtk/io/AttributeReader.h"
#include "smtk/io/Logger.h"

#include <iostream>

namespace
{
// Minimal attribute system: one definition ("att1") with an extensible
// string item and an extensible file item, plus one instance ("att").
const char* testInput =
  "<?xml version=\"1.0\" encoding=\"utf-8\" ?> "
  "<SMTK_AttributeSystem Version=\"2\"> "
  " <Definitions> "
  " <AttDef Type=\"att1\" BaseType=\"\"> "
  " <ItemDefinitions> "
  " <String Name=\"myStrings\" Extensible=\"1\" "
  " NumberOfRequiredValues=\"1\"> "
  " </String> "
  " <File Name=\"myFiles\" Extensible=\"1\" ShouldExist=\"false\" "
  " NumberOfRequiredValues=\"1\"> "
  " </File> "
  " </ItemDefinitions> "
  " </AttDef> "
  " </Definitions> "
  " <Attributes> "
  " <Att Name=\"att\" Type=\"att1\"/> "
  " </Attributes> "
  "</SMTK_AttributeSystem> ";
}

// Regression test: extensible items must survive both Item::assign()
// (first scope) and Resource::copyAttribute() (second scope) with their
// expanded value counts intact. Returns 0 on success, -2 on any failure.
int main()
{
  smtk::attribute::ResourcePtr resptr = smtk::attribute::Resource::create();
  smtk::attribute::Resource& resource(*resptr.get());
  smtk::io::Logger logger;
  smtk::io::AttributeReader reader;
  if (reader.readContents(resptr, testInput, logger))
  {
    std::cerr << "Encountered Errors while reading input data\n";
    std::cerr << logger.convertToString();
    return -2;
  }

  std::vector<smtk::attribute::AttributePtr> atts;
  resource.attributes(atts);
  if (atts.size() != 1)
  {
    std::cerr << "Unexpected number of attributes: " << atts.size() << "\n";
    std::cerr << logger.convertToString();
    return -2;
  }
  smtk::attribute::AttributePtr att = atts[0];

  // Grow both extensible items beyond their single required value.
  smtk::attribute::StringItemPtr myStrings = att->findString("myStrings");
  myStrings->setNumberOfValues(2);
  myStrings->setValue(0, "string0");
  myStrings->setValue(1, "string1");

  smtk::attribute::FileItemPtr myFiles = att->findFile("myFiles");
  myFiles->setNumberOfValues(2);
  myFiles->setValue(0, "/path/to/file0");
  myFiles->setValue(1, "/path/to/file1");

  // Scope 1: item-level copy via Item::assign().
  {
    std::vector<smtk::attribute::AttributePtr> copiedAtts;
    resource.attributes(copiedAtts);
    if (copiedAtts.size() != 1)
    {
      std::cerr << "Unexpected number of attributes: " << copiedAtts.size() << "\n";
      std::cerr << logger.convertToString();
      return -2;
    }
    smtk::attribute::AttributePtr copiedAtt = copiedAtts[0];

    smtk::attribute::StringItemPtr myCopiedStrings = copiedAtt->findString("myStrings");
    // assign() takes a ConstItemPtr, so upcast and const-qualify first.
    smtk::attribute::ItemPtr myStringsAsItems =
      std::static_pointer_cast<smtk::attribute::Item>(myStrings);
    smtk::attribute::ConstItemPtr myStringsAsConstItems =
      std::const_pointer_cast<smtk::attribute::Item>(myStringsAsItems);
    myCopiedStrings->assign(myStringsAsConstItems);
    if (myCopiedStrings->numberOfValues() != myStrings->numberOfValues())
    {
      std::cerr << "Unexpected number of string values: " << myCopiedStrings->numberOfValues()
                << " vs " << myStrings->numberOfValues() << "\n";
      std::cerr << logger.convertToString();
      return -2;
    }

    smtk::attribute::FileItemPtr myCopiedFiles = copiedAtt->findFile("myFiles");
    smtk::attribute::ItemPtr myFilesAsItems =
      std::static_pointer_cast<smtk::attribute::Item>(myFiles);
    smtk::attribute::ConstItemPtr myFilesAsConstItems =
      std::const_pointer_cast<smtk::attribute::Item>(myFilesAsItems);
    myCopiedFiles->assign(myFilesAsConstItems);
    if (myCopiedFiles->numberOfValues() != myFiles->numberOfValues())
    {
      std::cerr << "Unexpected number of file values: " << myCopiedFiles->numberOfValues()
                << " vs " << myFiles->numberOfValues() << "\n";
      std::cerr << logger.convertToString();
      return -2;
    }
  }

  // Scope 2: whole-attribute copy via Resource::copyAttribute().
  {
    smtk::attribute::AttributePtr copiedAtt = resource.copyAttribute(att);
    if (!copiedAtt)
    {
      std::cerr << "Failed to copy attribute"
                << "\n";
      std::cerr << logger.convertToString();
      return -2;
    }
    if (!copiedAtt->isValid())
    {
      std::cerr << "Copied attribute is invalid"
                << "\n";
      std::cerr << logger.convertToString();
      return -2;
    }
    if (copiedAtt->numberOfItems() != 2)
    {
      std::cerr << "Copy attribute produced unexpected results"
                << "\n";
      std::cerr << logger.convertToString();
      return -2;
    }

    smtk::attribute::StringItemPtr myCopiedStrings = copiedAtt->findString("myStrings");
    if (myCopiedStrings->numberOfValues() != myStrings->numberOfValues())
    {
      std::cerr << "Unexpected number of string values: " << myCopiedStrings->numberOfValues()
                << " vs " << myStrings->numberOfValues() << "\n";
      std::cerr << logger.convertToString();
      return -2;
    }
    smtk::attribute::FileItemPtr myCopiedFiles = copiedAtt->findFile("myFiles");
    if (myCopiedFiles->numberOfValues() != myFiles->numberOfValues())
    {
      std::cerr << "Unexpected number of file values: " << myCopiedFiles->numberOfValues()
                << " vs " << myFiles->numberOfValues() << "\n";
      std::cerr << logger.convertToString();
      return -2;
    }
  }

  return 0;
}
// PropSheet.cpp : implementation file // #include "stdafx.h" #include "JWM1400CE.h" #include "PropSheet.h" #ifdef _DEBUG #define new DEBUG_NEW #undef THIS_FILE static char THIS_FILE[] = __FILE__; #endif ///////////////////////////////////////////////////////////////////////////// // CPropSheet IMPLEMENT_DYNAMIC(CPropSheet, CPropertySheet) CPropSheet::CPropSheet(UINT nIDCaption, CWnd* pParentWnd, UINT iSelectPage) :CPropertySheet(nIDCaption, pParentWnd, iSelectPage) { } CPropSheet::CPropSheet(LPCTSTR pszCaption, CWnd* pParentWnd, UINT iSelectPage) :CPropertySheet(pszCaption, pParentWnd, iSelectPage) { } CPropSheet::~CPropSheet() { } BEGIN_MESSAGE_MAP(CPropSheet, CPropertySheet) //{{AFX_MSG_MAP(CPropSheet) // NOTE - the ClassWizard will add and remove mapping macros here. //}}AFX_MSG_MAP END_MESSAGE_MAP() ///////////////////////////////////////////////////////////////////////////// // CPropSheet message handlers BOOL CPropSheet::OnInitDialog() { BOOL bResult = CPropertySheet::OnInitDialog(); // TODO: Add your specialized code here int ids[] = { IDOK, IDCANCEL, ID_APPLY_NOW, IDHELP }; for(int i = 0; i < sizeof(ids)/sizeof(int); ++i ) { CWnd* pWnd = GetDlgItem(ids[i]); if ( pWnd ) pWnd->ShowWindow(SW_HIDE); } CRect rectOK; CWnd* pWnd = GetDlgItem( IDOK ); if ( pWnd ) pWnd->GetWindowRect( &rectOK ); CRect rect; GetWindowRect( &rect ); SetWindowPos(NULL, 0, 0,rect.Width() + 100,rect.Height()-2,SWP_NOMOVE | SWP_NOZORDER | SWP_NOACTIVATE); #define FULL_SCREEN #ifdef FULL_SCREEN /// Full Screen int iFullWidth = GetSystemMetrics(SM_CXSCREEN); int iFullHeight = GetSystemMetrics(SM_CYSCREEN); ::SetWindowPos(this->m_hWnd, HWND_TOPMOST, 0, 0, iFullWidth, iFullHeight, SWP_NOOWNERZORDER|SWP_SHOWWINDOW); #endif static CFont m_fontEdit; m_fontEdit.CreateFont( 25, 0, 0, 30, 700, 0, 0, 0, 1, 0, 0, 0, 0, _T("Arial") ); GetTabControl()->SetFont( &m_fontEdit ); this->ModifyStyle( WS_SYSMENU | WS_CAPTION | CS_DBLCLKS , 0, 0 ); return bResult; } BOOL CPropSheet::PreTranslateMessage(MSG* 
pMsg) { // TODO: Add your specialized code here and/or call the base class if (pMsg->message == WM_KEYDOWN) { if (pMsg->wParam == VK_RETURN || pMsg->wParam == VK_ESCAPE) { return TRUE; } } return CPropertySheet::PreTranslateMessage(pMsg); }
/*
 *
 *    Copyright (c) 2020 Project CHIP Authors
 *
 *    Licensed under the Apache License, Version 2.0 (the "License");
 *    you may not use this file except in compliance with the License.
 *    You may obtain a copy of the License at
 *
 *        http://www.apache.org/licenses/LICENSE-2.0
 *
 *    Unless required by applicable law or agreed to in writing, software
 *    distributed under the License is distributed on an "AS IS" BASIS,
 *    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *    See the License for the specific language governing permissions and
 *    limitations under the License.
 */

#include "Discovery_ImplPlatform.h"

#include <inttypes.h>

#include "ServiceNaming.h"
#include "lib/core/CHIPSafeCasts.h"
#include "lib/mdns/platform/Mdns.h"
#include "lib/support/logging/CHIPLogging.h"
#include "platform/CHIPDeviceConfig.h"
#include "platform/CHIPDeviceLayer.h"
#include "setup_payload/AdditionalDataPayloadGenerator.h"
#include "support/CodeUtils.h"
#include "support/ErrorStr.h"
#include "support/RandUtils.h"

namespace chip {
namespace Mdns {

DiscoveryImplPlatform DiscoveryImplPlatform::sManager;

DiscoveryImplPlatform::DiscoveryImplPlatform() = default;

// Lazily initializes the platform mDNS backend and draws a random 64-bit
// commission instance name. Idempotent: subsequent calls are no-ops.
CHIP_ERROR DiscoveryImplPlatform::Init()
{
    if (!mMdnsInitialized)
    {
        ReturnErrorOnFailure(ChipMdnsInit(HandleMdnsInit, HandleMdnsError, this));
        mCommissionInstanceName = GetRandU64();
        mMdnsInitialized        = true;
    }
    return CHIP_NO_ERROR;
}

// Ensures the backend is initialized and clears any prior advertisements.
// NOTE(review): inetLayer/port are currently unused — presumably kept for
// interface compatibility; confirm against ServiceAdvertiser.
CHIP_ERROR DiscoveryImplPlatform::Start(Inet::InetLayer * inetLayer, uint16_t port)
{
    ReturnErrorOnFailure(Init());

    CHIP_ERROR error = ChipMdnsStopPublish();

    if (error != CHIP_NO_ERROR)
    {
        // Fixed copy-pasted message: this reports a stop-publish failure,
        // not an initialization failure.
        ChipLogError(Discovery, "Failed to stop publishing mdns services: %s", ErrorStr(error));
    }

    return error;
}

// Callback from ChipMdnsInit; records whether the backend came up.
void DiscoveryImplPlatform::HandleMdnsInit(void * context, CHIP_ERROR initError)
{
    DiscoveryImplPlatform * publisher = static_cast<DiscoveryImplPlatform *>(context);

    if (initError == CHIP_NO_ERROR)
    {
        publisher->mMdnsInitialized = true;
    }
    else
    {
        ChipLogError(Discovery, "mDNS initialization failed with %s", chip::ErrorStr(initError));
        publisher->mMdnsInitialized = false;
    }
}

// Async error callback. On a forced reset, re-publishes whichever
// advertisements were active before the reset.
void DiscoveryImplPlatform::HandleMdnsError(void * context, CHIP_ERROR error)
{
    DiscoveryImplPlatform * publisher = static_cast<DiscoveryImplPlatform *>(context);
    if (error == CHIP_ERROR_FORCED_RESET)
    {
        if (publisher->mIsOperationalPublishing)
        {
            publisher->Advertise(publisher->mOperationalAdvertisingParams);
        }
        if (publisher->mIsCommissionableNodePublishing)
        {
            publisher->Advertise(publisher->mCommissionableNodeAdvertisingParams);
        }
        if (publisher->mIsCommissionerPublishing)
        {
            publisher->Advertise(publisher->mCommissionerAdvertisingParams);
        }
    }
    else
    {
        ChipLogError(Discovery, "mDNS error: %s", chip::ErrorStr(error));
    }
}

// Derives the mDNS hostname from the device MAC/EUI-64 and installs it.
// nameBuffer holds up to 16 hex characters plus the NUL terminator.
CHIP_ERROR DiscoveryImplPlatform::SetupHostname(chip::ByteSpan macOrEui64)
{
    char nameBuffer[17];
    CHIP_ERROR error = MakeHostName(nameBuffer, sizeof(nameBuffer), macOrEui64);
    if (error != CHIP_NO_ERROR)
    {
        ChipLogError(Discovery, "Failed to create mdns hostname: %s", ErrorStr(error));
        return error;
    }
    error = ChipMdnsSetHostname(nameBuffer);
    if (error != CHIP_NO_ERROR)
    {
        ChipLogError(Discovery, "Failed to setup mdns hostname: %s", ErrorStr(error));
        return error;
    }
    return CHIP_NO_ERROR;
}

// Publishes a commissionable-node ("_chipc") or commissioner ("_chipd")
// service with the TXT records and service subtypes mandated by the spec.
CHIP_ERROR DiscoveryImplPlatform::Advertise(const CommissionAdvertisingParameters & params)
{
    CHIP_ERROR error = CHIP_NO_ERROR;
    MdnsService service;

    // +1 on each length for the NUL terminator written by snprintf.
    char discriminatorBuf[kKeyDiscriminatorMaxLength + 1];
    char vendorProductBuf[kKeyVendorProductMaxLength + 1];
    char commissioningModeBuf[kKeyCommissioningModeMaxLength + 1];
    char additionalPairingBuf[kKeyAdditionalPairingMaxLength + 1];
    char deviceTypeBuf[kKeyDeviceTypeMaxLength + 1];
    char deviceNameBuf[kKeyDeviceNameMaxLength + 1];
    char rotatingIdBuf[kKeyRotatingIdMaxLength + 1];
    char pairingHintBuf[kKeyPairingHintMaxLength + 1];
    char pairingInstrBuf[kKeyPairingInstructionMaxLength + 1];
    // size of textEntries array should be count of Bufs above
    TextEntry textEntries[9];
    size_t textEntrySize = 0;

    // +2/+3 for the subtype marker characters ("_S"/"_L" etc.) and the
    // NUL terminator (ex. _S<ddd>).
    char shortDiscriminatorSubtype[kSubTypeShortDiscriminatorMaxLength + 3];
    char longDiscriminatorSubtype[kSubTypeLongDiscriminatorMaxLength + 4];
    char vendorSubType[kSubTypeVendorMaxLength + 3];
    char commissioningModeSubType[kSubTypeCommissioningModeMaxLength + 3];
    char openWindowSubType[kSubTypeAdditionalPairingMaxLength + 3];
    char deviceTypeSubType[kSubTypeDeviceTypeMaxLength + 3];
    // size of subTypes array should be count of SubTypes above
    const char * subTypes[6];
    size_t subTypeSize = 0;

    if (!mMdnsInitialized)
    {
        return CHIP_ERROR_INCORRECT_STATE;
    }

    ReturnErrorOnFailure(SetupHostname(params.GetMac()));

    // Instance name: the random 64-bit value rendered as 16 upper-case hex digits.
    snprintf(service.mName, sizeof(service.mName), "%08" PRIX32 "%08" PRIX32,
             static_cast<uint32_t>(mCommissionInstanceName >> 32), static_cast<uint32_t>(mCommissionInstanceName));
    if (params.GetCommissionAdvertiseMode() == CommssionAdvertiseMode::kCommissionableNode)
    {
        strncpy(service.mType, "_chipc", sizeof(service.mType));
    }
    else
    {
        strncpy(service.mType, "_chipd", sizeof(service.mType));
    }
    service.mProtocol = MdnsServiceProtocol::kMdnsProtocolUdp;

    // Optional TXT records shared by both advertise modes.
    if (params.GetVendorId().HasValue())
    {
        if (params.GetProductId().HasValue())
        {
            snprintf(vendorProductBuf, sizeof(vendorProductBuf), "%u+%u", params.GetVendorId().Value(),
                     params.GetProductId().Value());
        }
        else
        {
            snprintf(vendorProductBuf, sizeof(vendorProductBuf), "%u", params.GetVendorId().Value());
        }
        textEntries[textEntrySize++] = { "VP", reinterpret_cast<const uint8_t *>(vendorProductBuf),
                                         strnlen(vendorProductBuf, sizeof(vendorProductBuf)) };
    }

    if (params.GetDeviceType().HasValue())
    {
        snprintf(deviceTypeBuf, sizeof(deviceTypeBuf), "%u", params.GetDeviceType().Value());
        textEntries[textEntrySize++] = { "DT", reinterpret_cast<const uint8_t *>(deviceTypeBuf),
                                         strnlen(deviceTypeBuf, sizeof(deviceTypeBuf)) };
    }

    if (params.GetDeviceName().HasValue())
    {
        snprintf(deviceNameBuf, sizeof(deviceNameBuf), "%s", params.GetDeviceName().Value());
        textEntries[textEntrySize++] = { "DN", reinterpret_cast<const uint8_t *>(deviceNameBuf),
                                         strnlen(deviceNameBuf, sizeof(deviceNameBuf)) };
    }

    // Following fields are for nodes and not for commissioners
    if (params.GetCommissionAdvertiseMode() == CommssionAdvertiseMode::kCommissionableNode)
    {
        snprintf(discriminatorBuf, sizeof(discriminatorBuf), "%04u", params.GetLongDiscriminator());
        textEntries[textEntrySize++] = { "D", reinterpret_cast<const uint8_t *>(discriminatorBuf),
                                         strnlen(discriminatorBuf, sizeof(discriminatorBuf)) };

        snprintf(commissioningModeBuf, sizeof(commissioningModeBuf), "%u", params.GetCommissioningMode() ? 1 : 0);
        textEntries[textEntrySize++] = { "CM", reinterpret_cast<const uint8_t *>(commissioningModeBuf),
                                         strnlen(commissioningModeBuf, sizeof(commissioningModeBuf)) };

        if (params.GetCommissioningMode() && params.GetOpenWindowCommissioningMode())
        {
            snprintf(additionalPairingBuf, sizeof(additionalPairingBuf), "1");
            textEntries[textEntrySize++] = { "AP", reinterpret_cast<const uint8_t *>(additionalPairingBuf),
                                             strnlen(additionalPairingBuf, sizeof(additionalPairingBuf)) };
        }

        if (params.GetRotatingId().HasValue())
        {
            snprintf(rotatingIdBuf, sizeof(rotatingIdBuf), "%s", params.GetRotatingId().Value());
            textEntries[textEntrySize++] = { "RI", reinterpret_cast<const uint8_t *>(rotatingIdBuf),
                                             strnlen(rotatingIdBuf, sizeof(rotatingIdBuf)) };
        }

        if (params.GetPairingHint().HasValue())
        {
            snprintf(pairingHintBuf, sizeof(pairingHintBuf), "%u", params.GetPairingHint().Value());
            textEntries[textEntrySize++] = { "PH", reinterpret_cast<const uint8_t *>(pairingHintBuf),
                                             strnlen(pairingHintBuf, sizeof(pairingHintBuf)) };
        }

        if (params.GetPairingInstr().HasValue())
        {
            snprintf(pairingInstrBuf, sizeof(pairingInstrBuf), "%s", params.GetPairingInstr().Value());
            textEntries[textEntrySize++] = { "PI", reinterpret_cast<const uint8_t *>(pairingInstrBuf),
                                             strnlen(pairingInstrBuf, sizeof(pairingInstrBuf)) };
        }

        // Discriminator / commissioning-mode service subtypes (node only).
        if (MakeServiceSubtype(shortDiscriminatorSubtype, sizeof(shortDiscriminatorSubtype),
                               DiscoveryFilter(DiscoveryFilterType::kShort, params.GetShortDiscriminator())) == CHIP_NO_ERROR)
        {
            subTypes[subTypeSize++] = shortDiscriminatorSubtype;
        }
        if (MakeServiceSubtype(longDiscriminatorSubtype, sizeof(longDiscriminatorSubtype),
                               DiscoveryFilter(DiscoveryFilterType::kLong, params.GetLongDiscriminator())) == CHIP_NO_ERROR)
        {
            subTypes[subTypeSize++] = longDiscriminatorSubtype;
        }
        if (MakeServiceSubtype(commissioningModeSubType, sizeof(commissioningModeSubType),
                               DiscoveryFilter(DiscoveryFilterType::kCommissioningMode,
                                               params.GetCommissioningMode() ? 1 : 0)) == CHIP_NO_ERROR)
        {
            subTypes[subTypeSize++] = commissioningModeSubType;
        }
        if (params.GetCommissioningMode() && params.GetOpenWindowCommissioningMode())
        {
            if (MakeServiceSubtype(openWindowSubType, sizeof(openWindowSubType),
                                   DiscoveryFilter(DiscoveryFilterType::kCommissioningModeFromCommand, 1)) == CHIP_NO_ERROR)
            {
                subTypes[subTypeSize++] = openWindowSubType;
            }
        }
    }

    // Vendor / device-type subtypes apply to both modes.
    if (params.GetVendorId().HasValue())
    {
        if (MakeServiceSubtype(vendorSubType, sizeof(vendorSubType),
                               DiscoveryFilter(DiscoveryFilterType::kVendor, params.GetVendorId().Value())) == CHIP_NO_ERROR)
        {
            subTypes[subTypeSize++] = vendorSubType;
        }
    }
    if (params.GetDeviceType().HasValue())
    {
        if (MakeServiceSubtype(deviceTypeSubType, sizeof(deviceTypeSubType),
                               DiscoveryFilter(DiscoveryFilterType::kDeviceType, params.GetDeviceType().Value())) == CHIP_NO_ERROR)
        {
            subTypes[subTypeSize++] = deviceTypeSubType;
        }
    }

    service.mTextEntries   = textEntries;
    service.mTextEntrySize = textEntrySize;
    service.mPort          = CHIP_PORT;
    service.mInterface     = INET_NULL_INTERFACEID;
    service.mSubTypes      = subTypes;
    service.mSubTypeSize   = subTypeSize;
    service.mAddressType   = Inet::kIPAddressType_Any;
    error                  = ChipMdnsPublishService(&service);

    if (error == CHIP_NO_ERROR)
    {
        // Remember the parameters so HandleMdnsError can re-advertise
        // after a forced reset.
        if (params.GetCommissionAdvertiseMode() == CommssionAdvertiseMode::kCommissionableNode)
        {
            mCommissionableNodeAdvertisingParams = params;
            mIsCommissionableNodePublishing      = true;
        }
        else
        {
            mCommissionerAdvertisingParams = params;
            mIsCommissionerPublishing      = true;
        }
    }

#ifdef DETAIL_LOGGING
    PrintEntries(&service);
#endif
    return error;
}

#ifdef DETAIL_LOGGING
// Debug helper: dumps every TXT entry and subtype of a service.
void DiscoveryImplPlatform::PrintEntries(const MdnsService * service)
{
    printf("printEntries port=%d, mTextEntrySize=%d, mSubTypeSize=%d\n", (int) (service->mPort),
           (int) (service->mTextEntrySize), (int) (service->mSubTypeSize));
    for (int i = 0; i < (int) service->mTextEntrySize; i++)
    {
        printf(" entry [%d] : %s %s\n", i, service->mTextEntries[i].mKey, (char *) (service->mTextEntries[i].mData));
    }

    for (int i = 0; i < (int) service->mSubTypeSize; i++)
    {
        printf(" type [%d] : %s\n", i, service->mSubTypes[i]);
    }
}
#endif

// Publishes the operational ("_chip") service with CRMP retry-interval
// TXT records (CRI/CRA), clamped to the 1-hour maximum.
CHIP_ERROR DiscoveryImplPlatform::Advertise(const OperationalAdvertisingParameters & params)
{
    MdnsService service;
    CHIP_ERROR error = CHIP_NO_ERROR;

    mOperationalAdvertisingParams = params;
    // TODO: There may be multilple device/fabric ids after multi-admin.

    // According to spec CRI and CRA intervals should not exceed 1 hour (3600000 ms).
    // TODO: That value should be defined in the ReliableMessageProtocolConfig.h,
    // but for now it is not possible to access it from src/lib/mdns. It should be
    // refactored after creating common DNS-SD layer.
    constexpr uint32_t kMaxCRMPRetryInterval = 3600000;
    // kMaxCRMPRetryInterval max value is 3600000, which takes 7 characters
    // plus the NUL terminator in its text form.
    constexpr uint8_t kMaxCRMPRetryBufferSize = 7 + 1;
    char crmpRetryIntervalIdleBuf[kMaxCRMPRetryBufferSize];
    char crmpRetryIntervalActiveBuf[kMaxCRMPRetryBufferSize];
    TextEntry crmpRetryIntervalEntries[OperationalAdvertisingParameters::kNumAdvertisingTxtEntries];
    size_t textEntrySize = 0;
    uint32_t crmpRetryIntervalIdle, crmpRetryIntervalActive;
    int writtenCharactersNumber;
    params.GetCRMPRetryIntervals(crmpRetryIntervalIdle, crmpRetryIntervalActive);

    // TODO: Issue #5833 - CRMP retry intervals should be updated on the poll period value
    // change or device type change.
#if CHIP_DEVICE_CONFIG_ENABLE_THREAD
    if (chip::DeviceLayer::ConnectivityMgr().GetThreadDeviceType() ==
        chip::DeviceLayer::ConnectivityManager::kThreadDeviceType_SleepyEndDevice)
    {
        uint32_t sedPollPeriod;
        ReturnErrorOnFailure(chip::DeviceLayer::ThreadStackMgr().GetPollPeriod(sedPollPeriod));
        // Increment default CRMP retry intervals by SED poll period to be on the safe side
        // and avoid unnecessary retransmissions.
        crmpRetryIntervalIdle += sedPollPeriod;
        crmpRetryIntervalActive += sedPollPeriod;
    }
#endif

    if (crmpRetryIntervalIdle > kMaxCRMPRetryInterval)
    {
        // Fixed: the format string takes no arguments, so the stray
        // ErrorStr(error) argument (error was CHIP_NO_ERROR here) is removed.
        ChipLogProgress(Discovery, "CRMP retry interval idle value exceeds allowed range of 1 hour, using maximum available");
        crmpRetryIntervalIdle = kMaxCRMPRetryInterval;
    }
    writtenCharactersNumber =
        snprintf(crmpRetryIntervalIdleBuf, sizeof(crmpRetryIntervalIdleBuf), "%" PRIu32, crmpRetryIntervalIdle);
    VerifyOrReturnError((writtenCharactersNumber > 0) && (writtenCharactersNumber < kMaxCRMPRetryBufferSize),
                        CHIP_ERROR_INVALID_STRING_LENGTH);
    crmpRetryIntervalEntries[textEntrySize++] = { "CRI", reinterpret_cast<const uint8_t *>(crmpRetryIntervalIdleBuf),
                                                  strlen(crmpRetryIntervalIdleBuf) };

    if (crmpRetryIntervalActive > kMaxCRMPRetryInterval)
    {
        // Fixed: same stray-argument removal as for the idle interval above.
        ChipLogProgress(Discovery, "CRMP retry interval active value exceeds allowed range of 1 hour, using maximum available");
        crmpRetryIntervalActive = kMaxCRMPRetryInterval;
    }
    writtenCharactersNumber =
        snprintf(crmpRetryIntervalActiveBuf, sizeof(crmpRetryIntervalActiveBuf), "%" PRIu32, crmpRetryIntervalActive);
    VerifyOrReturnError((writtenCharactersNumber > 0) && (writtenCharactersNumber < kMaxCRMPRetryBufferSize),
                        CHIP_ERROR_INVALID_STRING_LENGTH);
    crmpRetryIntervalEntries[textEntrySize++] = { "CRA", reinterpret_cast<const uint8_t *>(crmpRetryIntervalActiveBuf),
                                                  strlen(crmpRetryIntervalActiveBuf) };

    ReturnErrorOnFailure(SetupHostname(params.GetMac()));
    ReturnErrorOnFailure(MakeInstanceName(service.mName, sizeof(service.mName), params.GetPeerId()));
    strncpy(service.mType, "_chip", sizeof(service.mType));
    service.mProtocol      = MdnsServiceProtocol::kMdnsProtocolTcp;
    service.mPort          = CHIP_PORT;
    service.mTextEntries   = crmpRetryIntervalEntries;
    service.mTextEntrySize = textEntrySize;
    service.mInterface     = INET_NULL_INTERFACEID;
    service.mAddressType   = Inet::kIPAddressType_Any;
    service.mSubTypeSize   = 0;
    error                  = ChipMdnsPublishService(&service);

    if (error == CHIP_NO_ERROR)
    {
        mIsOperationalPublishing = true;
    }

    return error;
}

// Stops every advertisement and clears all publishing flags on success.
CHIP_ERROR DiscoveryImplPlatform::StopPublishDevice()
{
    CHIP_ERROR error = ChipMdnsStopPublish();

    if (error == CHIP_NO_ERROR)
    {
        mIsOperationalPublishing        = false;
        mIsCommissionableNodePublishing = false;
        mIsCommissionerPublishing       = false;
    }

    return error;
}

// Installs (or clears, with nullptr) the resolver delegate; refuses to
// silently replace an already-installed delegate.
CHIP_ERROR DiscoveryImplPlatform::SetResolverDelegate(ResolverDelegate * delegate)
{
    VerifyOrReturnError(delegate == nullptr || mResolverDelegate == nullptr, CHIP_ERROR_INCORRECT_STATE);
    mResolverDelegate = delegate;
    return CHIP_NO_ERROR;
}

// Starts an asynchronous "_chip" service resolution for the given peer;
// the result is delivered through HandleNodeIdResolve.
CHIP_ERROR DiscoveryImplPlatform::ResolveNodeId(const PeerId & peerId, Inet::IPAddressType type)
{
    ReturnErrorOnFailure(Init());

    MdnsService service;

    ReturnErrorOnFailure(MakeInstanceName(service.mName, sizeof(service.mName), peerId));
    strncpy(service.mType, "_chip", sizeof(service.mType));
    service.mProtocol    = MdnsServiceProtocol::kMdnsProtocolTcp;
    service.mAddressType = type;
    return ChipMdnsResolve(&service, INET_NULL_INTERFACEID, HandleNodeIdResolve, this);
}

// Resolution callback: converts the mDNS result back into a PeerId and
// forwards it to the resolver delegate.
void DiscoveryImplPlatform::HandleNodeIdResolve(void * context, MdnsService * result, CHIP_ERROR error)
{
    DiscoveryImplPlatform * mgr = static_cast<DiscoveryImplPlatform *>(context);

    if (mgr->mResolverDelegate == nullptr)
    {
        return;
    }

    if (error != CHIP_NO_ERROR)
    {
        ChipLogError(Discovery, "Node ID resolved failed with %s", chip::ErrorStr(error));
        mgr->mResolverDelegate->OnNodeIdResolutionFailed(PeerId(), error);
        return;
    }

    if (result == nullptr)
    {
        ChipLogError(Discovery, "Node ID resolve not found");
        mgr->mResolverDelegate->OnNodeIdResolutionFailed(PeerId(), CHIP_ERROR_UNKNOWN_RESOURCE_ID);
        return;
    }

    ResolvedNodeData nodeData;
    error = ExtractIdFromInstanceName(result->mName, &nodeData.mPeerId);
    if (error != CHIP_NO_ERROR)
    {
        ChipLogError(Discovery, "Node ID resolved failed with %s", chip::ErrorStr(error));
        mgr->mResolverDelegate->OnNodeIdResolutionFailed(PeerId(), error);
        return;
    }

    nodeData.mInterfaceId = result->mInterface;
    nodeData.mAddress     = result->mAddress.ValueOr({});
    nodeData.mPort        = result->mPort;

    ChipLogProgress(Discovery, "Node ID resolved for 0x" ChipLogFormatX64, ChipLogValueX64(nodeData.mPeerId.GetNodeId()));
    mgr->mResolverDelegate->OnNodeIdResolved(nodeData);
}

DiscoveryImplPlatform & DiscoveryImplPlatform::GetInstance()
{
    return sManager;
}

ServiceAdvertiser & chip::Mdns::ServiceAdvertiser::Instance()
{
    return DiscoveryImplPlatform::GetInstance();
}

Resolver & chip::Mdns::Resolver::Instance()
{
    return DiscoveryImplPlatform::GetInstance();
}

} // namespace Mdns
} // namespace chip
/*
 * Copyright 2019 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "modules/skottie/src/effects/Effects.h"

#include "modules/skottie/src/SkottieValue.h"
#include "modules/sksg/include/SkSGRenderEffect.h"
#include "src/utils/SkJSON.h"

namespace skottie {
namespace internal {

namespace {

// Maps the After Effects "Gaussian Blur" effect properties onto an
// sksg::BlurImageFilter wrapped in an ImageFilterEffect node.
class GaussianBlurEffectAdapter final : public AnimatablePropertyContainer {
public:
    static sk_sp<GaussianBlurEffectAdapter> Make(const skjson::ArrayValue& jprops,
                                                 sk_sp<sksg::RenderNode> layer,
                                                 const AnimationBuilder* abuilder) {
        return sk_sp<GaussianBlurEffectAdapter>(new GaussianBlurEffectAdapter(jprops,
                                                                              std::move(layer),
                                                                              abuilder));
    }

    // The render node callers attach into the scene graph.
    const sk_sp<sksg::RenderNode>& node() const { return fImageFilterEffect; }

private:
    GaussianBlurEffectAdapter(const skjson::ArrayValue& jprops,
                              sk_sp<sksg::RenderNode> layer,
                              const AnimationBuilder* abuilder)
        : fBlur(sksg::BlurImageFilter::Make())
        , fImageFilterEffect(sksg::ImageFilterEffect::Make(std::move(layer), fBlur)) {
        // Property indices within the effect's JSON property array.
        enum : size_t {
            kBlurriness_Index = 0,
            kDimensions_Index = 1,
            kRepeatEdge_Index = 2,
        };

        EffectBinder(jprops, *abuilder, this)
            .bind(kBlurriness_Index, fBlurriness)
            .bind(kDimensions_Index, fDimensions)
            .bind(kRepeatEdge_Index, fRepeatEdge);
    }

    // Re-derives the filter's sigma and tile mode whenever a bound
    // animated property changes.
    void onSync() override {
        static constexpr SkVector kDimensionsMap[] = {
            { 1, 1 }, // 1 -> horizontal and vertical
            { 1, 0 }, // 2 -> horizontal
            { 0, 1 }, // 3 -> vertical
        };

        // fDimensions is 1-based in the AE model; clamp into [1, 3]
        // before converting to a 0-based table index.
        const auto dim_index = SkTPin<size_t>(static_cast<size_t>(fDimensions),
                                              1, SK_ARRAY_COUNT(kDimensionsMap)) - 1;

        const auto sigma = fBlurriness * kBlurSizeToSigma;

        fBlur->setSigma({ sigma * kDimensionsMap[dim_index].x(),
                          sigma * kDimensionsMap[dim_index].y() });

        static constexpr SkTileMode kRepeatEdgeMap[] = {
            SkTileMode::kDecal, // 0 -> repeat edge pixels: off
            SkTileMode::kClamp, // 1 -> repeat edge pixels: on
        };

        // fRepeatEdge is already 0-based; clamp into [0, 1].
        const auto repeat_index = SkTPin<size_t>(static_cast<size_t>(fRepeatEdge),
                                                 0, SK_ARRAY_COUNT(kRepeatEdgeMap) - 1);
        fBlur->setTileMode(kRepeatEdgeMap[repeat_index]);
    }

    const sk_sp<sksg::BlurImageFilter> fBlur;
    const sk_sp<sksg::RenderNode>      fImageFilterEffect;

    ScalarValue fBlurriness = 0, // Controls the blur sigma.
                fDimensions = 1, // 1 -> horizontal & vertical, 2 -> horizontal, 3 -> vertical
                fRepeatEdge = 0; // 0 -> clamp, 1 -> repeat
};

} // namespace

sk_sp<sksg::RenderNode> EffectBuilder::attachGaussianBlurEffect(const skjson::ArrayValue& jprops,
                                                                sk_sp<sksg::RenderNode> layer) const {
    return fBuilder->attachDiscardableAdapter<GaussianBlurEffectAdapter>(jprops,
                                                                         std::move(layer),
                                                                         fBuilder);
}

} // namespace internal
} // namespace skottie
//===-- PdbYaml.cpp ------------------------------------------- *- C++ --*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "PdbYaml.h"

#include "llvm/ADT/StringExtras.h"
#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
#include "llvm/DebugInfo/PDB/Native/PDBFile.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
#include "llvm/DebugInfo/PDB/Native/TpiHashing.h"
#include "llvm/DebugInfo/PDB/PDBTypes.h"
#include "llvm/ObjectYAML/CodeViewYAMLDebugSections.h"
#include "llvm/ObjectYAML/CodeViewYAMLTypes.h"

using namespace llvm;
using namespace llvm::pdb;
using namespace llvm::pdb::yaml;
using namespace llvm::yaml;

LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::pdb::yaml::NamedStreamMapping)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::pdb::yaml::PdbDbiModuleInfo)
LLVM_YAML_IS_SEQUENCE_VECTOR(llvm::pdb::yaml::StreamBlockList)
LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(llvm::pdb::PdbRaw_FeatureSig)

namespace llvm {
namespace yaml {

// Bidirectional mapping between YAML machine-name strings and PDB_Machine
// enum values (used when (de)serializing the DBI stream's MachineType).
// NOTE(review): PDB_Machine::PowerPC (non-FP) has no enumCase here while
// PowerPCFP does — confirm whether that omission is intentional.
template <> struct ScalarEnumerationTraits<llvm::pdb::PDB_Machine> {
  static void enumeration(IO &io, llvm::pdb::PDB_Machine &Value) {
    io.enumCase(Value, "Invalid", PDB_Machine::Invalid);
    io.enumCase(Value, "Am33", PDB_Machine::Am33);
    io.enumCase(Value, "Amd64", PDB_Machine::Amd64);
    io.enumCase(Value, "Arm", PDB_Machine::Arm);
    io.enumCase(Value, "ArmNT", PDB_Machine::ArmNT);
    io.enumCase(Value, "Ebc", PDB_Machine::Ebc);
    io.enumCase(Value, "x86", PDB_Machine::x86);
    io.enumCase(Value, "Ia64", PDB_Machine::Ia64);
    io.enumCase(Value, "M32R", PDB_Machine::M32R);
    io.enumCase(Value, "Mips16", PDB_Machine::Mips16);
    io.enumCase(Value, "MipsFpu", PDB_Machine::MipsFpu);
    io.enumCase(Value, "MipsFpu16", PDB_Machine::MipsFpu16);
    io.enumCase(Value, "PowerPCFP", PDB_Machine::PowerPCFP);
    io.enumCase(Value, "R4000", PDB_Machine::R4000);
    io.enumCase(Value, "SH3", PDB_Machine::SH3);
    io.enumCase(Value, "SH3DSP", PDB_Machine::SH3DSP);
    io.enumCase(Value, "Thumb", PDB_Machine::Thumb);
    io.enumCase(Value, "WceMipsV2", PDB_Machine::WceMipsV2);
  }
};

// DBI stream version tags.
template <> struct ScalarEnumerationTraits<llvm::pdb::PdbRaw_DbiVer> {
  static void enumeration(IO &io, llvm::pdb::PdbRaw_DbiVer &Value) {
    io.enumCase(Value, "V41", llvm::pdb::PdbRaw_DbiVer::PdbDbiVC41);
    io.enumCase(Value, "V50", llvm::pdb::PdbRaw_DbiVer::PdbDbiV50);
    io.enumCase(Value, "V60", llvm::pdb::PdbRaw_DbiVer::PdbDbiV60);
    io.enumCase(Value, "V70", llvm::pdb::PdbRaw_DbiVer::PdbDbiV70);
    io.enumCase(Value, "V110", llvm::pdb::PdbRaw_DbiVer::PdbDbiV110);
  }
};

// PDB info stream implementation version tags.
template <> struct ScalarEnumerationTraits<llvm::pdb::PdbRaw_ImplVer> {
  static void enumeration(IO &io, llvm::pdb::PdbRaw_ImplVer &Value) {
    io.enumCase(Value, "VC2", llvm::pdb::PdbRaw_ImplVer::PdbImplVC2);
    io.enumCase(Value, "VC4", llvm::pdb::PdbRaw_ImplVer::PdbImplVC4);
    io.enumCase(Value, "VC41", llvm::pdb::PdbRaw_ImplVer::PdbImplVC41);
    io.enumCase(Value, "VC50", llvm::pdb::PdbRaw_ImplVer::PdbImplVC50);
    io.enumCase(Value, "VC98", llvm::pdb::PdbRaw_ImplVer::PdbImplVC98);
    io.enumCase(Value, "VC70Dep", llvm::pdb::PdbRaw_ImplVer::PdbImplVC70Dep);
    io.enumCase(Value, "VC70", llvm::pdb::PdbRaw_ImplVer::PdbImplVC70);
    io.enumCase(Value, "VC80", llvm::pdb::PdbRaw_ImplVer::PdbImplVC80);
    io.enumCase(Value, "VC110", llvm::pdb::PdbRaw_ImplVer::PdbImplVC110);
    io.enumCase(Value, "VC140", llvm::pdb::PdbRaw_ImplVer::PdbImplVC140);
  }
};

// TPI stream version tags.
template <> struct ScalarEnumerationTraits<llvm::pdb::PdbRaw_TpiVer> {
  static void enumeration(IO &io, llvm::pdb::PdbRaw_TpiVer &Value) {
    io.enumCase(Value, "VC40", llvm::pdb::PdbRaw_TpiVer::PdbTpiV40);
    io.enumCase(Value, "VC41", llvm::pdb::PdbRaw_TpiVer::PdbTpiV41);
    io.enumCase(Value, "VC50", llvm::pdb::PdbRaw_TpiVer::PdbTpiV50);
    io.enumCase(Value, "VC70", llvm::pdb::PdbRaw_TpiVer::PdbTpiV70);
    io.enumCase(Value, "VC80", llvm::pdb::PdbRaw_TpiVer::PdbTpiV80);
  }
};

// PDB feature signature values (each serialized as a flow-sequence element).
template <> struct ScalarEnumerationTraits<llvm::pdb::PdbRaw_FeatureSig> {
  static void enumeration(IO &io, PdbRaw_FeatureSig &Features) {
    io.enumCase(Features, "MinimalDebugInfo",
                PdbRaw_FeatureSig::MinimalDebugInfo);
    io.enumCase(Features, "NoTypeMerge", PdbRaw_FeatureSig::NoTypeMerge);
    io.enumCase(Features, "VC110", PdbRaw_FeatureSig::VC110);
    io.enumCase(Features, "VC140", PdbRaw_FeatureSig::VC140);
  }
};

} // namespace yaml
} // namespace llvm

// Top-level YAML shape of a PDB description; every stream is optional so a
// YAML file may describe just the pieces the user cares about.
void MappingTraits<PdbObject>::mapping(IO &IO, PdbObject &Obj) {
  IO.mapOptional("MSF", Obj.Headers);
  IO.mapOptional("StreamSizes", Obj.StreamSizes);
  IO.mapOptional("StreamMap", Obj.StreamMap);
  IO.mapOptional("StringTable", Obj.StringTable);
  IO.mapOptional("PdbStream", Obj.PdbStream);
  IO.mapOptional("DbiStream", Obj.DbiStream);
  IO.mapOptional("TpiStream", Obj.TpiStream);
  IO.mapOptional("IpiStream", Obj.IpiStream);
  IO.mapOptional("PublicsStream", Obj.PublicsStream);
}

void MappingTraits<MSFHeaders>::mapping(IO &IO, MSFHeaders &Obj) {
  IO.mapOptional("SuperBlock", Obj.SuperBlock);
  IO.mapOptional("NumDirectoryBlocks", Obj.NumDirectoryBlocks);
  IO.mapOptional("DirectoryBlocks", Obj.DirectoryBlocks);
  IO.mapOptional("NumStreams", Obj.NumStreams);
  IO.mapOptional("FileSize", Obj.FileSize);
}

void MappingTraits<msf::SuperBlock>::mapping(IO &IO, msf::SuperBlock &SB) {
  // When reading YAML, stamp in the canonical MSF magic so the resulting
  // in-memory superblock is well-formed without the user spelling it out.
  if (!IO.outputting()) {
    ::memcpy(SB.MagicBytes, msf::Magic, sizeof(msf::Magic));
  }

  using u32 = support::ulittle32_t;
  IO.mapOptional("BlockSize", SB.BlockSize, u32(4096U));
  IO.mapOptional("FreeBlockMap", SB.FreeBlockMapBlock, u32(0U));
  IO.mapOptional("NumBlocks", SB.NumBlocks, u32(0U));
  IO.mapOptional("NumDirectoryBytes", SB.NumDirectoryBytes, u32(0U));
  IO.mapOptional("Unknown1", SB.Unknown1, u32(0U));
  IO.mapOptional("BlockMapAddr", SB.BlockMapAddr, u32(0U));
}

void MappingTraits<StreamBlockList>::mapping(IO &IO, StreamBlockList &SB) {
  IO.mapRequired("Stream", SB.Blocks);
}

void MappingTraits<PdbInfoStream>::mapping(IO &IO, PdbInfoStream &Obj) {
  IO.mapOptional("Age", Obj.Age, 1U);
  IO.mapOptional("Guid", Obj.Guid);
  IO.mapOptional("Signature", Obj.Signature, 0U);
  IO.mapOptional("Features", Obj.Features);
  IO.mapOptional("Version", Obj.Version, PdbImplVC70);
}

void MappingTraits<PdbDbiStream>::mapping(IO &IO, PdbDbiStream &Obj) {
  IO.mapOptional("VerHeader", Obj.VerHeader, PdbDbiV70);
  IO.mapOptional("Age", Obj.Age, 1U);
  IO.mapOptional("BuildNumber", Obj.BuildNumber, uint16_t(0U));
  IO.mapOptional("PdbDllVersion", Obj.PdbDllVersion, 0U);
  IO.mapOptional("PdbDllRbld", Obj.PdbDllRbld, uint16_t(0U));
  IO.mapOptional("Flags", Obj.Flags, uint16_t(1U));
  IO.mapOptional("MachineType", Obj.MachineType, PDB_Machine::x86);
  IO.mapOptional("Modules", Obj.ModInfos);
}

void MappingTraits<PdbTpiStream>::mapping(IO &IO,
                                          pdb::yaml::PdbTpiStream &Obj) {
  IO.mapOptional("Version", Obj.Version, PdbTpiV80);
  IO.mapRequired("Records", Obj.Records);
}

void MappingTraits<PdbPublicsStream>::mapping(
    IO &IO, pdb::yaml::PdbPublicsStream &Obj) {
  IO.mapRequired("Records", Obj.PubSyms);
}

void MappingTraits<NamedStreamMapping>::mapping(IO &IO,
                                                NamedStreamMapping &Obj) {
  IO.mapRequired("Name", Obj.StreamName);
  IO.mapRequired("StreamNum", Obj.StreamNumber);
}

void MappingTraits<PdbModiStream>::mapping(IO &IO, PdbModiStream &Obj) {
  IO.mapOptional("Signature", Obj.Signature, 4U);
  IO.mapRequired("Records", Obj.Symbols);
}

void MappingTraits<PdbDbiModuleInfo>::mapping(IO &IO, PdbDbiModuleInfo &Obj) {
  IO.mapRequired("Module", Obj.Mod);
  // The object file name defaults to the module name when not given.
  IO.mapOptional("ObjFile", Obj.Obj, Obj.Mod);
  IO.mapOptional("SourceFiles", Obj.SourceFiles);
  IO.mapOptional("Subsections", Obj.Subsections);
  IO.mapOptional("Modi", Obj.Modi);
}
#include "FWCore/Framework/interface/one/EDAnalyzer.h"
#include "FWCore/MessageLogger/interface/MessageLogger.h"
#include "FWCore/ServiceRegistry/interface/Service.h"
#include "FWCore/ParameterSet/interface/ParameterSet.h"
#include "FWCore/Framework/interface/Event.h"
#include "FWCore/Framework/interface/EventSetup.h"
#include "FWCore/Framework/interface/ESHandle.h"
#include "FWCore/Framework/interface/MakerMacros.h"
#include "CondFormats/Common/interface/FileBlob.h"
#include "Geometry/Records/interface/GeometryFileRcd.h"

#include <fstream>
#include <memory>
#include <string>
#include <vector>

// Analyzer that fetches the XML geometry FileBlob from the EventSetup at
// begin-of-run, uncompresses it, and dumps it verbatim to a local file.
class XMLGeometryReader : public edm::one::EDAnalyzer<edm::one::WatchRuns> {
public:
  XMLGeometryReader(const edm::ParameterSet&);

  void beginJob() override {}
  void beginRun(edm::Run const& iEvent, edm::EventSetup const&) override;
  void analyze(edm::Event const& iEvent, edm::EventSetup const&) override {}
  void endRun(edm::Run const& iEvent, edm::EventSetup const&) override {}

private:
  std::string m_fname;  // output file name for the dumped XML
  std::string m_label;  // geometry label (currently informational only)
  edm::ESGetToken<FileBlob, GeometryFileRcd> fileBlobToken_;
};

XMLGeometryReader::XMLGeometryReader(const edm::ParameterSet& iConfig) {
  m_fname = iConfig.getUntrackedParameter<std::string>("XMLFileName", "test.xml");
  m_label = iConfig.getUntrackedParameter<std::string>("geomLabel", "Extended");
  fileBlobToken_ = esConsumes<edm::Transition::BeginRun>();
}

void XMLGeometryReader::beginRun(edm::Run const& run, edm::EventSetup const& iSetup) {
  edm::LogInfo("XMLGeometryReader") << "XMLGeometryReader::beginRun";

  auto geometry = iSetup.getHandle(fileBlobToken_);
  std::unique_ptr<std::vector<unsigned char> > blob((*geometry).getUncompressedBlob());
  // Guard against a null blob (e.g. decompression failure) before use;
  // previously &(*blob)[0] dereferenced it unconditionally, and indexing an
  // empty vector is undefined behavior.
  if (!blob) {
    edm::LogError("XMLGeometryReader") << "Failed to get uncompressed geometry blob";
    return;
  }

  // Binary mode: the blob must be written byte-for-byte, with no newline
  // translation on any platform.
  std::ofstream output1(m_fname.c_str(), std::ios::binary);
  if (!output1) {
    edm::LogError("XMLGeometryReader") << "Cannot open output file " << m_fname;
    return;
  }
  if (!blob->empty())
    output1.write(reinterpret_cast<const char*>(blob->data()), blob->size());
  output1.close();
}

DEFINE_FWK_MODULE(XMLGeometryReader);
// Copyright (c) 2009-2019 Satoshi Nakamoto // Copyright (c) 2009-2019 The Bitcoin Developers // Copyright (c) 2016-2019 Duality Blockchain Solutions Developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "clientmodel.h" #include "bantablemodel.h" #include "guiconstants.h" #include "guiutil.h" #include "peertablemodel.h" #include "alert.h" #include "chainparams.h" #include "checkpoints.h" #include "clientversion.h" #include "net.h" #include "txmempool.h" #include "ui_interface.h" #include "util.h" #include <stdint.h> #include <QDebug> #include <QTimer> static const int64_t nClientStartupTime = GetTime(); static int64_t nLastHeaderTipUpdateNotification = 0; static int64_t nLastBlockTipUpdateNotification = 0; ClientModel::ClientModel(OptionsModel *optionsModel, QObject *parent) : QObject(parent), optionsModel(optionsModel), peerTableModel(0), banTableModel(0), cachedNumBlocks(0), cachedReindexing(0), cachedImporting(0), numBlocksAtStartup(-1), pollTimer(0) { peerTableModel = new PeerTableModel(this); banTableModel = new BanTableModel(this); pollTimer = new QTimer(this); connect(pollTimer, SIGNAL(timeout()), this, SLOT(updateTimer())); pollTimer->start(MODEL_UPDATE_DELAY); subscribeToCoreSignals(); } ClientModel::~ClientModel() { unsubscribeFromCoreSignals(); } int ClientModel::getNumConnections(unsigned int flags) const { LOCK(cs_vNodes); if (flags == CONNECTIONS_ALL) // Shortcut if we want total return vNodes.size(); int nNum = 0; Q_FOREACH(CNode* pnode, vNodes) if (flags & (pnode->fInbound ? 
CONNECTIONS_IN : CONNECTIONS_OUT)) nNum++; return nNum; } int ClientModel::getNumBlocks() const { LOCK(cs_main); return chainActive.Height(); } int ClientModel::getNumBlocksAtStartup() { if (numBlocksAtStartup == -1) numBlocksAtStartup = getNumBlocks(); return numBlocksAtStartup; } int ClientModel::getHeaderTipHeight() const { LOCK(cs_main); if (!pindexBestHeader) return 0; return pindexBestHeader->nHeight; } int64_t ClientModel::getHeaderTipTime() const { LOCK(cs_main); if (!pindexBestHeader) return 0; return pindexBestHeader->GetBlockTime(); } quint64 ClientModel::getTotalBytesRecv() const { return CNode::GetTotalBytesRecv(); } quint64 ClientModel::getTotalBytesSent() const { return CNode::GetTotalBytesSent(); } QDateTime ClientModel::getLastBlockDate() const { LOCK(cs_main); if (chainActive.Tip()) return QDateTime::fromTime_t(chainActive.Tip()->GetBlockTime()); else return QDateTime::fromTime_t(Params().GenesisBlock().GetBlockTime()); // Genesis block's time of current network } double ClientModel::getVerificationProgress(const CBlockIndex *tipIn) const { CBlockIndex *tip = const_cast<CBlockIndex *>(tipIn); if (!tip) { LOCK(cs_main); tip = chainActive.Tip(); } return Checkpoints::GuessVerificationProgress(Params().Checkpoints(), tip); } long ClientModel::getMempoolSize() const { return mempool.size(); } size_t ClientModel::getMempoolDynamicUsage() const { return mempool.DynamicMemoryUsage(); } void ClientModel::updateTimer() { // no locking required at this point // the following calls will aquire the required lock Q_EMIT mempoolSizeChanged(getMempoolSize(), getMempoolDynamicUsage()); Q_EMIT bytesChanged(getTotalBytesRecv(), getTotalBytesSent()); } void ClientModel::updateNumConnections(int numConnections) { Q_EMIT numConnectionsChanged(numConnections); } void ClientModel::updateAlert(const QString &hash, int status) { // Show error message notification for new alert if(status == CT_NEW) { uint256 hash_256; hash_256.SetHex(hash.toStdString()); CAlert alert = 
CAlert::getAlertByHash(hash_256); if(!alert.IsNull()) { Q_EMIT message(tr("Network Alert"), QString::fromStdString(alert.strStatusBar), CClientUIInterface::ICON_ERROR); } } Q_EMIT alertsChanged(getStatusBarWarnings()); } bool ClientModel::inInitialBlockDownload() const { return IsInitialBlockDownload(); } enum BlockSource ClientModel::getBlockSource() const { if (fReindex) return BLOCK_SOURCE_REINDEX; else if (fImporting) return BLOCK_SOURCE_DISK; else if (getNumConnections() > 0) return BLOCK_SOURCE_NETWORK; return BLOCK_SOURCE_NONE; } QString ClientModel::getStatusBarWarnings() const { return QString::fromStdString(GetWarnings("statusbar")); } OptionsModel *ClientModel::getOptionsModel() { return optionsModel; } PeerTableModel *ClientModel::getPeerTableModel() { return peerTableModel; } BanTableModel *ClientModel::getBanTableModel() { return banTableModel; } QString ClientModel::formatFullVersion() const { return QString::fromStdString(FormatFullVersion()); } QString ClientModel::formatSubVersion() const { return QString::fromStdString(strSubVersion); } bool ClientModel::isReleaseVersion() const { return CLIENT_VERSION_IS_RELEASE; } QString ClientModel::formatClientStartupTime() const { return QDateTime::fromTime_t(nClientStartupTime).toString(); } void ClientModel::updateBanlist() { banTableModel->refresh(); } QString ClientModel::dataDir() const { return GUIUtil::boostPathToQString(GetDataDir()); } // Handlers for core signals static void ShowProgress(ClientModel *clientmodel, const std::string &title, int nProgress) { // emits signal "showProgress" QMetaObject::invokeMethod(clientmodel, "showProgress", Qt::QueuedConnection, Q_ARG(QString, QString::fromStdString(title)), Q_ARG(int, nProgress)); } static void NotifyNumConnectionsChanged(ClientModel *clientmodel, int newNumConnections) { // Too noisy: qDebug() << "NotifyNumConnectionsChanged : " + QString::number(newNumConnections); QMetaObject::invokeMethod(clientmodel, "updateNumConnections", 
Qt::QueuedConnection, Q_ARG(int, newNumConnections)); } static void NotifyAlertChanged(ClientModel *clientmodel, const uint256 &hash, ChangeType status) { qDebug() << "NotifyAlertChanged : " + QString::fromStdString(hash.GetHex()) + " status=" + QString::number(status); QMetaObject::invokeMethod(clientmodel, "updateAlert", Qt::QueuedConnection, Q_ARG(QString, QString::fromStdString(hash.GetHex())), Q_ARG(int, status)); } static void BannedListChanged(ClientModel *clientmodel) { qDebug() << QString("%1: Requesting update for peer banlist").arg(__func__); QMetaObject::invokeMethod(clientmodel, "updateBanlist", Qt::QueuedConnection); } static void BlockTipChanged(ClientModel *clientmodel, bool initialSync, const CBlockIndex *pIndex, bool fHeader) { // lock free async UI updates in case we have a new block tip // during initial sync, only update the UI if the last update // was > 250ms (MODEL_UPDATE_DELAY) ago int64_t now = 0; if (initialSync) now = GetTimeMillis(); int64_t& nLastUpdateNotification = fHeader ? 
nLastHeaderTipUpdateNotification : nLastBlockTipUpdateNotification; // if we are in-sync, update the UI regardless of last update time if (!initialSync || now - nLastUpdateNotification > MODEL_UPDATE_DELAY) { //pass a async signal to the UI thread QMetaObject::invokeMethod(clientmodel, "numBlocksChanged", Qt::QueuedConnection, Q_ARG(int, pIndex->nHeight), Q_ARG(QDateTime, QDateTime::fromTime_t(pIndex->GetBlockTime())), Q_ARG(double, clientmodel->getVerificationProgress(pIndex)), Q_ARG(bool, fHeader)); nLastUpdateNotification = now; nLastBlockTipUpdateNotification = now; } } void ClientModel::subscribeToCoreSignals() { // Connect signals to client uiInterface.ShowProgress.connect(boost::bind(ShowProgress, this, _1, _2)); uiInterface.NotifyNumConnectionsChanged.connect(boost::bind(NotifyNumConnectionsChanged, this, _1)); uiInterface.NotifyAlertChanged.connect(boost::bind(NotifyAlertChanged, this, _1, _2)); uiInterface.BannedListChanged.connect(boost::bind(BannedListChanged, this)); uiInterface.NotifyBlockTip.connect(boost::bind(BlockTipChanged, this, _1, _2, false)); uiInterface.NotifyHeaderTip.connect(boost::bind(BlockTipChanged, this, _1, _2, true)); } void ClientModel::unsubscribeFromCoreSignals() { // Disconnect signals from client uiInterface.ShowProgress.disconnect(boost::bind(ShowProgress, this, _1, _2)); uiInterface.NotifyNumConnectionsChanged.disconnect(boost::bind(NotifyNumConnectionsChanged, this, _1)); uiInterface.NotifyAlertChanged.disconnect(boost::bind(NotifyAlertChanged, this, _1, _2)); uiInterface.BannedListChanged.disconnect(boost::bind(BannedListChanged, this)); uiInterface.NotifyBlockTip.disconnect(boost::bind(BlockTipChanged, this, _1, _2, false)); uiInterface.NotifyHeaderTip.disconnect(boost::bind(BlockTipChanged, this, _1, _2, true)); }
#include "Bool.h"

// Default construction yields the false value.
Bool::Bool()
    : data(false)
{
}

// Wraps the given primitive boolean.
Bool::Bool(bool data)
    : data(data)
{
}
#include <prevc/pipeline/AST/function-declaration.hxx>
#include <utility>

namespace prevc
{
    namespace pipeline
    {
        namespace AST
        {
            // Takes ownership of `parameters` and (optionally) `implementation`;
            // `frame` is filled in later by check_semantics().
            FunctionDeclaration::FunctionDeclaration(Pipeline* pipeline, util::Location&& location,
                    const util::String& name, Type* type, AST::Parameters* parameters, Expression* implementation):
                Declaration(pipeline, std::move(location), Declaration::Kind::Function, name, type),
                parameters(parameters),
                implementation(implementation),
                frame(nullptr)
            {

            }

            FunctionDeclaration::~FunctionDeclaration()
            {
                delete parameters;

                if (implementation != nullptr)
                    delete implementation;

                if (frame != nullptr)
                    delete frame;
            }

            // Semantic checks for the declaration: parameter uniqueness and
            // passability, return-type validity, and (when present) that the
            // implementation's type matches the declared return type.
            // Side effects: pushes/pops a frame and a namespace scope, and
            // stores the popped frame in `this->frame`.
            void FunctionDeclaration::check_semantics()
            {
                Declaration::check_semantics();
                auto& global_namespace = pipeline->global_namespace;
                // Frame push must precede scope push so parameter declarations
                // get frame slots in this function's frame.
                pipeline->frame_system->push();
                global_namespace->push_scope();

                for (auto& parameter : *parameters)
                {
                    if (global_namespace->insert_declaration(parameter))
                        continue;

                    // Insertion failed => a declaration with this name already
                    // exists in the current scope; report where.
                    auto duplicate = global_namespace->find_declaration(parameter->name).value();
                    auto& location = duplicate->location;

                    CompileTimeError::raise(pipeline->file_name, parameter->location,
                        util::String::format("parameter name `%s` already used (at %d:%d)",
                            parameter->name.c_str(), location.line_0, location.column_0));
                }

                parameters->check_semantics();

                for (auto& parameter : *parameters)
                {
                    const auto& parameter_type = parameter->get_semantic_type();

                    if (!parameter_type->can_be_passed())
                        CompileTimeError::raise(pipeline->file_name, parameter->location,
                            util::String::format(
                                "functions parameters can only be of type `bool`, `char`, `int` or `ptr <anything>`, "
                                "type `%s` is not one of them",
                                parameter_type->to_string().c_str()));
                }

                const auto& return_type = this->get_semantic_type();

                if (!return_type->can_be_returned())
                    CompileTimeError::raise(pipeline->file_name, type->location,
                        util::String::format(
                            "functions return type can only be `void`, `bool`, `char`, `int` or `ptr <anything>`, "
                            "type `%s` is not one of them",
                            return_type->to_string().c_str()));

                if (implementation != nullptr)
                {
                    implementation->check_semantics();
                    const auto& implementation_type = implementation->get_semantic_type();

                    if (!this->get_semantic_type()->equals(implementation->get_semantic_type()))
                        CompileTimeError::raise(pipeline->file_name, this->location,
                            util::String::format(
                                "declared function `%s` return type is `%s`, but the provided implementation returns "
                                "an expression of type `%s`",
                                this->name.c_str(),
                                this->get_semantic_type()->to_string().c_str(),
                                implementation_type->to_string().c_str()));
                }

                global_namespace->pop_scope();
                // The popped frame now describes this function's locals/params.
                this->frame = (semantic_analysis::Frame*) pipeline->frame_system->pop();
            }

            // Top-level functions keep their source name; nested functions get
            // a mangled name that encodes nesting level and node id.
            util::String FunctionDeclaration::get_native_name() const
            {
                return (frame->level <= 1)
                    ? name
                    : util::String::format("f_%i_%zu", this->frame->level, this->id);
            }

            // Emits the LLVM function prototype (no body). Nested functions get
            // private linkage and an extra leading static-link parameter.
            void FunctionDeclaration::generate_IR_declaration()
            {
                auto& module = this->pipeline->IR_module;
                auto& context = module->getContext();
                auto sem_type = ((semantic_analysis::Type*) this->get_semantic_type());
                auto type = sem_type->get_llvm_type(context);
                auto linkage = (this->frame->level <= 1)
                    ? llvm::Function::ExternalLinkage
                    : llvm::Function::PrivateLinkage;

                // +1 leaves room for the optional static-link parameter.
                llvm_parameters.reserve(parameters->size() + 1);

                if (this->frame->level > 1)
                    llvm_parameters.push_back(llvm::PointerType::getUnqual(frame->static_link->get_llvm_type(context)));

                for (auto& parameter : *this->parameters)
                    llvm_parameters.push_back(((semantic_analysis::Type*) parameter->get_semantic_type())->get_llvm_type(context));

                auto fun_type = llvm::FunctionType::get(type, llvm_parameters, false);
                auto function = llvm::Function::Create(fun_type, linkage, get_native_name().c_str(), module);
                function->setCallingConv(llvm::CallingConv::C);
            }

            // Emits the function body: allocates the frame on the stack, spills
            // the static link and the arguments into it, then generates and
            // returns the implementation expression (no-op for extern decls).
            void FunctionDeclaration::generate_IR_implementation()
            {
                if (this->implementation != nullptr)
                {
                    auto& module = this->pipeline->IR_module;
                    auto& context = module->getContext();
                    auto sem_type = ((semantic_analysis::Type*) this->get_semantic_type());
                    auto function = module->getFunction(get_native_name().c_str());
                    llvm::IRBuilder<> builder(llvm::BasicBlock::Create(context, "entry", function));
                    auto frame_type = this->frame->get_llvm_type(context);
                    auto allocated = builder.CreateAlloca(frame_type);
                    this->frame->allocated_frame = allocated;
                    auto args = function->arg_begin();

                    // Slot 0 of the frame holds the static link (nested fns only).
                    if (this->frame->level > 1)
                        builder.CreateStore(args++, builder.CreateStructGEP(frame_type, allocated, 0));

                    for (auto& parameter : *this->parameters)
                        builder.CreateStore(args++, builder.CreateStructGEP(frame_type, allocated, (std::uint32_t) parameter->frame_index));

                    auto value = this->implementation->generate_IR(&builder);

                    if (!sem_type->is_void())
                        builder.CreateRet(value);
                    else
                        builder.CreateRetVoid();
                }
            }

            // JSON-ish debug dump of the declaration.
            util::String FunctionDeclaration::to_string() const noexcept
            {
                return util::String::format(
                    R"({"type": "function-declaration", "location": %s, "name": "%s", "declarationType": %s, "parameters": %s, "implementation": %s})",
                    location.to_string().c_str(),
                    name.c_str(),
                    type->to_string().c_str(),
                    parameters->to_string().c_str(),
                    implementation == nullptr ? "null" : implementation->to_string().c_str());
            }
        }
    }
}
//===- SelectionDAGBuilder.cpp - Selection-DAG building -------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This implements routines for translating from LLVM IR into SelectionDAG IR. // //===----------------------------------------------------------------------===// #include "SelectionDAGBuilder.h" #include "SDNodeDbgValue.h" #include "llvm/ADT/APFloat.h" #include "llvm/ADT/APInt.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/BitVector.h" #include "llvm/ADT/DenseMap.h" #include "llvm/ADT/None.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/STLExtras.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/ADT/Triple.h" #include "llvm/ADT/Twine.h" #include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/BranchProbabilityInfo.h" #include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/EHPersonalities.h" #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/MemoryLocation.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Analysis/ValueTracking.h" #include "llvm/Analysis/VectorUtils.h" #include "llvm/CodeGen/Analysis.h" #include "llvm/CodeGen/FunctionLoweringInfo.h" #include "llvm/CodeGen/GCMetadata.h" #include "llvm/CodeGen/ISDOpcodes.h" #include "llvm/CodeGen/MachineBasicBlock.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstr.h" #include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineJumpTableInfo.h" #include "llvm/CodeGen/MachineMemOperand.h" #include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" 
#include "llvm/CodeGen/RuntimeLibcalls.h" #include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/SelectionDAGNodes.h" #include "llvm/CodeGen/SelectionDAGTargetInfo.h" #include "llvm/CodeGen/StackMaps.h" #include "llvm/CodeGen/SwiftErrorValueTracking.h" #include "llvm/CodeGen/TargetFrameLowering.h" #include "llvm/CodeGen/TargetInstrInfo.h" #include "llvm/CodeGen/TargetLowering.h" #include "llvm/CodeGen/TargetOpcodes.h" #include "llvm/CodeGen/TargetRegisterInfo.h" #include "llvm/CodeGen/TargetSubtargetInfo.h" #include "llvm/CodeGen/ValueTypes.h" #include "llvm/CodeGen/WinEHFuncInfo.h" #include "llvm/IR/Argument.h" #include "llvm/IR/Attributes.h" #include "llvm/IR/BasicBlock.h" #include "llvm/IR/CFG.h" #include "llvm/IR/CallSite.h" #include "llvm/IR/CallingConv.h" #include "llvm/IR/Constant.h" #include "llvm/IR/ConstantRange.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DebugInfoMetadata.h" #include "llvm/IR/DebugLoc.h" #include "llvm/IR/DerivedTypes.h" #include "llvm/IR/Function.h" #include "llvm/IR/GetElementPtrTypeIterator.h" #include "llvm/IR/InlineAsm.h" #include "llvm/IR/InstrTypes.h" #include "llvm/IR/Instruction.h" #include "llvm/IR/Instructions.h" #include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Intrinsics.h" #include "llvm/IR/LLVMContext.h" #include "llvm/IR/Metadata.h" #include "llvm/IR/Module.h" #include "llvm/IR/Operator.h" #include "llvm/IR/PatternMatch.h" #include "llvm/IR/Statepoint.h" #include "llvm/IR/Type.h" #include "llvm/IR/User.h" #include "llvm/IR/Value.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCSymbol.h" #include "llvm/Support/AtomicOrdering.h" #include "llvm/Support/BranchProbability.h" #include "llvm/Support/Casting.h" #include "llvm/Support/CodeGen.h" #include "llvm/Support/CommandLine.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MachineValueType.h" #include "llvm/Support/MathExtras.h" #include 
"llvm/Support/raw_ostream.h" #include "llvm/Target/TargetIntrinsicInfo.h" #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetOptions.h" #include "llvm/Transforms/Utils/Local.h" #include <algorithm> #include <cassert> #include <cstddef> #include <cstdint> #include <cstring> #include <iterator> #include <limits> #include <numeric> #include <tuple> #include <utility> #include <vector> using namespace llvm; using namespace PatternMatch; using namespace SwitchCG; #define DEBUG_TYPE "isel" /// LimitFloatPrecision - Generate low-precision inline sequences for /// some float libcalls (6, 8 or 12 bits). static unsigned LimitFloatPrecision; static cl::opt<unsigned, true> LimitFPPrecision("limit-float-precision", cl::desc("Generate low-precision inline sequences " "for some float libcalls"), cl::location(LimitFloatPrecision), cl::Hidden, cl::init(0)); static cl::opt<unsigned> SwitchPeelThreshold( "switch-peel-threshold", cl::Hidden, cl::init(66), cl::desc("Set the case probability threshold for peeling the case from a " "switch statement. A value greater than 100 will void this " "optimization")); // Limit the width of DAG chains. This is important in general to prevent // DAG-based analysis from blowing up. For example, alias analysis and // load clustering may not complete in reasonable time. It is difficult to // recognize and avoid this situation within each individual analysis, and // future analyses are likely to have the same behavior. Limiting DAG width is // the safe approach and will be especially important with global DAGs. // // MaxParallelChains default is arbitrarily high to avoid affecting // optimization, but could be lowered to improve compile time. Any ld-ld-st-st // sequence over this should have been converted to llvm.memcpy by the // frontend. 
It is easy to induce this behavior with .ll code such as: // %buffer = alloca [4096 x i8] // %data = load [4096 x i8]* %argPtr // store [4096 x i8] %data, [4096 x i8]* %buffer static const unsigned MaxParallelChains = 64; // Return the calling convention if the Value passed requires ABI mangling as it // is a parameter to a function or a return value from a function which is not // an intrinsic. static Optional<CallingConv::ID> getABIRegCopyCC(const Value *V) { if (auto *R = dyn_cast<ReturnInst>(V)) return R->getParent()->getParent()->getCallingConv(); if (auto *CI = dyn_cast<CallInst>(V)) { const bool IsInlineAsm = CI->isInlineAsm(); const bool IsIndirectFunctionCall = !IsInlineAsm && !CI->getCalledFunction(); // It is possible that the call instruction is an inline asm statement or an // indirect function call in which case the return value of // getCalledFunction() would be nullptr. const bool IsInstrinsicCall = !IsInlineAsm && !IsIndirectFunctionCall && CI->getCalledFunction()->getIntrinsicID() != Intrinsic::not_intrinsic; if (!IsInlineAsm && !IsInstrinsicCall) return CI->getCallingConv(); } return None; } static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, Optional<CallingConv::ID> CC); /// getCopyFromParts - Create a value that contains the specified legal parts /// combined into the value they represent. If the parts combine to a type /// larger than ValueVT then AssertOp can be used to specify whether the extra /// bits are known to be zero (ISD::AssertZext) or sign extended from ValueVT /// (ISD::AssertSext). 
static SDValue getCopyFromParts(SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts, MVT PartVT, EVT ValueVT, const Value *V, Optional<CallingConv::ID> CC = None, Optional<ISD::NodeType> AssertOp = None) { if (ValueVT.isVector()) return getCopyFromPartsVector(DAG, DL, Parts, NumParts, PartVT, ValueVT, V, CC); assert(NumParts > 0 && "No parts to assemble!"); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); SDValue Val = Parts[0]; if (NumParts > 1) { // Assemble the value from multiple parts. if (ValueVT.isInteger()) { unsigned PartBits = PartVT.getSizeInBits(); unsigned ValueBits = ValueVT.getSizeInBits(); // Assemble the power of 2 part. unsigned RoundParts = (NumParts & (NumParts - 1)) ? 1 << Log2_32(NumParts) : NumParts; unsigned RoundBits = PartBits * RoundParts; EVT RoundVT = RoundBits == ValueBits ? ValueVT : EVT::getIntegerVT(*DAG.getContext(), RoundBits); SDValue Lo, Hi; EVT HalfVT = EVT::getIntegerVT(*DAG.getContext(), RoundBits/2); if (RoundParts > 2) { Lo = getCopyFromParts(DAG, DL, Parts, RoundParts / 2, PartVT, HalfVT, V); Hi = getCopyFromParts(DAG, DL, Parts + RoundParts / 2, RoundParts / 2, PartVT, HalfVT, V); } else { Lo = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[0]); Hi = DAG.getNode(ISD::BITCAST, DL, HalfVT, Parts[1]); } if (DAG.getDataLayout().isBigEndian()) std::swap(Lo, Hi); Val = DAG.getNode(ISD::BUILD_PAIR, DL, RoundVT, Lo, Hi); if (RoundParts < NumParts) { // Assemble the trailing non-power-of-2 part. unsigned OddParts = NumParts - RoundParts; EVT OddVT = EVT::getIntegerVT(*DAG.getContext(), OddParts * PartBits); Hi = getCopyFromParts(DAG, DL, Parts + RoundParts, OddParts, PartVT, OddVT, V, CC); // Combine the round and odd parts. 
Lo = Val; if (DAG.getDataLayout().isBigEndian()) std::swap(Lo, Hi); EVT TotalVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits); Hi = DAG.getNode(ISD::ANY_EXTEND, DL, TotalVT, Hi); Hi = DAG.getNode(ISD::SHL, DL, TotalVT, Hi, DAG.getConstant(Lo.getValueSizeInBits(), DL, TLI.getPointerTy(DAG.getDataLayout()))); Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, TotalVT, Lo); Val = DAG.getNode(ISD::OR, DL, TotalVT, Lo, Hi); } } else if (PartVT.isFloatingPoint()) { // FP split into multiple FP parts (for ppcf128) assert(ValueVT == EVT(MVT::ppcf128) && PartVT == MVT::f64 && "Unexpected split"); SDValue Lo, Hi; Lo = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[0]); Hi = DAG.getNode(ISD::BITCAST, DL, EVT(MVT::f64), Parts[1]); if (TLI.hasBigEndianPartOrdering(ValueVT, DAG.getDataLayout())) std::swap(Lo, Hi); Val = DAG.getNode(ISD::BUILD_PAIR, DL, ValueVT, Lo, Hi); } else { // FP split into integer parts (soft fp) assert(ValueVT.isFloatingPoint() && PartVT.isInteger() && !PartVT.isVector() && "Unexpected split"); EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits()); Val = getCopyFromParts(DAG, DL, Parts, NumParts, PartVT, IntVT, V, CC); } } // There is now one part, held in Val. Correct it to match ValueVT. // PartEVT is the type of the register class that holds the value. // ValueVT is the type of the inline asm operation. EVT PartEVT = Val.getValueType(); if (PartEVT == ValueVT) return Val; if (PartEVT.isInteger() && ValueVT.isFloatingPoint() && ValueVT.bitsLT(PartEVT)) { // For an FP value in an integer part, we need to truncate to the right // width first. PartEVT = EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits()); Val = DAG.getNode(ISD::TRUNCATE, DL, PartEVT, Val); } // Handle types that have the same size. if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits()) return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val); // Handle types with different sizes. 
if (PartEVT.isInteger() && ValueVT.isInteger()) { if (ValueVT.bitsLT(PartEVT)) { // For a truncate, see if we have any information to // indicate whether the truncated bits will always be // zero or sign-extension. if (AssertOp.hasValue()) Val = DAG.getNode(*AssertOp, DL, PartEVT, Val, DAG.getValueType(ValueVT)); return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val); } return DAG.getNode(ISD::ANY_EXTEND, DL, ValueVT, Val); } if (PartEVT.isFloatingPoint() && ValueVT.isFloatingPoint()) { // FP_ROUND's are always exact here. if (ValueVT.bitsLT(Val.getValueType())) return DAG.getNode( ISD::FP_ROUND, DL, ValueVT, Val, DAG.getTargetConstant(1, DL, TLI.getPointerTy(DAG.getDataLayout()))); return DAG.getNode(ISD::FP_EXTEND, DL, ValueVT, Val); } // Handle MMX to a narrower integer type by bitcasting MMX to integer and // then truncating. if (PartEVT == MVT::x86mmx && ValueVT.isInteger() && ValueVT.bitsLT(PartEVT)) { Val = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Val); return DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val); } report_fatal_error("Unknown mismatch in getCopyFromParts!"); } static void diagnosePossiblyInvalidConstraint(LLVMContext &Ctx, const Value *V, const Twine &ErrMsg) { const Instruction *I = dyn_cast_or_null<Instruction>(V); if (!V) return Ctx.emitError(ErrMsg); const char *AsmError = ", possible invalid constraint for vector type"; if (const CallInst *CI = dyn_cast<CallInst>(I)) if (isa<InlineAsm>(CI->getCalledValue())) return Ctx.emitError(I, ErrMsg + AsmError); return Ctx.emitError(I, ErrMsg); } /// getCopyFromPartsVector - Create a value that contains the specified legal /// parts combined into the value they represent. If the parts combine to a /// type larger than ValueVT then AssertOp can be used to specify whether the /// extra bits are known to be zero (ISD::AssertZext) or sign extended from /// ValueVT (ISD::AssertSext). 
static SDValue getCopyFromPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                      const SDValue *Parts, unsigned NumParts,
                                      MVT PartVT, EVT ValueVT, const Value *V,
                                      Optional<CallingConv::ID> CallConv) {
  assert(ValueVT.isVector() && "Not a vector value");
  assert(NumParts > 0 && "No parts to assemble!");
  // A present CallConv means the register breakdown must follow the ABI rules
  // for that convention rather than the generic type legalization rules.
  const bool IsABIRegCopy = CallConv.hasValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Val = Parts[0];

  // Handle a multi-element vector.
  if (NumParts > 1) {
    EVT IntermediateVT;
    MVT RegisterVT;
    unsigned NumIntermediates;
    unsigned NumRegs;

    if (IsABIRegCopy) {
      NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
          *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
          NumIntermediates, RegisterVT);
    } else {
      NumRegs =
          TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                     NumIntermediates, RegisterVT);
    }

    assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
    NumParts = NumRegs; // Silence a compiler warning.
    assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");
    assert(RegisterVT.getSizeInBits() ==
           Parts[0].getSimpleValueType().getSizeInBits() &&
           "Part type sizes don't match!");

    // Assemble the parts into intermediate operands.
    SmallVector<SDValue, 8> Ops(NumIntermediates);
    if (NumIntermediates == NumParts) {
      // If the register was not expanded, truncate or copy the value,
      // as appropriate.
      for (unsigned i = 0; i != NumParts; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i], 1,
                                  PartVT, IntermediateVT, V);
    } else if (NumParts > 0) {
      // If the intermediate type was expanded, build the intermediate
      // operands from the parts.
      assert(NumParts % NumIntermediates == 0 &&
             "Must expand into a divisible number of parts!");
      unsigned Factor = NumParts / NumIntermediates;
      for (unsigned i = 0; i != NumIntermediates; ++i)
        Ops[i] = getCopyFromParts(DAG, DL, &Parts[i * Factor], Factor,
                                  PartVT, IntermediateVT, V);
    }

    // Build a vector with BUILD_VECTOR or CONCAT_VECTORS from the
    // intermediate operands.
    EVT BuiltVectorTy =
        EVT::getVectorVT(*DAG.getContext(), IntermediateVT.getScalarType(),
                         (IntermediateVT.isVector()
                              ? IntermediateVT.getVectorNumElements() * NumParts
                              : NumIntermediates));
    Val = DAG.getNode(IntermediateVT.isVector() ? ISD::CONCAT_VECTORS
                                                : ISD::BUILD_VECTOR,
                      DL, BuiltVectorTy, Ops);
  }

  // There is now one part, held in Val.  Correct it to match ValueVT.
  EVT PartEVT = Val.getValueType();

  if (PartEVT == ValueVT)
    return Val;

  if (PartEVT.isVector()) {
    // If the element type of the source/dest vectors are the same, but the
    // parts vector has more elements than the value vector, then we have a
    // vector widening case (e.g. <2 x float> -> <4 x float>).  Extract the
    // elements we want.
    if (PartEVT.getVectorElementType() == ValueVT.getVectorElementType()) {
      assert(PartEVT.getVectorNumElements() > ValueVT.getVectorNumElements() &&
             "Cannot narrow, it would be a lossy transformation");
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    // Vector/Vector bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits())
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

    assert(PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements() &&
      "Cannot handle this kind of promotion");
    // Promoted vector extract
    return DAG.getAnyExtOrTrunc(Val, DL, ValueVT);

  }

  // Trivial bitcast if the types are the same size and the destination
  // vector type is legal.
  if (PartEVT.getSizeInBits() == ValueVT.getSizeInBits() &&
      TLI.isTypeLegal(ValueVT))
    return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);

  if (ValueVT.getVectorNumElements() != 1) {
    // Certain ABIs require that vectors are passed as integers.  When the
    // vectors have the same size, this is an obvious bitcast.
    if (ValueVT.getSizeInBits() == PartEVT.getSizeInBits()) {
      return DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
    } else if (ValueVT.getSizeInBits() < PartEVT.getSizeInBits()) {
      // Bitcast Val back to the original type and extract the corresponding
      // vector we want.
      unsigned Elts = PartEVT.getSizeInBits() / ValueVT.getScalarSizeInBits();
      EVT WiderVecType = EVT::getVectorVT(*DAG.getContext(),
                                          ValueVT.getVectorElementType(), Elts);
      Val = DAG.getBitcast(WiderVecType, Val);
      return DAG.getNode(
          ISD::EXTRACT_SUBVECTOR, DL, ValueVT, Val,
          DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
    }

    // No sound conversion exists; report it and produce undef so lowering can
    // continue.
    diagnosePossiblyInvalidConstraint(
        *DAG.getContext(), V, "non-trivial scalar-to-vector conversion");
    return DAG.getUNDEF(ValueVT);
  }

  // Handle cases such as i8 -> <1 x i1>
  EVT ValueSVT = ValueVT.getVectorElementType();
  if (ValueVT.getVectorNumElements() == 1 && ValueSVT != PartEVT)
    Val = ValueVT.isFloatingPoint() ? DAG.getFPExtendOrRound(Val, DL, ValueSVT)
                                    : DAG.getAnyExtOrTrunc(Val, DL, ValueSVT);

  return DAG.getBuildVector(ValueVT, DL, Val);
}

static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &dl,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv);

/// getCopyToParts - Create a series of nodes that contain the specified value
/// split into legal parts.  If the parts contain more bits than Val, then, for
/// integers, ExtendKind can be used to specify how to generate the extra bits.
static void getCopyToParts(SelectionDAG &DAG, const SDLoc &DL, SDValue Val,
                           SDValue *Parts, unsigned NumParts, MVT PartVT,
                           const Value *V,
                           Optional<CallingConv::ID> CallConv = None,
                           ISD::NodeType ExtendKind = ISD::ANY_EXTEND) {
  EVT ValueVT = Val.getValueType();

  // Handle the vector case separately.
  if (ValueVT.isVector())
    return getCopyToPartsVector(DAG, DL, Val, Parts, NumParts, PartVT, V,
                                CallConv);

  unsigned PartBits = PartVT.getSizeInBits();
  unsigned OrigNumParts = NumParts;
  assert(DAG.getTargetLoweringInfo().isTypeLegal(PartVT) &&
         "Copying to an illegal type!");

  if (NumParts == 0)
    return;

  assert(!ValueVT.isVector() && "Vector case handled elsewhere");
  EVT PartEVT = PartVT;
  if (PartEVT == ValueVT) {
    assert(NumParts == 1 && "No-op copy with multiple parts!");
    Parts[0] = Val;
    return;
  }

  if (NumParts * PartBits > ValueVT.getSizeInBits()) {
    // If the parts cover more bits than the value has, promote the value.
    if (PartVT.isFloatingPoint() && ValueVT.isFloatingPoint()) {
      assert(NumParts == 1 && "Do not know what to promote to!");
      Val = DAG.getNode(ISD::FP_EXTEND, DL, PartVT, Val);
    } else {
      if (ValueVT.isFloatingPoint()) {
        // FP values need to be bitcast, then extended if they are being put
        // into a larger container.
        ValueVT = EVT::getIntegerVT(*DAG.getContext(),  ValueVT.getSizeInBits());
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      }
      assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
             ValueVT.isInteger() &&
             "Unknown mismatch!");
      ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
      Val = DAG.getNode(ExtendKind, DL, ValueVT, Val);
      if (PartVT == MVT::x86mmx)
        Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }
  } else if (PartBits == ValueVT.getSizeInBits()) {
    // Different types of the same size.
    assert(NumParts == 1 && PartEVT != ValueVT);
    Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  } else if (NumParts * PartBits < ValueVT.getSizeInBits()) {
    // If the parts cover less bits than value has, truncate the value.
    assert((PartVT.isInteger() || PartVT == MVT::x86mmx) &&
           ValueVT.isInteger() &&
           "Unknown mismatch!");
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
    if (PartVT == MVT::x86mmx)
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
  }

  // The value may have changed - recompute ValueVT.
  ValueVT = Val.getValueType();
  assert(NumParts * PartBits == ValueVT.getSizeInBits() &&
         "Failed to tile the value with PartVT!");

  if (NumParts == 1) {
    if (PartEVT != ValueVT) {
      diagnosePossiblyInvalidConstraint(*DAG.getContext(), V,
                                        "scalar-to-vector conversion failed");
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    }

    Parts[0] = Val;
    return;
  }

  // Expand the value into multiple parts.
  if (NumParts & (NumParts - 1)) {
    // The number of parts is not a power of 2.  Split off and copy the tail.
    assert(PartVT.isInteger() && ValueVT.isInteger() &&
           "Do not know what to expand to!");
    unsigned RoundParts = 1 << Log2_32(NumParts);
    unsigned RoundBits = RoundParts * PartBits;
    unsigned OddParts = NumParts - RoundParts;
    SDValue OddVal = DAG.getNode(ISD::SRL, DL, ValueVT, Val,
      DAG.getShiftAmountConstant(RoundBits, ValueVT, DL, /*LegalTypes*/false));

    getCopyToParts(DAG, DL, OddVal, Parts + RoundParts, OddParts, PartVT, V,
                   CallConv);

    if (DAG.getDataLayout().isBigEndian())
      // The odd parts were reversed by getCopyToParts - unreverse them.
      std::reverse(Parts + RoundParts, Parts + NumParts);

    NumParts = RoundParts;
    ValueVT = EVT::getIntegerVT(*DAG.getContext(), NumParts * PartBits);
    Val = DAG.getNode(ISD::TRUNCATE, DL, ValueVT, Val);
  }

  // The number of parts is a power of 2.  Repeatedly bisect the value using
  // EXTRACT_ELEMENT.
  Parts[0] = DAG.getNode(ISD::BITCAST, DL,
                         EVT::getIntegerVT(*DAG.getContext(),
                                           ValueVT.getSizeInBits()),
                         Val);

  for (unsigned StepSize = NumParts; StepSize > 1; StepSize /= 2) {
    for (unsigned i = 0; i < NumParts; i += StepSize) {
      unsigned ThisBits = StepSize * PartBits / 2;
      EVT ThisVT = EVT::getIntegerVT(*DAG.getContext(), ThisBits);
      SDValue &Part0 = Parts[i];
      SDValue &Part1 = Parts[i+StepSize/2];

      Part1 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(1, DL));
      Part0 = DAG.getNode(ISD::EXTRACT_ELEMENT, DL,
                          ThisVT, Part0, DAG.getIntPtrConstant(0, DL));

      if (ThisBits == PartBits && ThisVT != PartVT) {
        Part0 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part0);
        Part1 = DAG.getNode(ISD::BITCAST, DL, PartVT, Part1);
      }
    }
  }

  if (DAG.getDataLayout().isBigEndian())
    std::reverse(Parts, Parts + OrigNumParts);
}

/// Widen \p Val to the wider vector type \p PartVT by padding with undef
/// elements, when \p PartVT is a vector with more elements but the same
/// element type.  Returns an empty SDValue when no such widening applies.
static SDValue widenVectorToPartType(SelectionDAG &DAG,
                                     SDValue Val, const SDLoc &DL, EVT PartVT) {
  if (!PartVT.isVector())
    return SDValue();

  EVT ValueVT = Val.getValueType();
  unsigned PartNumElts = PartVT.getVectorNumElements();
  unsigned ValueNumElts = ValueVT.getVectorNumElements();
  if (PartNumElts > ValueNumElts &&
      PartVT.getVectorElementType() == ValueVT.getVectorElementType()) {
    EVT ElementVT = PartVT.getVectorElementType();
    // Vector widening case, e.g. <2 x float> -> <4 x float>.  Shuffle in
    // undef elements.
    SmallVector<SDValue, 16> Ops;
    DAG.ExtractVectorElements(Val, Ops);
    SDValue EltUndef = DAG.getUNDEF(ElementVT);
    for (unsigned i = ValueNumElts, e = PartNumElts; i != e; ++i)
      Ops.push_back(EltUndef);

    // FIXME: Use CONCAT for 2x -> 4x.
    return DAG.getBuildVector(PartVT, DL, Ops);
  }

  return SDValue();
}

/// getCopyToPartsVector - Create a series of nodes that contain the specified
/// value split into legal parts.
static void getCopyToPartsVector(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue Val, SDValue *Parts, unsigned NumParts,
                                 MVT PartVT, const Value *V,
                                 Optional<CallingConv::ID> CallConv) {
  EVT ValueVT = Val.getValueType();
  assert(ValueVT.isVector() && "Not a vector");
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  // A present CallConv means the register breakdown must follow the ABI rules
  // for that convention rather than the generic type legalization rules.
  const bool IsABIRegCopy = CallConv.hasValue();

  if (NumParts == 1) {
    EVT PartEVT = PartVT;
    if (PartEVT == ValueVT) {
      // Nothing to do.
    } else if (PartVT.getSizeInBits() == ValueVT.getSizeInBits()) {
      // Bitconvert vector->vector case.
      Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
    } else if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, PartVT)) {
      Val = Widened;
    } else if (PartVT.isVector() &&
               PartEVT.getVectorElementType().bitsGE(
                 ValueVT.getVectorElementType()) &&
               PartEVT.getVectorNumElements() == ValueVT.getVectorNumElements()) {

      // Promoted vector extract
      Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
    } else {
      if (ValueVT.getVectorNumElements() == 1) {
        // Single-element vector copied into a scalar part.
        Val = DAG.getNode(
            ISD::EXTRACT_VECTOR_ELT, DL, PartVT, Val,
            DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout())));
      } else {
        // Vector copied into a wider scalar part: go through an integer of
        // the vector's total width, then extend to the part type.
        assert(PartVT.getSizeInBits() > ValueVT.getSizeInBits() &&
               "lossy conversion of vector to scalar type");
        EVT IntermediateType =
            EVT::getIntegerVT(*DAG.getContext(), ValueVT.getSizeInBits());
        Val = DAG.getBitcast(IntermediateType, Val);
        Val = DAG.getAnyExtOrTrunc(Val, DL, PartVT);
      }
    }

    assert(Val.getValueType() == PartVT && "Unexpected vector part value type");
    Parts[0] = Val;
    return;
  }

  // Handle a multi-element vector.
  EVT IntermediateVT;
  MVT RegisterVT;
  unsigned NumIntermediates;
  unsigned NumRegs;
  if (IsABIRegCopy) {
    NumRegs = TLI.getVectorTypeBreakdownForCallingConv(
        *DAG.getContext(), CallConv.getValue(), ValueVT, IntermediateVT,
        NumIntermediates, RegisterVT);
  } else {
    NumRegs =
        TLI.getVectorTypeBreakdown(*DAG.getContext(), ValueVT, IntermediateVT,
                                   NumIntermediates, RegisterVT);
  }

  assert(NumRegs == NumParts && "Part count doesn't match vector breakdown!");
  NumParts = NumRegs; // Silence a compiler warning.
  assert(RegisterVT == PartVT && "Part type doesn't match vector breakdown!");

  unsigned IntermediateNumElts = IntermediateVT.isVector() ?
    IntermediateVT.getVectorNumElements() : 1;

  // Convert the vector to the appropriate type if necessary.
  unsigned DestVectorNoElts = NumIntermediates * IntermediateNumElts;

  EVT BuiltVectorTy = EVT::getVectorVT(
      *DAG.getContext(), IntermediateVT.getScalarType(), DestVectorNoElts);
  MVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout());
  if (ValueVT != BuiltVectorTy) {
    if (SDValue Widened = widenVectorToPartType(DAG, Val, DL, BuiltVectorTy))
      Val = Widened;

    // After a successful widening Val already has type BuiltVectorTy and the
    // bitcast is a no-op.
    Val = DAG.getNode(ISD::BITCAST, DL, BuiltVectorTy, Val);
  }

  // Split the vector into intermediate operands.
  SmallVector<SDValue, 8> Ops(NumIntermediates);
  for (unsigned i = 0; i != NumIntermediates; ++i) {
    if (IntermediateVT.isVector()) {
      Ops[i] = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, IntermediateVT, Val,
                           DAG.getConstant(i * IntermediateNumElts, DL, IdxVT));
    } else {
      Ops[i] = DAG.getNode(
          ISD::EXTRACT_VECTOR_ELT, DL, IntermediateVT, Val,
          DAG.getConstant(i, DL, IdxVT));
    }
  }

  // Split the intermediate operands into legal parts.
  if (NumParts == NumIntermediates) {
    // If the register was not expanded, promote or copy the value,
    // as appropriate.
    for (unsigned i = 0; i != NumParts; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i], 1, PartVT, V, CallConv);
  } else if (NumParts > 0) {
    // If the intermediate type was expanded, split each value into
    // legal parts.
    assert(NumIntermediates != 0 && "division by zero");
    assert(NumParts % NumIntermediates == 0 &&
           "Must expand into a divisible number of parts!");
    unsigned Factor = NumParts / NumIntermediates;
    for (unsigned i = 0; i != NumIntermediates; ++i)
      getCopyToParts(DAG, DL, Ops[i], &Parts[i * Factor], Factor, PartVT, V,
                     CallConv);
  }
}

/// Build a RegsForValue covering a single value type held in the given,
/// already-chosen registers.
RegsForValue::RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt,
                           EVT valuevt, Optional<CallingConv::ID> CC)
    : ValueVTs(1, valuevt), RegVTs(1, regvt), Regs(regs),
      RegCount(1, regs.size()), CallConv(CC) {}

/// Build a RegsForValue for IR type \p Ty, assigning consecutive virtual
/// registers starting at \p Reg.  When \p CC is present the per-value register
/// count/type follow that calling convention's ABI rules.
RegsForValue::RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL, unsigned Reg, Type *Ty,
                           Optional<CallingConv::ID> CC) {
  ComputeValueVTs(TLI, DL, Ty, ValueVTs);

  CallConv = CC;

  for (EVT ValueVT : ValueVTs) {
    unsigned NumRegs =
        isABIMangled()
            ? TLI.getNumRegistersForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getNumRegisters(Context, ValueVT);
    MVT RegisterVT =
        isABIMangled()
            ? TLI.getRegisterTypeForCallingConv(Context, CC.getValue(), ValueVT)
            : TLI.getRegisterType(Context, ValueVT);
    for (unsigned i = 0; i != NumRegs; ++i)
      Regs.push_back(Reg + i);
    RegVTs.push_back(RegisterVT);
    RegCount.push_back(NumRegs);
    Reg += NumRegs;
  }
}

/// Emit CopyFromReg nodes for every register in this RegsForValue and
/// reassemble the parts into the value(s) they represent, returned as a
/// MERGE_VALUES node.  Updates \p Chain (and \p Flag, when non-null) to thread
/// the copies; attaches AssertZext/AssertSext from live-out info when known.
SDValue RegsForValue::getCopyFromRegs(SelectionDAG &DAG,
                                      FunctionLoweringInfo &FuncInfo,
                                      const SDLoc &dl, SDValue &Chain,
                                      SDValue *Flag, const Value *V) const {
  // A Value with type {} or [0 x %t] needs no registers.
  if (ValueVTs.empty())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Assemble the legal parts into the final values.
  SmallVector<SDValue, 4> Values(ValueVTs.size());
  SmallVector<SDValue, 8> Parts;
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    // Copy the legal parts from the registers.
    EVT ValueVT = ValueVTs[Value];
    unsigned NumRegs = RegCount[Value];
    MVT RegisterVT = isABIMangled() ?
                         TLI.getRegisterTypeForCallingConv(
                             *DAG.getContext(), CallConv.getValue(), RegVTs[Value])
                                    : RegVTs[Value];

    Parts.resize(NumRegs);
    for (unsigned i = 0; i != NumRegs; ++i) {
      SDValue P;
      if (!Flag) {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT);
      } else {
        P = DAG.getCopyFromReg(Chain, dl, Regs[Part+i], RegisterVT, *Flag);
        *Flag = P.getValue(2);
      }

      Chain = P.getValue(1);
      Parts[i] = P;

      // If the source register was virtual and if we know something about it,
      // add an assert node.
      if (!Register::isVirtualRegister(Regs[Part + i]) ||
          !RegisterVT.isInteger())
        continue;

      const FunctionLoweringInfo::LiveOutInfo *LOI =
        FuncInfo.GetLiveOutRegInfo(Regs[Part+i]);
      if (!LOI)
        continue;

      unsigned RegSize = RegisterVT.getScalarSizeInBits();
      unsigned NumSignBits = LOI->NumSignBits;
      unsigned NumZeroBits = LOI->Known.countMinLeadingZeros();

      if (NumZeroBits == RegSize) {
        // The current value is a zero.
        // Explicitly express that as it would be easier for
        // optimizations to kick in.
        Parts[i] = DAG.getConstant(0, dl, RegisterVT);
        continue;
      }

      // FIXME: We capture more information than the dag can represent.  For
      // now, just use the tightest assertzext/assertsext possible.
      bool isSExt;
      EVT FromVT(MVT::Other);
      if (NumZeroBits) {
        FromVT = EVT::getIntegerVT(*DAG.getContext(), RegSize - NumZeroBits);
        isSExt = false;
      } else if (NumSignBits > 1) {
        FromVT =
            EVT::getIntegerVT(*DAG.getContext(), RegSize - NumSignBits + 1);
        isSExt = true;
      } else {
        continue;
      }
      // Add an assertion node.
      assert(FromVT != MVT::Other);
      Parts[i] = DAG.getNode(isSExt ?
                             ISD::AssertSext : ISD::AssertZext, dl,
                             RegisterVT, P, DAG.getValueType(FromVT));
    }

    Values[Value] = getCopyFromParts(DAG, dl, Parts.begin(), NumRegs,
                                     RegisterVT, ValueVT, V, CallConv);
    Part += NumRegs;
    Parts.clear();
  }

  return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(ValueVTs), Values);
}

/// Split \p Val into legal parts and emit a CopyToReg for each register in
/// this RegsForValue, updating \p Chain (and \p Flag, when non-null).
void RegsForValue::getCopyToRegs(SDValue Val, SelectionDAG &DAG,
                                 const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                                 const Value *V,
                                 ISD::NodeType PreferredExtendType) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  ISD::NodeType ExtendKind = PreferredExtendType;

  // Get the list of the values's legal parts.
  unsigned NumRegs = Regs.size();
  SmallVector<SDValue, 8> Parts(NumRegs);
  for (unsigned Value = 0, Part = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumParts = RegCount[Value];

    MVT RegisterVT = isABIMangled()
                         ? TLI.getRegisterTypeForCallingConv(
                               *DAG.getContext(), CallConv.getValue(), RegVTs[Value])
                         : RegVTs[Value];

    // Prefer a free zero-extension over an any-extension when the target
    // says it costs nothing.
    if (ExtendKind == ISD::ANY_EXTEND && TLI.isZExtFree(Val, RegisterVT))
      ExtendKind = ISD::ZERO_EXTEND;

    getCopyToParts(DAG, dl, Val.getValue(Val.getResNo() + Value), &Parts[Part],
                   NumParts, RegisterVT, V, CallConv, ExtendKind);
    Part += NumParts;
  }

  // Copy the parts into the registers.
  SmallVector<SDValue, 8> Chains(NumRegs);
  for (unsigned i = 0; i != NumRegs; ++i) {
    SDValue Part;
    if (!Flag) {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i]);
    } else {
      Part = DAG.getCopyToReg(Chain, dl, Regs[i], Parts[i], *Flag);
      *Flag = Part.getValue(1);
    }

    Chains[i] = Part.getValue(0);
  }

  if (NumRegs == 1 || Flag)
    // If NumRegs > 1 && Flag is used then the use of the last CopyToReg is
    // flagged to it. That is the CopyToReg nodes and the user are considered
    // a single scheduling unit. If we create a TokenFactor and return it as
    // chain, then the TokenFactor is both a predecessor (operand) of the
    // user as well as a successor (the TF operands are flagged to the user).
    // c1, f1 = CopyToReg
    // c2, f2 = CopyToReg
    // c3     = TokenFactor c1, c2
    // ...
    //        = op c3, ..., f2
    Chain = Chains[NumRegs-1];
  else
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
}

/// Append the inline-asm operand-flag word and the register operands for this
/// RegsForValue to \p Ops.
void RegsForValue::AddInlineAsmOperands(unsigned Code, bool HasMatching,
                                        unsigned MatchingIdx, const SDLoc &dl,
                                        SelectionDAG &DAG,
                                        std::vector<SDValue> &Ops) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size());
  if (HasMatching)
    Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx);
  else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) {
    // Put the register class of the virtual registers in the flag word.  That
    // way, later passes can recompute register class constraints for inline
    // assembly as well as normal instructions.  Don't do this for tied
    // operands that can use the regclass information from the def.
    const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
    const TargetRegisterClass *RC = MRI.getRegClass(Regs.front());
    Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID());
  }

  SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32);
  Ops.push_back(Res);

  if (Code == InlineAsm::Kind_Clobber) {
    // Clobbers should always have a 1:1 mapping with registers, and may
    // reference registers that have illegal (e.g. vector) types. Hence, we
    // shouldn't try to apply any sort of splitting logic to them.
    assert(Regs.size() == RegVTs.size() && Regs.size() == ValueVTs.size() &&
           "No 1:1 mapping from clobbers to regs?");
    unsigned SP = TLI.getStackPointerRegisterToSaveRestore();
    (void)SP; // Only used inside the assert below.
    for (unsigned I = 0, E = ValueVTs.size(); I != E; ++I) {
      Ops.push_back(DAG.getRegister(Regs[I], RegVTs[I]));
      assert(
          (Regs[I] != SP ||
           DAG.getMachineFunction().getFrameInfo().hasOpaqueSPAdjustment()) &&
          "If we clobbered the stack pointer, MFI should know about it.");
    }
    return;
  }

  for (unsigned Value = 0, Reg = 0, e = ValueVTs.size(); Value != e; ++Value) {
    unsigned NumRegs = TLI.getNumRegisters(*DAG.getContext(), ValueVTs[Value]);
    MVT RegisterVT = RegVTs[Value];
    for (unsigned i = 0; i != NumRegs; ++i) {
      assert(Reg < Regs.size() && "Mismatch in # registers expected");
      unsigned TheReg = Regs[Reg++];
      Ops.push_back(DAG.getRegister(TheReg, RegisterVT));
    }
  }
}

/// Flatten this RegsForValue into (register, size-in-bits) pairs, one entry
/// per physical/virtual register.
SmallVector<std::pair<unsigned, unsigned>, 4>
RegsForValue::getRegsAndSizes() const {
  SmallVector<std::pair<unsigned, unsigned>, 4> OutVec;
  unsigned I = 0;
  for (auto CountAndVT : zip_first(RegCount, RegVTs)) {
    // Note: this local deliberately shadows the RegCount member; the range
    // expression above was evaluated before the shadowing declaration.
    unsigned RegCount = std::get<0>(CountAndVT);
    MVT RegisterVT = std::get<1>(CountAndVT);
    unsigned RegisterSize = RegisterVT.getSizeInBits();
    for (unsigned E = I + RegCount; I != E; ++I)
      OutVec.push_back(std::make_pair(Regs[I], RegisterSize));
  }
  return OutVec;
}

/// Cache per-function analyses/state and initialize the switch lowering
/// helper before building the DAG for a function.
void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis *aa,
                               const TargetLibraryInfo *li) {
  AA = aa;
  GFI = gfi;
  LibInfo = li;
  DL = &DAG.getDataLayout();
  Context = DAG.getContext();
  LPadToCallSiteMap.clear();
  SL->init(DAG.getTargetLoweringInfo(), TM, DAG.getDataLayout());
}

/// Reset per-block state so the builder can be reused for the next basic
/// block.
void SelectionDAGBuilder::clear() {
  NodeMap.clear();
  UnusedArgNodeMap.clear();
  PendingLoads.clear();
  PendingExports.clear();
  CurInst = nullptr;
  HasTailCall = false;
  SDNodeOrder = LowestSDNodeOrder;
  StatepointLowering.clear();
}

/// Drop all dangling debug info that has not yet been resolved.
void SelectionDAGBuilder::clearDanglingDebugInfo() {
  DanglingDebugInfoMap.clear();
}

/// Return the current root of the DAG, first folding any PendingLoads into it
/// (via a TokenFactor when there is more than one).
SDValue SelectionDAGBuilder::getRoot() {
  if (PendingLoads.empty())
    return DAG.getRoot();

  if (PendingLoads.size() == 1) {
    SDValue Root = PendingLoads[0];
    DAG.setRoot(Root);
    PendingLoads.clear();
    return Root;
  }

  // Otherwise, we have to make a token factor node.
  SDValue Root = DAG.getTokenFactor(getCurSDLoc(), PendingLoads);
  PendingLoads.clear();
  DAG.setRoot(Root);
  return Root;
}

/// Like getRoot(), but additionally folds the PendingExports (CopyToReg
/// chains) into the returned root.
SDValue SelectionDAGBuilder::getControlRoot() {
  SDValue Root = DAG.getRoot();

  if (PendingExports.empty())
    return Root;

  // Turn all of the CopyToReg chains into one factored node.
  if (Root.getOpcode() != ISD::EntryToken) {
    unsigned i = 0, e = PendingExports.size();
    for (; i != e; ++i) {
      assert(PendingExports[i].getNode()->getNumOperands() > 1);
      if (PendingExports[i].getNode()->getOperand(0) == Root)
        break;  // Don't add the root if we already indirectly depend on it.
    }

    if (i == e)
      PendingExports.push_back(Root);
  }

  Root = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other,
                     PendingExports);
  PendingExports.clear();
  DAG.setRoot(Root);
  return Root;
}

/// Visit a single IR instruction: dispatch to the per-opcode visitor,
/// propagate fast-math flags to the produced node, and export values used in
/// other blocks.
void SelectionDAGBuilder::visit(const Instruction &I) {
  // Set up outgoing PHI node register values before emitting the terminator.
  if (I.isTerminator()) {
    HandlePHINodesInSuccessorBlocks(I.getParent());
  }

  // Increase the SDNodeOrder if dealing with a non-debug instruction.
  if (!isa<DbgInfoIntrinsic>(I))
    ++SDNodeOrder;

  CurInst = &I;

  visit(I.getOpcode(), I);

  if (auto *FPMO = dyn_cast<FPMathOperator>(&I)) {
    // Propagate the fast-math-flags of this IR instruction to the DAG node that
    // maps to this instruction.
    // TODO: We could handle all flags (nsw, etc) here.
    // TODO: If an IR instruction maps to >1 node, only the final node will have
    //       flags set.
    if (SDNode *Node = getNodeForIRValue(&I)) {
      SDNodeFlags IncomingFlags;
      IncomingFlags.copyFMF(*FPMO);
      if (!Node->getFlags().isDefined())
        Node->setFlags(IncomingFlags);
      else
        Node->intersectFlagsWith(IncomingFlags);
    }
  }

  if (!I.isTerminator() && !HasTailCall &&
      !isStatepoint(&I)) // statepoints handle their exports internally
    CopyToExportRegsIfNeeded(&I);

  CurInst = nullptr;
}

/// PHI nodes are lowered by HandlePHINodesInSuccessorBlocks, never visited
/// directly.
void SelectionDAGBuilder::visitPHI(const PHINode &) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit PHI nodes!");
}

/// Dispatch on the IR opcode to the matching visit##OPCODE handler.
void SelectionDAGBuilder::visit(unsigned Opcode, const User &I) {
  // Note: this doesn't use InstVisitor, because it has to work with
  // ConstantExpr's in addition to instructions.
  switch (Opcode) {
  default: llvm_unreachable("Unknown instruction type encountered!");
    // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE: visit##OPCODE((const CLASS&)I); break;
#include "llvm/IR/Instruction.def"
  }
}

/// Drop (after a salvage attempt) every dangling dbg.value whose variable is
/// \p Variable and whose fragment overlaps \p Expr.
void SelectionDAGBuilder::dropDanglingDebugInfo(const DILocalVariable *Variable,
                                                const DIExpression *Expr) {
  auto isMatchingDbgValue = [&](DanglingDebugInfo &DDI) {
    const DbgValueInst *DI = DDI.getDI();
    DIVariable *DanglingVariable = DI->getVariable();
    DIExpression *DanglingExpr = DI->getExpression();
    if (DanglingVariable == Variable && Expr->fragmentsOverlap(DanglingExpr)) {
      LLVM_DEBUG(dbgs() << "Dropping dangling debug info for " << *DI << "\n");
      return true;
    }
    return false;
  };

  for (auto &DDIMI : DanglingDebugInfoMap) {
    DanglingDebugInfoVector &DDIV = DDIMI.second;

    // If debug info is to be dropped, run it through final checks to see
    // whether it can be salvaged.
    for (auto &DDI : DDIV)
      if (isMatchingDbgValue(DDI))
        salvageUnresolvedDbgValue(DDI);

    DDIV.erase(remove_if(DDIV, isMatchingDbgValue), DDIV.end());
  }
}

// resolveDanglingDebugInfo - if we saw an earlier dbg_value referring to V,
// generate the debug data structures now that we've seen its definition.
/// Emit SDDbgValue nodes for every dangling dbg.value that referred to V, now
/// that V's SDValue (Val) is known, then clear the dangling list for V.
void SelectionDAGBuilder::resolveDanglingDebugInfo(const Value *V,
                                                   SDValue Val) {
  auto DanglingDbgInfoIt = DanglingDebugInfoMap.find(V);
  if (DanglingDbgInfoIt == DanglingDebugInfoMap.end())
    return;

  DanglingDebugInfoVector &DDIV = DanglingDbgInfoIt->second;
  for (auto &DDI : DDIV) {
    const DbgValueInst *DI = DDI.getDI();
    assert(DI && "Ill-formed DanglingDebugInfo");
    DebugLoc dl = DDI.getdl();
    // NOTE(review): Val.getNode() is dereferenced here but only null-checked
    // below; presumably callers never pass a null-node Val -- confirm.
    unsigned ValSDNodeOrder = Val.getNode()->getIROrder();
    unsigned DbgSDNodeOrder = DDI.getSDNodeOrder();
    DILocalVariable *Variable = DI->getVariable();
    DIExpression *Expr = DI->getExpression();
    assert(Variable->isValidLocationForIntrinsic(dl) &&
           "Expected inlined-at fields to agree");
    SDDbgValue *SDV;
    if (Val.getNode()) {
      // FIXME: I doubt that it is correct to resolve a dangling DbgValue as a
      // FuncArgumentDbgValue (it would be hoisted to the function entry, and if
      // we couldn't resolve it directly when examining the DbgValue intrinsic
      // in the first place we should not be more successful here). Unless we
      // have some test case that prove this to be correct we should avoid
      // calling EmitFuncArgumentDbgValue here.
      if (!EmitFuncArgumentDbgValue(V, Variable, Expr, dl, false, Val)) {
        LLVM_DEBUG(dbgs() << "Resolve dangling debug info [order="
                          << DbgSDNodeOrder << "] for:\n " << *DI << "\n");
        LLVM_DEBUG(dbgs() << "  By mapping to:\n    "; Val.dump());
        // Increase the SDNodeOrder for the DbgValue here to make sure it is
        // inserted after the definition of Val when emitting the instructions
        // after ISel. An alternative could be to teach
        // ScheduleDAGSDNodes::EmitSchedule to delay the insertion properly.
        LLVM_DEBUG(if (ValSDNodeOrder > DbgSDNodeOrder) dbgs()
                   << "changing SDNodeOrder from " << DbgSDNodeOrder << " to "
                   << ValSDNodeOrder << "\n");
        SDV = getDbgValue(Val, Variable, Expr, dl,
                          std::max(DbgSDNodeOrder, ValSDNodeOrder));
        DAG.AddDbgValue(SDV, Val.getNode(), false);
      } else
        LLVM_DEBUG(dbgs() << "Resolved dangling debug info for " << *DI
                          << "in EmitFuncArgumentDbgValue\n");
    } else {
      // No node for V: terminate the variable's location range with an undef.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      auto Undef =
          UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
      auto SDV =
          DAG.getConstantDbgValue(Variable, Expr, Undef, dl, DbgSDNodeOrder);
      DAG.AddDbgValue(SDV, nullptr, false);
    }
  }
  DDIV.clear();
}

/// Last-chance recovery for a dangling dbg.value: walk back through the
/// defining instructions (salvageDebugInfoImpl) looking for an operand that
/// can be encoded in this DAG; if none is found, emit an undef DBG_VALUE so
/// earlier locations are properly terminated.
void SelectionDAGBuilder::salvageUnresolvedDbgValue(DanglingDebugInfo &DDI) {
  Value *V = DDI.getDI()->getValue();
  DILocalVariable *Var = DDI.getDI()->getVariable();
  DIExpression *Expr = DDI.getDI()->getExpression();
  DebugLoc DL = DDI.getdl();
  DebugLoc InstDL = DDI.getDI()->getDebugLoc();
  unsigned SDOrder = DDI.getSDNodeOrder();

  // Currently we consider only dbg.value intrinsics -- we tell the salvager
  // that DW_OP_stack_value is desired.
  assert(isa<DbgValueInst>(DDI.getDI()));
  bool StackValue = true;

  // Can this Value can be encoded without any further work?
  if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder))
    return;

  // Attempt to salvage back through as many instructions as possible. Bail if
  // a non-instruction is seen, such as a constant expression or global
  // variable. FIXME: Further work could recover those too.
  while (isa<Instruction>(V)) {
    Instruction &VAsInst = *cast<Instruction>(V);
    DIExpression *NewExpr = salvageDebugInfoImpl(VAsInst, Expr, StackValue);

    // If we cannot salvage any further, and haven't yet found a suitable debug
    // expression, bail out.
    if (!NewExpr)
      break;

    // New value and expr now represent this debuginfo.
    V = VAsInst.getOperand(0);
    Expr = NewExpr;

    // Some kind of simplification occurred: check whether the operand of the
    // salvaged debug expression can be encoded in this DAG.
    if (handleDebugValue(V, Var, Expr, DL, InstDL, SDOrder)) {
      LLVM_DEBUG(dbgs() << "Salvaged debug location info for:\n "
                        << DDI.getDI() << "\nBy stripping back to:\n " << V);
      return;
    }
  }

  // This was the final opportunity to salvage this debug information, and it
  // couldn't be done. Place an undef DBG_VALUE at this location to terminate
  // any earlier variable location.
  auto Undef = UndefValue::get(DDI.getDI()->getVariableLocation()->getType());
  auto SDV = DAG.getConstantDbgValue(Var, Expr, Undef, DL, SDNodeOrder);
  DAG.AddDbgValue(SDV, nullptr, false);

  LLVM_DEBUG(dbgs() << "Dropping debug value info for:\n " << DDI.getDI()
                    << "\n");
  LLVM_DEBUG(dbgs() << " Last seen at:\n " << *DDI.getDI()->getOperand(0)
                    << "\n");
}

/// Try to emit an SDDbgValue for (V, Var, Expr) right now. Returns true if a
/// debug value was emitted (constant, frame index, existing SDNode, or VReg),
/// false if the caller must let it dangle until V gets an SDNode.
bool SelectionDAGBuilder::handleDebugValue(const Value *V, DILocalVariable *Var,
                                           DIExpression *Expr, DebugLoc dl,
                                           DebugLoc InstDL, unsigned Order) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDDbgValue *SDV;
  if (isa<ConstantInt>(V) || isa<ConstantFP>(V) || isa<UndefValue>(V) ||
      isa<ConstantPointerNull>(V)) {
    // Constants can be described without any DAG node.
    SDV = DAG.getConstantDbgValue(Var, Expr, V, dl, SDNodeOrder);
    DAG.AddDbgValue(SDV, nullptr, false);
    return true;
  }

  // If the Value is a frame index, we can create a FrameIndex debug value
  // without relying on the DAG at all.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    auto SI = FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      auto SDV =
          DAG.getFrameIndexDbgValue(Var, Expr, SI->second,
                                    /*IsIndirect*/ false, dl, SDNodeOrder);
      // Do not attach the SDNodeDbgValue to an SDNode: this variable location
      // is still available even if the SDNode gets optimized out.
      DAG.AddDbgValue(SDV, nullptr, false);
      return true;
    }
  }

  // Do not use getValue() in here; we don't want to generate code at
  // this point if it hasn't been done yet.
  SDValue N = NodeMap[V];
  if (!N.getNode() && isa<Argument>(V)) // Check unused arguments map.
    N = UnusedArgNodeMap[V];
  if (N.getNode()) {
    if (EmitFuncArgumentDbgValue(V, Var, Expr, dl, false, N))
      return true;
    SDV = getDbgValue(N, Var, Expr, dl, SDNodeOrder);
    DAG.AddDbgValue(SDV, N.getNode(), false);
    return true;
  }

  // Special rules apply for the first dbg.values of parameter variables in a
  // function. Identify them by the fact they reference Argument Values, that
  // they're parameters, and they are parameters of the current function. We
  // need to let them dangle until they get an SDNode.
  bool IsParamOfFunc = isa<Argument>(V) && Var->isParameter() &&
                       !InstDL.getInlinedAt();
  if (!IsParamOfFunc) {
    // The value is not used in this block yet (or it would have an SDNode).
    // We still want the value to appear for the user if possible -- if it has
    // an associated VReg, we can refer to that instead.
    auto VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end()) {
      unsigned Reg = VMI->second;
      // If this is a PHI node, it may be split up into several MI PHI nodes
      // (in FunctionLoweringInfo::set).
      RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg,
                       V->getType(), None);
      if (RFV.occupiesMultipleRegs()) {
        // The value is spread over several registers: describe it as a series
        // of register-sized fragments.
        unsigned Offset = 0;
        unsigned BitsToDescribe = 0;
        if (auto VarSize = Var->getSizeInBits())
          BitsToDescribe = *VarSize;
        if (auto Fragment = Expr->getFragmentInfo())
          BitsToDescribe = Fragment->SizeInBits;
        for (auto RegAndSize : RFV.getRegsAndSizes()) {
          unsigned RegisterSize = RegAndSize.second;
          // Bail out if all bits are described already.
          if (Offset >= BitsToDescribe)
            break;
          // Clamp the last fragment to the variable's remaining bits.
          unsigned FragmentSize = (Offset + RegisterSize > BitsToDescribe)
              ? BitsToDescribe - Offset : RegisterSize;
          auto FragmentExpr = DIExpression::createFragmentExpression(
              Expr, Offset, FragmentSize);
          if (!FragmentExpr)
            continue;
          SDV = DAG.getVRegDbgValue(Var, *FragmentExpr, RegAndSize.first,
                                    false, dl, SDNodeOrder);
          DAG.AddDbgValue(SDV, nullptr, false);
          Offset += RegisterSize;
        }
      } else {
        SDV = DAG.getVRegDbgValue(Var, Expr, Reg, false, dl, SDNodeOrder);
        DAG.AddDbgValue(SDV, nullptr, false);
      }
      return true;
    }
  }

  return false;
}

/// Final pass over all remaining dangling debug info: salvage what we can,
/// then clear the map.
void SelectionDAGBuilder::resolveOrClearDbgInfo() {
  // Try to fixup any remaining dangling debug info -- and drop it if we can't.
  for (auto &Pair : DanglingDebugInfoMap)
    for (auto &DDI : Pair.second)
      salvageUnresolvedDbgValue(DDI);
  clearDanglingDebugInfo();
}

/// getCopyFromRegs - If there was virtual register allocated for the value V
/// emit CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
SDValue SelectionDAGBuilder::getCopyFromRegs(const Value *V, Type *Ty) {
  DenseMap<const Value *, unsigned>::iterator It = FuncInfo.ValueMap.find(V);
  SDValue Result;

  if (It != FuncInfo.ValueMap.end()) {
    unsigned InReg = It->second;

    RegsForValue RFV(*DAG.getContext(), DAG.getTargetLoweringInfo(),
                     DAG.getDataLayout(), InReg, Ty,
                     None); // This is not an ABI copy.
    SDValue Chain = DAG.getEntryNode();
    Result = RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr,
                                 V);
    resolveDanglingDebugInfo(V, Result);
  }

  return Result;
}

/// getValue - Return an SDValue for the given Value.
SDValue SelectionDAGBuilder::getValue(const Value *V) {
  // If we already have an SDValue for this value, use it. It's important
  // to do this first, so that we don't create a CopyFromReg if we already
  // have a regular SDValue.
  SDValue &N = NodeMap[V];
  if (N.getNode()) return N;

  // If there's a virtual register allocated and initialized for this
  // value, use it.
  if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
    return copyFromReg;

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

// Return true if SDValue exists for the given Value
bool SelectionDAGBuilder::findValue(const Value *V) const {
  return (NodeMap.find(V) != NodeMap.end()) ||
    (FuncInfo.ValueMap.find(V) != FuncInfo.ValueMap.end());
}

/// getNonRegisterValue - Return an SDValue for the given Value, but
/// don't look in FuncInfo.ValueMap for a virtual register.
SDValue SelectionDAGBuilder::getNonRegisterValue(const Value *V) {
  // If we already have an SDValue for this value, use it.
  SDValue &N = NodeMap[V];
  if (N.getNode()) {
    if (isa<ConstantSDNode>(N) || isa<ConstantFPSDNode>(N)) {
      // Remove the debug location from the node as the node is about to be used
      // in a location which may differ from the original debug location. This
      // is relevant to Constant and ConstantFP nodes because they can appear
      // as constant expressions inside PHI nodes.
      N->setDebugLoc(DebugLoc());
    }
    return N;
  }

  // Otherwise create a new SDValue and remember it.
  SDValue Val = getValueImpl(V);
  NodeMap[V] = Val;
  resolveDanglingDebugInfo(V, Val);
  return Val;
}

/// getValueImpl - Helper function for getValue and getNonRegisterValue.
/// Create an SDValue for the given value.
/// Build a fresh SDValue for V: handles every kind of Constant (scalars,
/// aggregates, vectors, constant expressions), static allocas (as frame
/// indices), and fast-isel-deferred instructions (as CopyFromReg).
SDValue SelectionDAGBuilder::getValueImpl(const Value *V) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (const Constant *C = dyn_cast<Constant>(V)) {
    EVT VT = TLI.getValueType(DAG.getDataLayout(), V->getType(), true);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(C))
      return DAG.getConstant(*CI, getCurSDLoc(), VT);

    if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
      return DAG.getGlobalAddress(GV, getCurSDLoc(), VT);

    if (isa<ConstantPointerNull>(C)) {
      // Null pointers are all-zero in the pointer type of their address space.
      unsigned AS = V->getType()->getPointerAddressSpace();
      return DAG.getConstant(0, getCurSDLoc(),
                             TLI.getPointerTy(DAG.getDataLayout(), AS));
    }

    if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
      return DAG.getConstantFP(*CFP, getCurSDLoc(), VT);

    if (isa<UndefValue>(C) && !V->getType()->isAggregateType())
      return DAG.getUNDEF(VT);

    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      // Lower the constant expression like an instruction; the visit
      // populates NodeMap[V].
      visit(CE->getOpcode(), *CE);
      SDValue N1 = NodeMap[V];
      assert(N1.getNode() && "visit didn't populate the NodeMap!");
      return N1;
    }

    if (isa<ConstantStruct>(C) || isa<ConstantArray>(C)) {
      SmallVector<SDValue, 4> Constants;
      for (User::const_op_iterator OI = C->op_begin(), OE = C->op_end();
           OI != OE; ++OI) {
        SDNode *Val = getValue(*OI).getNode();
        // If the operand is an empty aggregate, there are no values.
        if (!Val) continue;
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Constants.push_back(SDValue(Val, i));
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const ConstantDataSequential *CDS =
          dyn_cast<ConstantDataSequential>(C)) {
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
        SDNode *Val = getValue(CDS->getElementAsConstant(i)).getNode();
        // Add each leaf value from the operand to the Constants list
        // to form a flattened list of all the values.
        for (unsigned i = 0, e = Val->getNumValues(); i != e; ++i)
          Ops.push_back(SDValue(Val, i));
      }

      if (isa<ArrayType>(CDS->getType()))
        return DAG.getMergeValues(Ops, getCurSDLoc());
      return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
    }

    if (C->getType()->isStructTy() || C->getType()->isArrayTy()) {
      assert((isa<ConstantAggregateZero>(C) || isa<UndefValue>(C)) &&
             "Unknown struct or array constant!");

      SmallVector<EVT, 4> ValueVTs;
      ComputeValueVTs(TLI, DAG.getDataLayout(), C->getType(), ValueVTs);
      unsigned NumElts = ValueVTs.size();
      if (NumElts == 0)
        return SDValue(); // empty struct
      SmallVector<SDValue, 4> Constants(NumElts);
      for (unsigned i = 0; i != NumElts; ++i) {
        EVT EltVT = ValueVTs[i];
        if (isa<UndefValue>(C))
          Constants[i] = DAG.getUNDEF(EltVT);
        else if (EltVT.isFloatingPoint())
          Constants[i] = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
        else
          Constants[i] = DAG.getConstant(0, getCurSDLoc(), EltVT);
      }

      return DAG.getMergeValues(Constants, getCurSDLoc());
    }

    if (const BlockAddress *BA = dyn_cast<BlockAddress>(C))
      return DAG.getBlockAddress(BA, VT);

    // Remaining constant kinds must be vectors.
    VectorType *VecTy = cast<VectorType>(V->getType());
    unsigned NumElements = VecTy->getNumElements();

    // Now that we know the number and type of the elements, get that number of
    // elements into the Ops array based on what kind of constant it is.
    SmallVector<SDValue, 16> Ops;
    if (const ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
      for (unsigned i = 0; i != NumElements; ++i)
        Ops.push_back(getValue(CV->getOperand(i)));
    } else {
      assert(isa<ConstantAggregateZero>(C) && "Unknown vector constant!");
      EVT EltVT =
          TLI.getValueType(DAG.getDataLayout(), VecTy->getElementType());

      SDValue Op;
      if (EltVT.isFloatingPoint())
        Op = DAG.getConstantFP(0, getCurSDLoc(), EltVT);
      else
        Op = DAG.getConstant(0, getCurSDLoc(), EltVT);
      Ops.assign(NumElements, Op);
    }

    // Create a BUILD_VECTOR node.
    return NodeMap[V] = DAG.getBuildVector(VT, getCurSDLoc(), Ops);
  }

  // If this is a static alloca, generate it as the frameindex instead of
  // computation.
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    DenseMap<const AllocaInst*, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end())
      return DAG.getFrameIndex(SI->second,
                               TLI.getFrameIndexTy(DAG.getDataLayout()));
  }

  // If this is an instruction which fast-isel has deferred, select it now.
  if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
    unsigned InReg = FuncInfo.InitializeRegForValue(Inst);

    RegsForValue RFV(*DAG.getContext(), TLI, DAG.getDataLayout(), InReg,
                     Inst->getType(), getABIRegCopyCC(V));
    SDValue Chain = DAG.getEntryNode();
    return RFV.getCopyFromRegs(DAG, FuncInfo, getCurSDLoc(), Chain, nullptr, V);
  }

  llvm_unreachable("Can't get register for value!");
}

/// Lower a catchpad: mark the block as an EH scope entry (and a funclet entry
/// for personalities that use funclets), and emit a CATCHPAD node except on
/// wasm.
void SelectionDAGBuilder::visitCatchPad(const CatchPadInst &I) {
  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Pers == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Pers == EHPersonality::CoreCLR;
  bool IsSEH = isAsynchronousEHPersonality(Pers);
  bool IsWasmCXX = Pers == EHPersonality::Wasm_CXX;
  MachineBasicBlock *CatchPadMBB = FuncInfo.MBB;
  if (!IsSEH)
    CatchPadMBB->setIsEHScopeEntry();
  // In MSVC C++ and CoreCLR, catchblocks are funclets and need prologues.
  if (IsMSVCCXX || IsCoreCLR)
    CatchPadMBB->setIsEHFuncletEntry();
  // Wasm does not need catchpads anymore
  if (!IsWasmCXX)
    DAG.setRoot(DAG.getNode(ISD::CATCHPAD, getCurSDLoc(), MVT::Other,
                            getControlRoot()));
}

void SelectionDAGBuilder::visitCatchRet(const CatchReturnInst &I) {
  // Update machine-CFG edge.
  MachineBasicBlock *TargetMBB = FuncInfo.MBBMap[I.getSuccessor()];
  FuncInfo.MBB->addSuccessor(TargetMBB);

  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsSEH = isAsynchronousEHPersonality(Pers);
  if (IsSEH) {
    // If this is not a fall-through branch or optimizations are switched off,
    // emit the branch.
    if (TargetMBB != NextBlock(FuncInfo.MBB) ||
        TM.getOptLevel() == CodeGenOpt::None)
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(TargetMBB)));
    return;
  }

  // Figure out the funclet membership for the catchret's successor.
  // This will be used by the FuncletLayout pass to determine how to order the
  // BB's.
  // A 'catchret' returns to the outer scope's color.
  Value *ParentPad = I.getCatchSwitchParentPad();
  const BasicBlock *SuccessorColor;
  if (isa<ConstantTokenNone>(ParentPad))
    SuccessorColor = &FuncInfo.Fn->getEntryBlock();
  else
    SuccessorColor = cast<Instruction>(ParentPad)->getParent();
  assert(SuccessorColor && "No parent funclet for catchret!");
  MachineBasicBlock *SuccessorColorMBB = FuncInfo.MBBMap[SuccessorColor];
  assert(SuccessorColorMBB && "No MBB for SuccessorColor!");

  // Create the terminator node.
  SDValue Ret = DAG.getNode(ISD::CATCHRET, getCurSDLoc(), MVT::Other,
                            getControlRoot(), DAG.getBasicBlock(TargetMBB),
                            DAG.getBasicBlock(SuccessorColorMBB));
  DAG.setRoot(Ret);
}

void SelectionDAGBuilder::visitCleanupPad(const CleanupPadInst &CPI) {
  // Don't emit any special code for the cleanuppad instruction. It just marks
  // the start of an EH scope/funclet.
  FuncInfo.MBB->setIsEHScopeEntry();
  auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  if (Pers != EHPersonality::Wasm_CXX) {
    FuncInfo.MBB->setIsEHFuncletEntry();
    FuncInfo.MBB->setIsCleanupFuncletEntry();
  }
}

// For wasm, there's always a single catch pad attached to a catchswitch, and
// the control flow always stops at the single catch pad, as it does for a
// cleanup pad.
In case the exception caught is not of the types the catch pad // catches, it will be rethrown by a rethrow. static void findWasmUnwindDestinations( FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB, BranchProbability Prob, SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>> &UnwindDests) { while (EHPadBB) { const Instruction *Pad = EHPadBB->getFirstNonPHI(); if (isa<CleanupPadInst>(Pad)) { // Stop on cleanup pads. UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob); UnwindDests.back().first->setIsEHScopeEntry(); break; } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) { // Add the catchpad handlers to the possible destinations. We don't // continue to the unwind destination of the catchswitch for wasm. for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) { UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob); UnwindDests.back().first->setIsEHScopeEntry(); } break; } else { continue; } } } /// When an invoke or a cleanupret unwinds to the next EH pad, there are /// many places it could ultimately go. In the IR, we have a single unwind /// destination, but in the machine CFG, we enumerate all the possible blocks. /// This function skips over imaginary basic blocks that hold catchswitch /// instructions, and finds all the "real" machine /// basic block destinations. As those destinations may not be successors of /// EHPadBB, here we also calculate the edge probability to those destinations. /// The passed-in Prob is the edge probability to EHPadBB. 
/// Enumerate the real machine-CFG unwind destinations reachable from EHPadBB,
/// walking through catchswitch blocks and scaling Prob by each traversed edge.
static void findUnwindDestinations(
    FunctionLoweringInfo &FuncInfo, const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  EHPersonality Personality =
    classifyEHPersonality(FuncInfo.Fn->getPersonalityFn());
  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
  bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
  bool IsSEH = isAsynchronousEHPersonality(Personality);

  if (IsWasmCXX) {
    // Wasm has its own, simpler walk (at most one destination).
    findWasmUnwindDestinations(FuncInfo, EHPadBB, Prob, UnwindDests);
    assert(UnwindDests.size() <= 1 &&
           "There should be at most one unwind destination for wasm");
    return;
  }

  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    BasicBlock *NewEHPadBB = nullptr;
    if (isa<LandingPadInst>(Pad)) {
      // Stop on landingpads. They are not funclets.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      break;
    } else if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads. Cleanups are always funclet entries for all known
      // personalities.
      UnwindDests.emplace_back(FuncInfo.MBBMap[EHPadBB], Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
      break;
    } else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(FuncInfo.MBBMap[CatchPadBB], Prob);
        // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
        if (!IsSEH)
          UnwindDests.back().first->setIsEHScopeEntry();
      }
      // Keep walking through the catchswitch's unwind destination.
      NewEHPadBB = CatchSwitch->getUnwindDest();
    } else {
      // NOTE(review): EHPadBB is not advanced on this path (potential
      // infinite loop); presumably unreachable for well-formed EH pads --
      // confirm.
      continue;
    }

    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
      Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
    EHPadBB = NewEHPadBB;
  }
}

/// Lower a cleanupret: record all possible unwind destinations as successors
/// with their probabilities, then emit a CLEANUPRET terminator.
void SelectionDAGBuilder::visitCleanupRet(const CleanupReturnInst &I) {
  // Update successor info.
  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  auto UnwindDest = I.getUnwindDest();
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  BranchProbability UnwindDestProb =
      (BPI && UnwindDest)
          ? BPI->getEdgeProbability(FuncInfo.MBB->getBasicBlock(), UnwindDest)
          : BranchProbability::getZero();
  findUnwindDestinations(FuncInfo, UnwindDest, UnwindDestProb, UnwindDests);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(FuncInfo.MBB, UnwindDest.first, UnwindDest.second);
  }
  FuncInfo.MBB->normalizeSuccProbs();

  // Create the terminator node.
  SDValue Ret =
      DAG.getNode(ISD::CLEANUPRET, getCurSDLoc(), MVT::Other, getControlRoot());
  DAG.setRoot(Ret);
}

void SelectionDAGBuilder::visitCatchSwitch(const CatchSwitchInst &CSI) {
  report_fatal_error("visitCatchSwitch not yet implemented!");
}

/// Lower a ret instruction: either store through the sret demote register,
/// or split the return value into calling-convention register parts, append
/// the swifterror vreg if needed, and call the target's LowerReturn.
void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  auto &DL = DAG.getDataLayout();
  SDValue Chain = getControlRoot();
  SmallVector<ISD::OutputArg, 8> Outs;
  SmallVector<SDValue, 8> OutVals;

  // Calls to @llvm.experimental.deoptimize don't generate a return value, so
  // lower
  //
  //   %val = call <ty> @llvm.experimental.deoptimize()
  //   ret <ty> %val
  //
  // differently.
  if (I.getParent()->getTerminatingDeoptimizeCall()) {
    LowerDeoptimizingReturn();
    return;
  }

  if (!FuncInfo.CanLowerReturn) {
    unsigned DemoteReg = FuncInfo.DemoteRegister;
    const Function *F = I.getParent()->getParent();

    // Emit a store of the return value through the virtual register.
    // Leave Outs empty so that LowerReturn won't try to load return
    // registers the usual way.
    SmallVector<EVT, 1> PtrValueVTs;
    ComputeValueVTs(TLI, DL,
                    F->getReturnType()->getPointerTo(
                        DAG.getDataLayout().getAllocaAddrSpace()),
                    PtrValueVTs);

    SDValue RetPtr = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(),
                                        DemoteReg, PtrValueVTs[0]);
    SDValue RetOp = getValue(I.getOperand(0));

    SmallVector<EVT, 4> ValueVTs, MemVTs;
    SmallVector<uint64_t, 4> Offsets;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs, &MemVTs,
                    &Offsets);
    unsigned NumValues = ValueVTs.size();

    SmallVector<SDValue, 4> Chains(NumValues);
    for (unsigned i = 0; i != NumValues; ++i) {
      // An aggregate return value cannot wrap around the address space, so
      // offsets to its parts don't wrap either.
      SDValue Ptr = DAG.getObjectPtrOffset(getCurSDLoc(), RetPtr, Offsets[i]);

      SDValue Val = RetOp.getValue(RetOp.getResNo() + i);
      if (MemVTs[i] != ValueVTs[i])
        Val = DAG.getPtrExtOrTrunc(Val, getCurSDLoc(), MemVTs[i]);
      Chains[i] = DAG.getStore(Chain, getCurSDLoc(), Val,
          // FIXME: better loc info would be nice.
          Ptr, MachinePointerInfo::getUnknownStack(DAG.getMachineFunction()));
    }

    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(),
                        MVT::Other, Chains);
  } else if (I.getNumOperands() != 0) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(TLI, DL, I.getOperand(0)->getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues) {
      SDValue RetOp = getValue(I.getOperand(0));

      const Function *F = I.getParent()->getParent();

      bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
          I.getOperand(0)->getType(), F->getCallingConv(),
          /*IsVarArg*/ false);

      // Honor signext/zeroext return attributes.
      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;
      if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                          Attribute::SExt))
        ExtendKind = ISD::SIGN_EXTEND;
      else if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                               Attribute::ZExt))
        ExtendKind = ISD::ZERO_EXTEND;

      LLVMContext &Context = F->getContext();
      bool RetInReg = F->getAttributes().hasAttribute(
          AttributeList::ReturnIndex, Attribute::InReg);

      for (unsigned j = 0; j != NumValues; ++j) {
        EVT VT = ValueVTs[j];

        if (ExtendKind != ISD::ANY_EXTEND && VT.isInteger())
          VT = TLI.getTypeForExtReturn(Context, VT, ExtendKind);

        CallingConv::ID CC = F->getCallingConv();

        // Split this value into the register-sized parts required by the
        // calling convention.
        unsigned NumParts = TLI.getNumRegistersForCallingConv(Context, CC, VT);
        MVT PartVT = TLI.getRegisterTypeForCallingConv(Context, CC, VT);
        SmallVector<SDValue, 4> Parts(NumParts);
        getCopyToParts(DAG, getCurSDLoc(),
                       SDValue(RetOp.getNode(), RetOp.getResNo() + j),
                       &Parts[0], NumParts, PartVT, &I, CC, ExtendKind);

        // 'inreg' on function refers to return value
        ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
        if (RetInReg)
          Flags.setInReg();

        if (I.getOperand(0)->getType()->isPointerTy()) {
          Flags.setPointer();
          Flags.setPointerAddrSpace(
              cast<PointerType>(I.getOperand(0)->getType())->getAddressSpace());
        }

        if (NeedsRegBlock) {
          Flags.setInConsecutiveRegs();
          if (j == NumValues - 1)
            Flags.setInConsecutiveRegsLast();
        }

        // Propagate extension type if any
        if (ExtendKind == ISD::SIGN_EXTEND)
          Flags.setSExt();
        else if (ExtendKind == ISD::ZERO_EXTEND)
          Flags.setZExt();

        for (unsigned i = 0; i < NumParts; ++i) {
          Outs.push_back(ISD::OutputArg(Flags, Parts[i].getValueType(),
                                        VT, /*isfixed=*/true, 0, 0));
          OutVals.push_back(Parts[i]);
        }
      }
    }
  }

  // Push in swifterror virtual register as the last element of Outs. This makes
  // sure swifterror virtual register will be returned in the swifterror
  // physical register.
  const Function *F = I.getParent()->getParent();
  if (TLI.supportSwiftError() &&
      F->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) {
    assert(SwiftError.getFunctionArg() && "Need a swift error argument");
    ISD::ArgFlagsTy Flags = ISD::ArgFlagsTy();
    Flags.setSwiftError();
    Outs.push_back(ISD::OutputArg(Flags, EVT(TLI.getPointerTy(DL)) /*vt*/,
                                  EVT(TLI.getPointerTy(DL)) /*argvt*/,
                                  true /*isfixed*/, 1 /*origidx*/,
                                  0 /*partOffs*/));
    // Create SDNode for the swifterror virtual register.
    OutVals.push_back(
        DAG.getRegister(SwiftError.getOrCreateVRegUseAt(
                            &I, FuncInfo.MBB, SwiftError.getFunctionArg()),
                        EVT(TLI.getPointerTy(DL))));
  }

  bool isVarArg = DAG.getMachineFunction().getFunction().isVarArg();
  CallingConv::ID CallConv =
    DAG.getMachineFunction().getFunction().getCallingConv();
  Chain = DAG.getTargetLoweringInfo().LowerReturn(
      Chain, CallConv, isVarArg, Outs, OutVals, getCurSDLoc(), DAG);

  // Verify that the target's LowerReturn behaved as expected.
  assert(Chain.getNode() && Chain.getValueType() == MVT::Other &&
         "LowerReturn didn't return a valid chain!");

  // Update the DAG with the new chain value resulting from return lowering.
  DAG.setRoot(Chain);
}

/// CopyToExportRegsIfNeeded - If the given value has virtual registers
/// created for it, emit nodes to copy the value into the virtual
/// registers.
void SelectionDAGBuilder::CopyToExportRegsIfNeeded(const Value *V) { // Skip empty types if (V->getType()->isEmptyTy()) return; DenseMap<const Value *, unsigned>::iterator VMI = FuncInfo.ValueMap.find(V); if (VMI != FuncInfo.ValueMap.end()) { assert(!V->use_empty() && "Unused value assigned virtual registers!"); CopyValueToVirtualRegister(V, VMI->second); } } /// ExportFromCurrentBlock - If this condition isn't known to be exported from /// the current basic block, add it to ValueMap now so that we'll get a /// CopyTo/FromReg. void SelectionDAGBuilder::ExportFromCurrentBlock(const Value *V) { // No need to export constants. if (!isa<Instruction>(V) && !isa<Argument>(V)) return; // Already exported? if (FuncInfo.isExportedInst(V)) return; unsigned Reg = FuncInfo.InitializeRegForValue(V); CopyValueToVirtualRegister(V, Reg); } bool SelectionDAGBuilder::isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB) { // The operands of the setcc have to be in this block. We don't know // how to export them from some other block. if (const Instruction *VI = dyn_cast<Instruction>(V)) { // Can export from current BB. if (VI->getParent() == FromBB) return true; // Is already exported, noop. return FuncInfo.isExportedInst(V); } // If this is an argument, we can export it if the BB is the entry block or // if it is already exported. if (isa<Argument>(V)) { if (FromBB == &FromBB->getParent()->getEntryBlock()) return true; // Otherwise, can only export this if it is already exported. return FuncInfo.isExportedInst(V); } // Otherwise, constants can always be exported. return true; } /// Return branch probability calculated by BranchProbabilityInfo for IR blocks. 
/// Edge probability Src->Dst: from BPI if available, otherwise a uniform
/// 1/num-successors estimate.
BranchProbability
SelectionDAGBuilder::getEdgeProbability(const MachineBasicBlock *Src,
                                        const MachineBasicBlock *Dst) const {
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N is
    // the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return BPI->getEdgeProbability(SrcBB, DstBB);
}

/// Add Dst as a successor of Src, attaching Prob (or a computed/default
/// probability when Prob is unknown or BPI is absent).
void SelectionDAGBuilder::addSuccessorWithProb(MachineBasicBlock *Src,
                                               MachineBasicBlock *Dst,
                                               BranchProbability Prob) {
  if (!FuncInfo.BPI)
    Src->addSuccessorWithoutProb(Dst);
  else {
    if (Prob.isUnknown())
      Prob = getEdgeProbability(Src, Dst);
    Src->addSuccessor(Dst, Prob);
  }
}

/// True if V is defined in BB (or is not an instruction at all, e.g. a
/// constant or argument).
static bool InBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

/// EmitBranchForMergedCondition - Helper method for FindMergedConditions.
/// This function emits a branch and is used at the leaves of an OR or an
/// AND operator tree.
void
SelectionDAGBuilder::EmitBranchForMergedCondition(const Value *Cond,
                                                  MachineBasicBlock *TBB,
                                                  MachineBasicBlock *FBB,
                                                  MachineBasicBlock *CurBB,
                                                  MachineBasicBlock *SwitchBB,
                                                  BranchProbability TProb,
                                                  BranchProbability FProb,
                                                  bool InvertCond) {
  const BasicBlock *BB = CurBB->getBasicBlock();

  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    // The operands of the cmp have to be in this block. We don't know
    // how to export them from some other block. If this is the first block
    // of the sequence, no exporting is needed.
    if (CurBB == SwitchBB ||
        (isExportableFromCurrentBlock(BOp->getOperand(0), BB) &&
         isExportableFromCurrentBlock(BOp->getOperand(1), BB))) {
      ISD::CondCode Condition;
      if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
        // Integer compare; InvertCond flips the predicate.
        ICmpInst::Predicate Pred =
            InvertCond ? IC->getInversePredicate() : IC->getPredicate();
        Condition = getICmpCondCode(Pred);
      } else {
        const FCmpInst *FC = cast<FCmpInst>(Cond);
        FCmpInst::Predicate Pred =
            InvertCond ? FC->getInversePredicate() : FC->getPredicate();
        Condition = getFCmpCondCode(Pred);
        if (TM.Options.NoNaNsFPMath)
          Condition = getFCmpCodeWithoutNaN(Condition);
      }

      CaseBlock CB(Condition, BOp->getOperand(0), BOp->getOperand(1), nullptr,
                   TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
      SL->SwitchCases.push_back(CB);
      return;
    }
  }

  // Create a CaseBlock record representing this branch.
  // Not a compare: branch on "Cond == true" (or != when inverted).
  ISD::CondCode Opc = InvertCond ? ISD::SETNE : ISD::SETEQ;
  CaseBlock CB(Opc, Cond, ConstantInt::getTrue(*DAG.getContext()),
               nullptr, TBB, FBB, CurBB, getCurSDLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}

/// Recursively decompose an and/or condition tree into a chain of conditional
/// branches (CaseBlocks), distributing branch probabilities over the emitted
/// blocks.
void SelectionDAGBuilder::FindMergedConditions(const Value *Cond,
                                               MachineBasicBlock *TBB,
                                               MachineBasicBlock *FBB,
                                               MachineBasicBlock *CurBB,
                                               MachineBasicBlock *SwitchBB,
                                               Instruction::BinaryOps Opc,
                                               BranchProbability TProb,
                                               BranchProbability FProb,
                                               bool InvertCond) {
  // Skip over not part of the tree and remember to invert op and operands at
  // next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      InBlock(NotCond, CurBB->getBasicBlock())) {
    FindMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  // Compute the effective opcode for Cond, taking into account whether it needs
  // to be inverted, e.g.
  //   and (not (or A, B)), C
  // gets lowered as
  //   and (and (not A, not B), C)
  unsigned BOpc = 0;
  if (BOp) {
    BOpc = BOp->getOpcode();
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  if (!BOp || !(isa<BinaryOperator>(BOp) || isa<CmpInst>(BOp)) ||
      BOpc != unsigned(Opc) || !BOp->hasOneUse() ||
      BOp->getParent() != CurBB->getBasicBlock() ||
      !InBlock(BOp->getOperand(0), CurBB->getBasicBlock()) ||
      !InBlock(BOp->getOperand(1), CurBB->getBasicBlock())) {
    EmitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB,
                                 TProb, FProb, InvertCond);
    return;
  }

  //  Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineFunction &MF = DAG.getMachineFunction();
  MachineBasicBlock *TmpBB = MF.CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.
    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TBB, TmpBB, CurBB, SwitchBB, Opc,
                         NewTrueProb, NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         Probs[0], Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    //  This requires creation of TmpBB after CurBB.
    //
    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //     = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
    // TrueProb for BB1 * FalseProb for TmpBB.
    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    FindMergedConditions(BOp->getOperand(0), TmpBB, FBB, CurBB, SwitchBB, Opc,
                         NewTrueProb, NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    FindMergedConditions(BOp->getOperand(1), TBB, FBB, TmpBB, SwitchBB, Opc,
                         Probs[0], Probs[1], InvertCond);
  }
}

/// If the set of cases should be emitted as a series of branches, return true.
/// If we should emit this as a bunch of and/or'd together conditions, return
/// false.
// NOTE(review): this function continues past the end of this chunk.
bool
SelectionDAGBuilder::ShouldEmitAsBranches(const std::vector<CaseBlock> &Cases) {
  if (Cases.size() != 2) return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
if ((Cases[0].CmpLHS == Cases[1].CmpLHS && Cases[0].CmpRHS == Cases[1].CmpRHS) || (Cases[0].CmpRHS == Cases[1].CmpLHS && Cases[0].CmpLHS == Cases[1].CmpRHS)) { return false; } // Handle: (X != null) | (Y != null) --> (X|Y) != 0 // Handle: (X == null) & (Y == null) --> (X|Y) == 0 if (Cases[0].CmpRHS == Cases[1].CmpRHS && Cases[0].CC == Cases[1].CC && isa<Constant>(Cases[0].CmpRHS) && cast<Constant>(Cases[0].CmpRHS)->isNullValue()) { if (Cases[0].CC == ISD::SETEQ && Cases[0].TrueBB == Cases[1].ThisBB) return false; if (Cases[0].CC == ISD::SETNE && Cases[0].FalseBB == Cases[1].ThisBB) return false; } return true; } void SelectionDAGBuilder::visitBr(const BranchInst &I) { MachineBasicBlock *BrMBB = FuncInfo.MBB; // Update machine-CFG edges. MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)]; if (I.isUnconditional()) { // Update machine-CFG edges. BrMBB->addSuccessor(Succ0MBB); // If this is not a fall-through branch or optimizations are switched off, // emit the branch. if (Succ0MBB != NextBlock(BrMBB) || TM.getOptLevel() == CodeGenOpt::None) DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other, getControlRoot(), DAG.getBasicBlock(Succ0MBB))); return; } // If this condition is one of the special cases we handle, do special stuff // now. const Value *CondVal = I.getCondition(); MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)]; // If this is a series of conditions that are or'd or and'd together, emit // this as a sequence of branches instead of setcc's with and/or operations. // As long as jumps are not expensive, this should improve performance. 
// For example, instead of something like: // cmp A, B // C = seteq // cmp D, E // F = setle // or C, F // jnz foo // Emit: // cmp A, B // je foo // cmp D, E // jle foo if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(CondVal)) { Instruction::BinaryOps Opcode = BOp->getOpcode(); if (!DAG.getTargetLoweringInfo().isJumpExpensive() && BOp->hasOneUse() && !I.hasMetadata(LLVMContext::MD_unpredictable) && (Opcode == Instruction::And || Opcode == Instruction::Or)) { FindMergedConditions(BOp, Succ0MBB, Succ1MBB, BrMBB, BrMBB, Opcode, getEdgeProbability(BrMBB, Succ0MBB), getEdgeProbability(BrMBB, Succ1MBB), /*InvertCond=*/false); // If the compares in later blocks need to use values not currently // exported from this block, export them now. This block should always // be the first entry. assert(SL->SwitchCases[0].ThisBB == BrMBB && "Unexpected lowering!"); // Allow some cases to be rejected. if (ShouldEmitAsBranches(SL->SwitchCases)) { for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) { ExportFromCurrentBlock(SL->SwitchCases[i].CmpLHS); ExportFromCurrentBlock(SL->SwitchCases[i].CmpRHS); } // Emit the branch for this block. visitSwitchCase(SL->SwitchCases[0], BrMBB); SL->SwitchCases.erase(SL->SwitchCases.begin()); return; } // Okay, we decided not to do this, remove any inserted MBB's and clear // SwitchCases. for (unsigned i = 1, e = SL->SwitchCases.size(); i != e; ++i) FuncInfo.MF->erase(SL->SwitchCases[i].ThisBB); SL->SwitchCases.clear(); } } // Create a CaseBlock record representing this branch. CaseBlock CB(ISD::SETEQ, CondVal, ConstantInt::getTrue(*DAG.getContext()), nullptr, Succ0MBB, Succ1MBB, BrMBB, getCurSDLoc()); // Use visitSwitchCase to actually insert the fast branch sequence for this // cond branch. visitSwitchCase(CB, BrMBB); } /// visitSwitchCase - Emits the necessary code to represent a single node in /// the binary search tree resulting from lowering a switch instruction. 
void SelectionDAGBuilder::visitSwitchCase(CaseBlock &CB,
                                          MachineBasicBlock *SwitchBB) {
  SDValue Cond;
  SDValue CondLHS = getValue(CB.CmpLHS);
  SDLoc dl = CB.DL;

  if (CB.CC == ISD::SETTRUE) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
    SwitchBB->normalizeSuccProbs();
    if (CB.TrueBB != NextBlock(SwitchBB)) {
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, getControlRoot(),
                              DAG.getBasicBlock(CB.TrueBB)));
    }
    return;
  }

  auto &TLI = DAG.getTargetLoweringInfo();
  EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), CB.CmpLHS->getType());

  // Build the setcc now.
  if (!CB.CmpMHS) {
    // Two-operand case (no middle value): a plain LHS <CC> RHS compare.
    // Fold "(X == true)" to X and "(X == false)" to !X to
    // handle common cases produced by branch lowering.
    if (CB.CmpRHS == ConstantInt::getTrue(*DAG.getContext()) &&
        CB.CC == ISD::SETEQ)
      Cond = CondLHS;
    else if (CB.CmpRHS == ConstantInt::getFalse(*DAG.getContext()) &&
             CB.CC == ISD::SETEQ) {
      // !X is emitted as X xor 1.
      SDValue True = DAG.getConstant(1, dl, CondLHS.getValueType());
      Cond = DAG.getNode(ISD::XOR, dl, CondLHS.getValueType(), CondLHS, True);
    } else {
      SDValue CondRHS = getValue(CB.CmpRHS);

      // If a pointer's DAG type is larger than its memory type then the DAG
      // values are zero-extended. This breaks signed comparisons so truncate
      // back to the underlying type before doing the compare.
      if (CondLHS.getValueType() != MemVT) {
        CondLHS = DAG.getPtrExtOrTrunc(CondLHS, getCurSDLoc(), MemVT);
        CondRHS = DAG.getPtrExtOrTrunc(CondRHS, getCurSDLoc(), MemVT);
      }
      Cond = DAG.getSetCC(dl, MVT::i1, CondLHS, CondRHS, CB.CC);
    }
  } else {
    // Range case: test Low <= CmpMHS <= High (LHS/RHS hold the constant
    // bounds, MHS holds the value under test).
    assert(CB.CC == ISD::SETLE && "Can handle only LE ranges now");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    SDValue CmpOp = getValue(CB.CmpMHS);
    EVT VT = CmpOp.getValueType();

    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      // Lower bound is the signed minimum: only the upper bound needs testing.
      Cond = DAG.getSetCC(dl, MVT::i1, CmpOp, DAG.getConstant(High, dl, VT),
                          ISD::SETLE);
    } else {
      // Shift the range to start at zero, then do one unsigned compare.
      SDValue SUB = DAG.getNode(ISD::SUB, dl,
                                VT, CmpOp, DAG.getConstant(Low, dl, VT));
      Cond = DAG.getSetCC(dl, MVT::i1, SUB,
                          DAG.getConstant(High-Low, dl, VT), ISD::SETULE);
    }
  }

  // Update successor info
  addSuccessorWithProb(SwitchBB, CB.TrueBB, CB.TrueProb);
  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(SwitchBB, CB.FalseBB, CB.FalseProb);
  SwitchBB->normalizeSuccProbs();

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == NextBlock(SwitchBB)) {
    std::swap(CB.TrueBB, CB.FalseBB);
    SDValue True = DAG.getConstant(1, dl, Cond.getValueType());
    Cond = DAG.getNode(ISD::XOR, dl, Cond.getValueType(), Cond, True);
  }

  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                               MVT::Other, getControlRoot(), Cond,
                               DAG.getBasicBlock(CB.TrueBB));

  // Insert the false branch. Do this even if it's a fall through branch,
  // this makes it easier to do DAG optimizations which require inverting
  // the branch condition.
  BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                       DAG.getBasicBlock(CB.FalseBB));

  DAG.setRoot(BrCond);
}

/// visitJumpTable - Emit JumpTable node in the current MBB
void SelectionDAGBuilder::visitJumpTable(SwitchCG::JumpTable &JT) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  EVT PTy = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  // Re-load the index computed by visitJumpTableHeader from its vreg.
  SDValue Index = DAG.getCopyFromReg(getControlRoot(), getCurSDLoc(),
                                     JT.Reg, PTy);
  SDValue Table = DAG.getJumpTable(JT.JTI, PTy);
  SDValue BrJumpTable = DAG.getNode(ISD::BR_JT, getCurSDLoc(),
                                    MVT::Other, Index.getValue(1),
                                    Table, Index);
  DAG.setRoot(BrJumpTable);
}

/// visitJumpTableHeader - This function emits necessary code to produce index
/// in the JumpTable from switch case.
void SelectionDAGBuilder::visitJumpTableHeader(SwitchCG::JumpTable &JT,
                                               JumpTableHeader &JTH,
                                               MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();

  // Subtract the lowest switch case value from the value being switched on.
  SDValue SwitchOp = getValue(JTH.SValue);
  EVT VT = SwitchOp.getValueType();
  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
                            DAG.getConstant(JTH.First, dl, VT));

  // The SDNode we just created, which holds the value being switched on minus
  // the smallest case value, needs to be copied to a virtual register so it
  // can be used as an index into the jump table in a subsequent basic block.
  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncating.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SwitchOp = DAG.getZExtOrTrunc(Sub, dl, TLI.getPointerTy(DAG.getDataLayout()));

  unsigned JumpTableReg =
      FuncInfo.CreateReg(TLI.getPointerTy(DAG.getDataLayout()));
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl,
                                    JumpTableReg, SwitchOp);
  // Publish the index register so visitJumpTable can read it back.
  JT.Reg = JumpTableReg;

  if (!JTH.OmitRangeCheck) {
    // Emit the range check for the jump table, and branch to the default block
    // for the switch statement if the value being switched on exceeds the
    // largest case in the switch.
    SDValue CMP = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                   Sub.getValueType()),
        Sub, DAG.getConstant(JTH.Last - JTH.First, dl, VT), ISD::SETUGT);

    SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                                 MVT::Other, CopyTo, CMP,
                                 DAG.getBasicBlock(JT.Default));

    // Avoid emitting unnecessary branches to the next block.
    if (JT.MBB != NextBlock(SwitchBB))
      BrCond = DAG.getNode(ISD::BR, dl, MVT::Other, BrCond,
                           DAG.getBasicBlock(JT.MBB));

    DAG.setRoot(BrCond);
  } else {
    // Avoid emitting unnecessary branches to the next block.
    if (JT.MBB != NextBlock(SwitchBB))
      DAG.setRoot(DAG.getNode(ISD::BR, dl, MVT::Other, CopyTo,
                              DAG.getBasicBlock(JT.MBB)));
    else
      DAG.setRoot(CopyTo);
  }
}

/// Create a LOAD_STACK_GUARD node, and let it carry the target specific global
/// variable if there exists one.
static SDValue getLoadStackGuard(SelectionDAG &DAG, const SDLoc &DL,
                                 SDValue &Chain) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
  EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());
  MachineFunction &MF = DAG.getMachineFunction();
  Value *Global = TLI.getSDagStackGuard(*MF.getFunction().getParent());
  MachineSDNode *Node =
      DAG.getMachineNode(TargetOpcode::LOAD_STACK_GUARD, DL, PtrTy, Chain);
  if (Global) {
    // Attach a memory operand describing the guard global so later passes
    // know this is an invariant, dereferenceable load.
    MachinePointerInfo MPInfo(Global);
    auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
                 MachineMemOperand::MODereferenceable;
    MachineMemOperand *MemRef = MF.getMachineMemOperand(
        MPInfo, Flags, PtrTy.getSizeInBits() / 8, DAG.getEVTAlignment(PtrTy));
    DAG.setNodeMemRefs(Node, {MemRef});
  }
  // Adjust for targets whose pointer DAG type differs from the memory type.
  if (PtrTy != PtrMemTy)
    return DAG.getPtrExtOrTrunc(SDValue(Node, 0), DL, PtrMemTy);
  return SDValue(Node, 0);
}

/// Codegen a new tail for a stack protector check ParentMBB which has had its
/// tail spliced into a stack protector check success bb.
///
/// For a high level explanation of how this fits into the stack protector
/// generation see the comment on the declaration of class
/// StackProtectorDescriptor.
void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                                                  MachineBasicBlock *ParentBB) {

  // First create the loads to the guard/stack slot for the comparison.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout());
  EVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout());

  MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
  int FI = MFI.getStackProtectorIndex();

  SDValue Guard;
  SDLoc dl = getCurSDLoc();
  SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy);
  const Module &M = *ParentBB->getParent()->getFunction().getParent();
  unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));

  // Generate code to load the content of the guard slot.
  SDValue GuardVal = DAG.getLoad(
      PtrMemTy, dl, DAG.getEntryNode(), StackSlotPtr,
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
      MachineMemOperand::MOVolatile);

  if (TLI.useStackGuardXorFP())
    GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);

  // Retrieve guard check function, nullptr if instrumentation is inlined.
  if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
    // The target provides a guard check function to validate the guard value.
    // Generate a call to that function with the content of the guard slot as
    // argument.
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");

    TargetLowering::ArgListTy Args;
    TargetLowering::ArgListEntry Entry;
    Entry.Node = GuardVal;
    Entry.Ty = FnTy->getParamType(0);
    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
      Entry.IsInReg = true;
    Args.push_back(Entry);

    TargetLowering::CallLoweringInfo CLI(DAG);
    CLI.setDebugLoc(getCurSDLoc())
        .setChain(DAG.getEntryNode())
        .setCallee(GuardCheckFn->getCallingConv(), FnTy->getReturnType(),
                   getValue(GuardCheckFn), std::move(Args));

    std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI);
    DAG.setRoot(Result.second);
    return;
  }

  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
  // Otherwise, emit a volatile load to retrieve the stack guard value.
  SDValue Chain = DAG.getEntryNode();
  if (TLI.useLoadStackGuardNode()) {
    Guard = getLoadStackGuard(DAG, dl, Chain);
  } else {
    const Value *IRGuard = TLI.getSDagStackGuard(M);
    SDValue GuardPtr = getValue(IRGuard);

    Guard = DAG.getLoad(PtrMemTy, dl, Chain, GuardPtr,
                        MachinePointerInfo(IRGuard, 0), Align,
                        MachineMemOperand::MOVolatile);
  }

  // Perform the comparison via a subtract/getsetcc.
  EVT VT = Guard.getValueType();
  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, GuardVal);
  SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
                                                        *DAG.getContext(),
                                                        Sub.getValueType()),
                             Sub, DAG.getConstant(0, dl, VT), ISD::SETNE);

  // If the sub is not 0, then we know the guard/stackslot do not equal, so
  // branch to failure MBB.
  SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
                               MVT::Other, GuardVal.getOperand(0),
                               Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
  // Otherwise branch to success MBB.
  SDValue Br = DAG.getNode(ISD::BR, dl,
                           MVT::Other, BrCond,
                           DAG.getBasicBlock(SPD.getSuccessMBB()));

  DAG.setRoot(Br);
}

/// Codegen the failure basic block for a stack protector check.
///
/// A failure stack protector machine basic block consists simply of a call to
/// __stack_chk_fail().
///
/// For a high level explanation of how this fits into the stack protector
/// generation see the comment on the declaration of class
/// StackProtectorDescriptor.
void
SelectionDAGBuilder::visitSPDescriptorFailure(StackProtectorDescriptor &SPD) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  TargetLowering::MakeLibCallOptions CallOptions;
  CallOptions.setDiscardResult(true);
  SDValue Chain =
      TLI.makeLibCall(DAG, RTLIB::STACKPROTECTOR_CHECK_FAIL, MVT::isVoid,
                      None, CallOptions, getCurSDLoc()).second;
  // On PS4, the "return address" must still be within the calling function,
  // even if it's at the very end, so emit an explicit TRAP here.
  // Passing 'true' for doesNotReturn above won't generate the trap for us.
  if (TM.getTargetTriple().isPS4CPU())
    Chain = DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, Chain);

  DAG.setRoot(Chain);
}

/// visitBitTestHeader - This function emits necessary code to produce value
/// suitable for "bit tests"
void SelectionDAGBuilder::visitBitTestHeader(BitTestBlock &B,
                                             MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();

  // Subtract the minimum value.
  SDValue SwitchOp = getValue(B.SValue);
  EVT VT = SwitchOp.getValueType();
  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, SwitchOp,
                            DAG.getConstant(B.First, dl, VT));

  // Check range.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue RangeCmp = DAG.getSetCC(
      dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                 Sub.getValueType()),
      Sub, DAG.getConstant(B.Range, dl, VT), ISD::SETUGT);

  // Determine the type of the test operands.
  bool UsePtrType = false;
  if (!TLI.isTypeLegal(VT)) {
    UsePtrType = true;
  } else {
    for (unsigned i = 0, e = B.Cases.size(); i != e; ++i)
      if (!isUIntN(VT.getSizeInBits(), B.Cases[i].Mask)) {
        // Switch table case range are encoded into series of masks.
        // Just use pointer type, it's guaranteed to fit.
        UsePtrType = true;
        break;
      }
  }
  if (UsePtrType) {
    VT = TLI.getPointerTy(DAG.getDataLayout());
    Sub = DAG.getZExtOrTrunc(Sub, dl, VT);
  }

  // Record the register holding the rebased value for visitBitTestCase.
  B.RegVT = VT.getSimpleVT();
  B.Reg = FuncInfo.CreateReg(B.RegVT);
  SDValue CopyTo = DAG.getCopyToReg(getControlRoot(), dl, B.Reg, Sub);

  MachineBasicBlock* MBB = B.Cases[0].ThisBB;

  addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);
  SwitchBB->normalizeSuccProbs();

  SDValue BrRange = DAG.getNode(ISD::BRCOND, dl,
                                MVT::Other, CopyTo, RangeCmp,
                                DAG.getBasicBlock(B.Default));

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != NextBlock(SwitchBB))
    BrRange = DAG.getNode(ISD::BR, dl, MVT::Other, BrRange,
                          DAG.getBasicBlock(MBB));

  DAG.setRoot(BrRange);
}

/// visitBitTestCase - this function produces one "bit test"
void SelectionDAGBuilder::visitBitTestCase(BitTestBlock &BB,
                                           MachineBasicBlock* NextMBB,
                                           BranchProbability BranchProbToNext,
                                           unsigned Reg,
                                           BitTestCase &B,
                                           MachineBasicBlock *SwitchBB) {
  SDLoc dl = getCurSDLoc();
  MVT VT = BB.RegVT;
  SDValue ShiftOp = DAG.getCopyFromReg(getControlRoot(), dl, Reg, VT);
  SDValue Cmp;
  unsigned PopCount = countPopulation(B.Mask);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        ShiftOp, DAG.getConstant(countTrailingZeros(B.Mask), dl, VT),
        ISD::SETEQ);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        ShiftOp, DAG.getConstant(countTrailingOnes(B.Mask), dl, VT),
        ISD::SETNE);
  } else {
    // General case: (1 << value) & Mask != 0.
    // Make desired shift
    SDValue SwitchVal = DAG.getNode(ISD::SHL, dl, VT,
                                    DAG.getConstant(1, dl, VT), ShiftOp);

    // Emit bit tests and jumps
    SDValue AndOp = DAG.getNode(ISD::AND, dl,
                                VT, SwitchVal, DAG.getConstant(B.Mask, dl, VT));
    Cmp = DAG.getSetCC(
        dl, TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT),
        AndOp, DAG.getConstant(0, dl, VT), ISD::SETNE);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  SDValue BrAnd = DAG.getNode(ISD::BRCOND, dl,
                              MVT::Other, getControlRoot(),
                              Cmp, DAG.getBasicBlock(B.TargetBB));

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != NextBlock(SwitchBB))
    BrAnd = DAG.getNode(ISD::BR, dl, MVT::Other, BrAnd,
                        DAG.getBasicBlock(NextMBB));

  DAG.setRoot(BrAnd);
}

/// Lowers an IR `invoke`: dispatches to inline-asm / special-intrinsic /
/// deopt-bundle / plain-call lowering, then wires up the normal and unwind
/// machine-CFG successors and falls through to the normal destination.
void SelectionDAGBuilder::visitInvoke(const InvokeInst &I) {
  MachineBasicBlock *InvokeMBB = FuncInfo.MBB;

  // Retrieve successors. Look through artificial IR level blocks like
  // catchswitch for successors.
  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getSuccessor(0)];
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  assert(!I.hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
         "Cannot lower invokes with arbitrary operand bundles yet!");

  const Value *Callee(I.getCalledValue());
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    visitInlineAsm(&I);
  else if (Fn && Fn->isIntrinsic()) {
    switch (Fn->getIntrinsicID()) {
    default:
      llvm_unreachable("Cannot invoke this intrinsic");
    case Intrinsic::donothing:
      // Ignore invokes to @llvm.donothing: jump directly to the next BB.
      break;
    case Intrinsic::experimental_patchpoint_void:
    case Intrinsic::experimental_patchpoint_i64:
      visitPatchpoint(&I, EHPadBB);
      break;
    case Intrinsic::experimental_gc_statepoint:
      LowerStatepoint(ImmutableStatepoint(&I), EHPadBB);
      break;
    case Intrinsic::wasm_rethrow_in_catch: {
      // This is usually done in visitTargetIntrinsic, but this intrinsic is
      // special because it can be invoked, so we manually lower it to a DAG
      // node here.
      SmallVector<SDValue, 8> Ops;
      Ops.push_back(getRoot()); // inchain
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      Ops.push_back(
          DAG.getTargetConstant(Intrinsic::wasm_rethrow_in_catch, getCurSDLoc(),
                                TLI.getPointerTy(DAG.getDataLayout())));
      SDVTList VTs = DAG.getVTList(ArrayRef<EVT>({MVT::Other})); // outchain
      DAG.setRoot(DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops));
      break;
    }
    }
  } else if (I.countOperandBundlesOfType(LLVMContext::OB_deopt)) {
    // Currently we do not lower any intrinsic calls with deopt operand bundles.
    // Eventually we will support lowering the @llvm.experimental.deoptimize
    // intrinsic, and right now there are no plans to support other intrinsics
    // with deopt state.
    LowerCallSiteWithDeoptBundle(&I, getValue(Callee), EHPadBB);
  } else {
    LowerCallTo(&I, getValue(Callee), false, EHPadBB);
  }

  // If the value of the invoke is used outside of its defining block, make it
  // available as a virtual register.
  // We already took care of the exported value for the statepoint instruction
  // during call to the LowerStatepoint.
  if (!isStatepoint(I)) {
    CopyToExportRegsIfNeeded(&I);
  }

  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  BranchProbability EHPadBBProb =
      BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
          : BranchProbability::getZero();
  findUnwindDestinations(FuncInfo, EHPadBB, EHPadBBProb, UnwindDests);

  // Update successor info.
  addSuccessorWithProb(InvokeMBB, Return);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
  }
  InvokeMBB->normalizeSuccProbs();

  // Drop into normal successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
                          MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}

/// Lowers an IR `callbr` (inline-asm only): emits the asm, records the
/// default and indirect destinations as successors, then branches to the
/// default destination.
void SelectionDAGBuilder::visitCallBr(const CallBrInst &I) {
  MachineBasicBlock *CallBrMBB = FuncInfo.MBB;

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  assert(!I.hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
         "Cannot lower callbrs with arbitrary operand bundles yet!");

  assert(isa<InlineAsm>(I.getCalledValue()) &&
         "Only know how to handle inlineasm callbr");
  visitInlineAsm(&I);

  // Retrieve successors.
  MachineBasicBlock *Return = FuncInfo.MBBMap[I.getDefaultDest()];

  // Update successor info.
  addSuccessorWithProb(CallBrMBB, Return);
  for (unsigned i = 0, e = I.getNumIndirectDests(); i < e; ++i) {
    MachineBasicBlock *Target = FuncInfo.MBBMap[I.getIndirectDest(i)];
    addSuccessorWithProb(CallBrMBB, Target);
  }
  CallBrMBB->normalizeSuccProbs();

  // Drop into default successor.
  DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(),
                          MVT::Other, getControlRoot(),
                          DAG.getBasicBlock(Return)));
}

/// `resume` is expected to be handled before reaching this builder.
void SelectionDAGBuilder::visitResume(const ResumeInst &RI) {
  llvm_unreachable("SelectionDAGBuilder shouldn't visit resume instructions!");
}

/// Lowers an IR `landingpad`: copies the exception pointer and selector from
/// their pre-assigned virtual registers into a two-value MERGE_VALUES node.
/// Emits nothing for token-typed landingpads or when the personality defines
/// no exception registers.
void SelectionDAGBuilder::visitLandingPad(const LandingPadInst &LP) {
  assert(FuncInfo.MBB->isEHPad() &&
         "Call to landingpad not in landing pad!");

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother to create these DAG nodes.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const Constant *PersonalityFn = FuncInfo.Fn->getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return;

  SmallVector<EVT, 2> ValueVTs;
  SDLoc dl = getCurSDLoc();
  ComputeValueVTs(TLI, DAG.getDataLayout(), LP.getType(), ValueVTs);
  assert(ValueVTs.size() == 2 && "Only two-valued landingpads are supported");

  // Get the two live-in registers as SDValues. The physregs have already been
  // copied into virtual registers.
  SDValue Ops[2];
  if (FuncInfo.ExceptionPointerVirtReg) {
    Ops[0] = DAG.getZExtOrTrunc(
        DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                           FuncInfo.ExceptionPointerVirtReg,
                           TLI.getPointerTy(DAG.getDataLayout())),
        dl, ValueVTs[0]);
  } else {
    Ops[0] = DAG.getConstant(0, dl, TLI.getPointerTy(DAG.getDataLayout()));
  }
  Ops[1] = DAG.getZExtOrTrunc(
      DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                         FuncInfo.ExceptionSelectorVirtReg,
                         TLI.getPointerTy(DAG.getDataLayout())),
      dl, ValueVTs[1]);

  // Merge into one.
  SDValue Res = DAG.getNode(ISD::MERGE_VALUES, dl,
                            DAG.getVTList(ValueVTs), Ops);
  setValue(&LP, Res);
}

/// After a block is split, repoint any pending jump-table headers and
/// bit-test parents that referenced First at the new tail block Last.
void SelectionDAGBuilder::UpdateSplitBlock(MachineBasicBlock *First,
                                           MachineBasicBlock *Last) {
  // Update JTCases.
  for (unsigned i = 0, e = SL->JTCases.size(); i != e; ++i)
    if (SL->JTCases[i].first.HeaderBB == First)
      SL->JTCases[i].first.HeaderBB = Last;

  // Update BitTestCases.
  for (unsigned i = 0, e = SL->BitTestCases.size(); i != e; ++i)
    if (SL->BitTestCases[i].Parent == First)
      SL->BitTestCases[i].Parent = Last;
}

/// Lowers an IR `indirectbr`: adds each distinct successor once, then emits
/// a BRIND on the target address.
void SelectionDAGBuilder::visitIndirectBr(const IndirectBrInst &I) {
  MachineBasicBlock *IndirectBrMBB = FuncInfo.MBB;

  // Update machine-CFG edges with unique successors.
  SmallSet<BasicBlock*, 32> Done;
  for (unsigned i = 0, e = I.getNumSuccessors(); i != e; ++i) {
    BasicBlock *BB = I.getSuccessor(i);
    bool Inserted = Done.insert(BB).second;
    if (!Inserted)
        continue;

    MachineBasicBlock *Succ = FuncInfo.MBBMap[BB];
    addSuccessorWithProb(IndirectBrMBB, Succ);
  }
  IndirectBrMBB->normalizeSuccProbs();

  DAG.setRoot(DAG.getNode(ISD::BRIND, getCurSDLoc(),
                          MVT::Other, getControlRoot(),
                          getValue(I.getAddress())));
}

/// Lowers an IR `unreachable`: emits a TRAP when TrapUnreachable is set,
/// except (optionally) directly after a noreturn call.
void SelectionDAGBuilder::visitUnreachable(const UnreachableInst &I) {
  if (!DAG.getTarget().Options.TrapUnreachable)
    return;

  // We may be able to ignore unreachable behind a noreturn call.
  if (DAG.getTarget().Options.NoTrapAfterNoreturn) {
    const BasicBlock &BB = *I.getParent();
    if (&I != &BB.front()) {
      BasicBlock::const_iterator PredI =
        std::prev(BasicBlock::const_iterator(&I));
      if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
        if (Call->doesNotReturn())
          return;
      }
    }
  }

  DAG.setRoot(DAG.getNode(ISD::TRAP, getCurSDLoc(), MVT::Other, DAG.getRoot()));
}

/// Lowers an IR `fsub`, special-casing "-0.0 - X" into an FNEG node.
void SelectionDAGBuilder::visitFSub(const User &I) {
  // -0.0 - X --> fneg
  Type *Ty = I.getType();
  if (isa<Constant>(I.getOperand(0)) &&
      I.getOperand(0) == ConstantFP::getZeroValueForNegation(Ty)) {
    SDValue Op2 = getValue(I.getOperand(1));
    setValue(&I, DAG.getNode(ISD::FNEG, getCurSDLoc(),
                             Op2.getValueType(), Op2));
    return;
  }

  visitBinary(I, ISD::FSUB);
}

/// Checks if the given instruction performs a vector reduction, in which case
/// we have the freedom to alter the elements in the result as long as the
/// reduction of them stays unchanged.
static bool isVectorReductionOp(const User *I) { const Instruction *Inst = dyn_cast<Instruction>(I); if (!Inst || !Inst->getType()->isVectorTy()) return false; auto OpCode = Inst->getOpcode(); switch (OpCode) { case Instruction::Add: case Instruction::Mul: case Instruction::And: case Instruction::Or: case Instruction::Xor: break; case Instruction::FAdd: case Instruction::FMul: if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst)) if (FPOp->getFastMathFlags().isFast()) break; LLVM_FALLTHROUGH; default: return false; } unsigned ElemNum = Inst->getType()->getVectorNumElements(); // Ensure the reduction size is a power of 2. if (!isPowerOf2_32(ElemNum)) return false; unsigned ElemNumToReduce = ElemNum; // Do DFS search on the def-use chain from the given instruction. We only // allow four kinds of operations during the search until we reach the // instruction that extracts the first element from the vector: // // 1. The reduction operation of the same opcode as the given instruction. // // 2. PHI node. // // 3. ShuffleVector instruction together with a reduction operation that // does a partial reduction. // // 4. ExtractElement that extracts the first element from the vector, and we // stop searching the def-use chain here. // // 3 & 4 above perform a reduction on all elements of the vector. We push defs // from 1-3 to the stack to continue the DFS. The given instruction is not // a reduction operation if we meet any other instructions other than those // listed above. 
SmallVector<const User *, 16> UsersToVisit{Inst}; SmallPtrSet<const User *, 16> Visited; bool ReduxExtracted = false; while (!UsersToVisit.empty()) { auto User = UsersToVisit.back(); UsersToVisit.pop_back(); if (!Visited.insert(User).second) continue; for (const auto &U : User->users()) { auto Inst = dyn_cast<Instruction>(U); if (!Inst) return false; if (Inst->getOpcode() == OpCode || isa<PHINode>(U)) { if (const FPMathOperator *FPOp = dyn_cast<const FPMathOperator>(Inst)) if (!isa<PHINode>(FPOp) && !FPOp->getFastMathFlags().isFast()) return false; UsersToVisit.push_back(U); } else if (const ShuffleVectorInst *ShufInst = dyn_cast<ShuffleVectorInst>(U)) { // Detect the following pattern: A ShuffleVector instruction together // with a reduction that do partial reduction on the first and second // ElemNumToReduce / 2 elements, and store the result in // ElemNumToReduce / 2 elements in another vector. unsigned ResultElements = ShufInst->getType()->getVectorNumElements(); if (ResultElements < ElemNum) return false; if (ElemNumToReduce == 1) return false; if (!isa<UndefValue>(U->getOperand(1))) return false; for (unsigned i = 0; i < ElemNumToReduce / 2; ++i) if (ShufInst->getMaskValue(i) != int(i + ElemNumToReduce / 2)) return false; for (unsigned i = ElemNumToReduce / 2; i < ElemNum; ++i) if (ShufInst->getMaskValue(i) != -1) return false; // There is only one user of this ShuffleVector instruction, which // must be a reduction operation. if (!U->hasOneUse()) return false; auto U2 = dyn_cast<Instruction>(*U->user_begin()); if (!U2 || U2->getOpcode() != OpCode) return false; // Check operands of the reduction operation. if ((U2->getOperand(0) == U->getOperand(0) && U2->getOperand(1) == U) || (U2->getOperand(1) == U->getOperand(0) && U2->getOperand(0) == U)) { UsersToVisit.push_back(U2); ElemNumToReduce /= 2; } else return false; } else if (isa<ExtractElementInst>(U)) { // At this moment we should have reduced all elements in the vector. 
if (ElemNumToReduce != 1) return false; const ConstantInt *Val = dyn_cast<ConstantInt>(U->getOperand(1)); if (!Val || !Val->isZero()) return false; ReduxExtracted = true; } else return false; } } return ReduxExtracted; } void SelectionDAGBuilder::visitUnary(const User &I, unsigned Opcode) { SDNodeFlags Flags; SDValue Op = getValue(I.getOperand(0)); SDValue UnNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op.getValueType(), Op, Flags); setValue(&I, UnNodeValue); } void SelectionDAGBuilder::visitBinary(const User &I, unsigned Opcode) { SDNodeFlags Flags; if (auto *OFBinOp = dyn_cast<OverflowingBinaryOperator>(&I)) { Flags.setNoSignedWrap(OFBinOp->hasNoSignedWrap()); Flags.setNoUnsignedWrap(OFBinOp->hasNoUnsignedWrap()); } if (auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) { Flags.setExact(ExactOp->isExact()); } if (isVectorReductionOp(&I)) { Flags.setVectorReduction(true); LLVM_DEBUG(dbgs() << "Detected a reduction operation:" << I << "\n"); } SDValue Op1 = getValue(I.getOperand(0)); SDValue Op2 = getValue(I.getOperand(1)); SDValue BinNodeValue = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2, Flags); setValue(&I, BinNodeValue); } void SelectionDAGBuilder::visitShift(const User &I, unsigned Opcode) { SDValue Op1 = getValue(I.getOperand(0)); SDValue Op2 = getValue(I.getOperand(1)); EVT ShiftTy = DAG.getTargetLoweringInfo().getShiftAmountTy( Op1.getValueType(), DAG.getDataLayout()); // Coerce the shift amount to the right type if we can. if (!I.getType()->isVectorTy() && Op2.getValueType() != ShiftTy) { unsigned ShiftSize = ShiftTy.getSizeInBits(); unsigned Op2Size = Op2.getValueSizeInBits(); SDLoc DL = getCurSDLoc(); // If the operand is smaller than the shift count type, promote it. if (ShiftSize > Op2Size) Op2 = DAG.getNode(ISD::ZERO_EXTEND, DL, ShiftTy, Op2); // If the operand is larger than the shift count type but the shift // count type has enough bits to represent any shift value, truncate // it now. 
This is a common case and it exposes the truncate to // optimization early. else if (ShiftSize >= Log2_32_Ceil(Op2.getValueSizeInBits())) Op2 = DAG.getNode(ISD::TRUNCATE, DL, ShiftTy, Op2); // Otherwise we'll need to temporarily settle for some other convenient // type. Type legalization will make adjustments once the shiftee is split. else Op2 = DAG.getZExtOrTrunc(Op2, DL, MVT::i32); } bool nuw = false; bool nsw = false; bool exact = false; if (Opcode == ISD::SRL || Opcode == ISD::SRA || Opcode == ISD::SHL) { if (const OverflowingBinaryOperator *OFBinOp = dyn_cast<const OverflowingBinaryOperator>(&I)) { nuw = OFBinOp->hasNoUnsignedWrap(); nsw = OFBinOp->hasNoSignedWrap(); } if (const PossiblyExactOperator *ExactOp = dyn_cast<const PossiblyExactOperator>(&I)) exact = ExactOp->isExact(); } SDNodeFlags Flags; Flags.setExact(exact); Flags.setNoSignedWrap(nsw); Flags.setNoUnsignedWrap(nuw); SDValue Res = DAG.getNode(Opcode, getCurSDLoc(), Op1.getValueType(), Op1, Op2, Flags); setValue(&I, Res); } void SelectionDAGBuilder::visitSDiv(const User &I) { SDValue Op1 = getValue(I.getOperand(0)); SDValue Op2 = getValue(I.getOperand(1)); SDNodeFlags Flags; Flags.setExact(isa<PossiblyExactOperator>(&I) && cast<PossiblyExactOperator>(&I)->isExact()); setValue(&I, DAG.getNode(ISD::SDIV, getCurSDLoc(), Op1.getValueType(), Op1, Op2, Flags)); } void SelectionDAGBuilder::visitICmp(const User &I) { ICmpInst::Predicate predicate = ICmpInst::BAD_ICMP_PREDICATE; if (const ICmpInst *IC = dyn_cast<ICmpInst>(&I)) predicate = IC->getPredicate(); else if (const ConstantExpr *IC = dyn_cast<ConstantExpr>(&I)) predicate = ICmpInst::Predicate(IC->getPredicate()); SDValue Op1 = getValue(I.getOperand(0)); SDValue Op2 = getValue(I.getOperand(1)); ISD::CondCode Opcode = getICmpCondCode(predicate); auto &TLI = DAG.getTargetLoweringInfo(); EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType()); // If a pointer's DAG type is larger than its memory type then the DAG values // are 
zero-extended. This breaks signed comparisons so truncate back to the // underlying type before doing the compare. if (Op1.getValueType() != MemVT) { Op1 = DAG.getPtrExtOrTrunc(Op1, getCurSDLoc(), MemVT); Op2 = DAG.getPtrExtOrTrunc(Op2, getCurSDLoc(), MemVT); } EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), I.getType()); setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Opcode)); } void SelectionDAGBuilder::visitFCmp(const User &I) { FCmpInst::Predicate predicate = FCmpInst::BAD_FCMP_PREDICATE; if (const FCmpInst *FC = dyn_cast<FCmpInst>(&I)) predicate = FC->getPredicate(); else if (const ConstantExpr *FC = dyn_cast<ConstantExpr>(&I)) predicate = FCmpInst::Predicate(FC->getPredicate()); SDValue Op1 = getValue(I.getOperand(0)); SDValue Op2 = getValue(I.getOperand(1)); ISD::CondCode Condition = getFCmpCondCode(predicate); auto *FPMO = dyn_cast<FPMathOperator>(&I); if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath) Condition = getFCmpCodeWithoutNaN(Condition); EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), I.getType()); setValue(&I, DAG.getSetCC(getCurSDLoc(), DestVT, Op1, Op2, Condition)); } // Check if the condition of the select has one use or two users that are both // selects with the same condition. static bool hasOnlySelectUsers(const Value *Cond) { return llvm::all_of(Cond->users(), [](const Value *V) { return isa<SelectInst>(V); }); } void SelectionDAGBuilder::visitSelect(const User &I) { SmallVector<EVT, 4> ValueVTs; ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), I.getType(), ValueVTs); unsigned NumValues = ValueVTs.size(); if (NumValues == 0) return; SmallVector<SDValue, 4> Values(NumValues); SDValue Cond = getValue(I.getOperand(0)); SDValue LHSVal = getValue(I.getOperand(1)); SDValue RHSVal = getValue(I.getOperand(2)); auto BaseOps = {Cond}; ISD::NodeType OpCode = Cond.getValueType().isVector() ? 
ISD::VSELECT : ISD::SELECT; bool IsUnaryAbs = false; // Min/max matching is only viable if all output VTs are the same. if (is_splat(ValueVTs)) { EVT VT = ValueVTs[0]; LLVMContext &Ctx = *DAG.getContext(); auto &TLI = DAG.getTargetLoweringInfo(); // We care about the legality of the operation after it has been type // legalized. while (TLI.getTypeAction(Ctx, VT) != TargetLoweringBase::TypeLegal) VT = TLI.getTypeToTransformTo(Ctx, VT); // If the vselect is legal, assume we want to leave this as a vector setcc + // vselect. Otherwise, if this is going to be scalarized, we want to see if // min/max is legal on the scalar type. bool UseScalarMinMax = VT.isVector() && !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT); Value *LHS, *RHS; auto SPR = matchSelectPattern(const_cast<User*>(&I), LHS, RHS); ISD::NodeType Opc = ISD::DELETED_NODE; switch (SPR.Flavor) { case SPF_UMAX: Opc = ISD::UMAX; break; case SPF_UMIN: Opc = ISD::UMIN; break; case SPF_SMAX: Opc = ISD::SMAX; break; case SPF_SMIN: Opc = ISD::SMIN; break; case SPF_FMINNUM: switch (SPR.NaNBehavior) { case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?"); case SPNB_RETURNS_NAN: Opc = ISD::FMINIMUM; break; case SPNB_RETURNS_OTHER: Opc = ISD::FMINNUM; break; case SPNB_RETURNS_ANY: { if (TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT)) Opc = ISD::FMINNUM; else if (TLI.isOperationLegalOrCustom(ISD::FMINIMUM, VT)) Opc = ISD::FMINIMUM; else if (UseScalarMinMax) Opc = TLI.isOperationLegalOrCustom(ISD::FMINNUM, VT.getScalarType()) ? 
ISD::FMINNUM : ISD::FMINIMUM; break; } } break; case SPF_FMAXNUM: switch (SPR.NaNBehavior) { case SPNB_NA: llvm_unreachable("No NaN behavior for FP op?"); case SPNB_RETURNS_NAN: Opc = ISD::FMAXIMUM; break; case SPNB_RETURNS_OTHER: Opc = ISD::FMAXNUM; break; case SPNB_RETURNS_ANY: if (TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT)) Opc = ISD::FMAXNUM; else if (TLI.isOperationLegalOrCustom(ISD::FMAXIMUM, VT)) Opc = ISD::FMAXIMUM; else if (UseScalarMinMax) Opc = TLI.isOperationLegalOrCustom(ISD::FMAXNUM, VT.getScalarType()) ? ISD::FMAXNUM : ISD::FMAXIMUM; break; } break; case SPF_ABS: IsUnaryAbs = true; Opc = ISD::ABS; break; case SPF_NABS: // TODO: we need to produce sub(0, abs(X)). default: break; } if (!IsUnaryAbs && Opc != ISD::DELETED_NODE && (TLI.isOperationLegalOrCustom(Opc, VT) || (UseScalarMinMax && TLI.isOperationLegalOrCustom(Opc, VT.getScalarType()))) && // If the underlying comparison instruction is used by any other // instruction, the consumed instructions won't be destroyed, so it is // not profitable to convert to a min/max. 
hasOnlySelectUsers(cast<SelectInst>(I).getCondition())) { OpCode = Opc; LHSVal = getValue(LHS); RHSVal = getValue(RHS); BaseOps = {}; } if (IsUnaryAbs) { OpCode = Opc; LHSVal = getValue(LHS); BaseOps = {}; } } if (IsUnaryAbs) { for (unsigned i = 0; i != NumValues; ++i) { Values[i] = DAG.getNode(OpCode, getCurSDLoc(), LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), SDValue(LHSVal.getNode(), LHSVal.getResNo() + i)); } } else { for (unsigned i = 0; i != NumValues; ++i) { SmallVector<SDValue, 3> Ops(BaseOps.begin(), BaseOps.end()); Ops.push_back(SDValue(LHSVal.getNode(), LHSVal.getResNo() + i)); Ops.push_back(SDValue(RHSVal.getNode(), RHSVal.getResNo() + i)); Values[i] = DAG.getNode( OpCode, getCurSDLoc(), LHSVal.getNode()->getValueType(LHSVal.getResNo() + i), Ops); } } setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), DAG.getVTList(ValueVTs), Values)); } void SelectionDAGBuilder::visitTrunc(const User &I) { // TruncInst cannot be a no-op cast because sizeof(src) > sizeof(dest). SDValue N = getValue(I.getOperand(0)); EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), I.getType()); setValue(&I, DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), DestVT, N)); } void SelectionDAGBuilder::visitZExt(const User &I) { // ZExt cannot be a no-op cast because sizeof(src) < sizeof(dest). // ZExt also can't be a cast to bool for same reason. So, nothing much to do SDValue N = getValue(I.getOperand(0)); EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), I.getType()); setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, getCurSDLoc(), DestVT, N)); } void SelectionDAGBuilder::visitSExt(const User &I) { // SExt cannot be a no-op cast because sizeof(src) < sizeof(dest). // SExt also can't be a cast to bool for same reason. 
So, nothing much to do SDValue N = getValue(I.getOperand(0)); EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), I.getType()); setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, getCurSDLoc(), DestVT, N)); } void SelectionDAGBuilder::visitFPTrunc(const User &I) { // FPTrunc is never a no-op cast, no need to check SDValue N = getValue(I.getOperand(0)); SDLoc dl = getCurSDLoc(); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); setValue(&I, DAG.getNode(ISD::FP_ROUND, dl, DestVT, N, DAG.getTargetConstant( 0, dl, TLI.getPointerTy(DAG.getDataLayout())))); } void SelectionDAGBuilder::visitFPExt(const User &I) { // FPExt is never a no-op cast, no need to check SDValue N = getValue(I.getOperand(0)); EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), I.getType()); setValue(&I, DAG.getNode(ISD::FP_EXTEND, getCurSDLoc(), DestVT, N)); } void SelectionDAGBuilder::visitFPToUI(const User &I) { // FPToUI is never a no-op cast, no need to check SDValue N = getValue(I.getOperand(0)); EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), I.getType()); setValue(&I, DAG.getNode(ISD::FP_TO_UINT, getCurSDLoc(), DestVT, N)); } void SelectionDAGBuilder::visitFPToSI(const User &I) { // FPToSI is never a no-op cast, no need to check SDValue N = getValue(I.getOperand(0)); EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), I.getType()); setValue(&I, DAG.getNode(ISD::FP_TO_SINT, getCurSDLoc(), DestVT, N)); } void SelectionDAGBuilder::visitUIToFP(const User &I) { // UIToFP is never a no-op cast, no need to check SDValue N = getValue(I.getOperand(0)); EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), I.getType()); setValue(&I, DAG.getNode(ISD::UINT_TO_FP, getCurSDLoc(), DestVT, N)); } void SelectionDAGBuilder::visitSIToFP(const User &I) { // SIToFP is never a no-op cast, no need to check SDValue N = 
getValue(I.getOperand(0)); EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), I.getType()); setValue(&I, DAG.getNode(ISD::SINT_TO_FP, getCurSDLoc(), DestVT, N)); } void SelectionDAGBuilder::visitPtrToInt(const User &I) { // What to do depends on the size of the integer and the size of the pointer. // We can either truncate, zero extend, or no-op, accordingly. SDValue N = getValue(I.getOperand(0)); auto &TLI = DAG.getTargetLoweringInfo(); EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), I.getType()); EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getOperand(0)->getType()); N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), PtrMemVT); N = DAG.getZExtOrTrunc(N, getCurSDLoc(), DestVT); setValue(&I, N); } void SelectionDAGBuilder::visitIntToPtr(const User &I) { // What to do depends on the size of the integer and the size of the pointer. // We can either truncate, zero extend, or no-op, accordingly. SDValue N = getValue(I.getOperand(0)); auto &TLI = DAG.getTargetLoweringInfo(); EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); EVT PtrMemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType()); N = DAG.getZExtOrTrunc(N, getCurSDLoc(), PtrMemVT); N = DAG.getPtrExtOrTrunc(N, getCurSDLoc(), DestVT); setValue(&I, N); } void SelectionDAGBuilder::visitBitCast(const User &I) { SDValue N = getValue(I.getOperand(0)); SDLoc dl = getCurSDLoc(); EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), I.getType()); // BitCast assures us that source and destination are the same size so this is // either a BITCAST or a no-op. if (DestVT != N.getValueType()) setValue(&I, DAG.getNode(ISD::BITCAST, dl, DestVT, N)); // convert types. // Check if the original LLVM IR Operand was a ConstantInt, because getValue() // might fold any kind of constant expression to an integer constant and that // is not what we are looking for. Only recognize a bitcast of a genuine // constant integer as an opaque constant. 
else if(ConstantInt *C = dyn_cast<ConstantInt>(I.getOperand(0))) setValue(&I, DAG.getConstant(C->getValue(), dl, DestVT, /*isTarget=*/false, /*isOpaque*/true)); else setValue(&I, N); // noop cast. } void SelectionDAGBuilder::visitAddrSpaceCast(const User &I) { const TargetLowering &TLI = DAG.getTargetLoweringInfo(); const Value *SV = I.getOperand(0); SDValue N = getValue(SV); EVT DestVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); unsigned SrcAS = SV->getType()->getPointerAddressSpace(); unsigned DestAS = I.getType()->getPointerAddressSpace(); if (!TLI.isNoopAddrSpaceCast(SrcAS, DestAS)) N = DAG.getAddrSpaceCast(getCurSDLoc(), DestVT, N, SrcAS, DestAS); setValue(&I, N); } void SelectionDAGBuilder::visitInsertElement(const User &I) { const TargetLowering &TLI = DAG.getTargetLoweringInfo(); SDValue InVec = getValue(I.getOperand(0)); SDValue InVal = getValue(I.getOperand(1)); SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(2)), getCurSDLoc(), TLI.getVectorIdxTy(DAG.getDataLayout())); setValue(&I, DAG.getNode(ISD::INSERT_VECTOR_ELT, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()), InVec, InVal, InIdx)); } void SelectionDAGBuilder::visitExtractElement(const User &I) { const TargetLowering &TLI = DAG.getTargetLoweringInfo(); SDValue InVec = getValue(I.getOperand(0)); SDValue InIdx = DAG.getSExtOrTrunc(getValue(I.getOperand(1)), getCurSDLoc(), TLI.getVectorIdxTy(DAG.getDataLayout())); setValue(&I, DAG.getNode(ISD::EXTRACT_VECTOR_ELT, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()), InVec, InIdx)); } void SelectionDAGBuilder::visitShuffleVector(const User &I) { SDValue Src1 = getValue(I.getOperand(0)); SDValue Src2 = getValue(I.getOperand(1)); SDLoc DL = getCurSDLoc(); SmallVector<int, 8> Mask; ShuffleVectorInst::getShuffleMask(cast<Constant>(I.getOperand(2)), Mask); unsigned MaskNumElts = Mask.size(); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); 
EVT SrcVT = Src1.getValueType(); unsigned SrcNumElts = SrcVT.getVectorNumElements(); if (SrcNumElts == MaskNumElts) { setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, Mask)); return; } // Normalize the shuffle vector since mask and vector length don't match. if (SrcNumElts < MaskNumElts) { // Mask is longer than the source vectors. We can use concatenate vector to // make the mask and vectors lengths match. if (MaskNumElts % SrcNumElts == 0) { // Mask length is a multiple of the source vector length. // Check if the shuffle is some kind of concatenation of the input // vectors. unsigned NumConcat = MaskNumElts / SrcNumElts; bool IsConcat = true; SmallVector<int, 8> ConcatSrcs(NumConcat, -1); for (unsigned i = 0; i != MaskNumElts; ++i) { int Idx = Mask[i]; if (Idx < 0) continue; // Ensure the indices in each SrcVT sized piece are sequential and that // the same source is used for the whole piece. if ((Idx % SrcNumElts != (i % SrcNumElts)) || (ConcatSrcs[i / SrcNumElts] >= 0 && ConcatSrcs[i / SrcNumElts] != (int)(Idx / SrcNumElts))) { IsConcat = false; break; } // Remember which source this index came from. ConcatSrcs[i / SrcNumElts] = Idx / SrcNumElts; } // The shuffle is concatenating multiple vectors together. Just emit // a CONCAT_VECTORS operation. if (IsConcat) { SmallVector<SDValue, 8> ConcatOps; for (auto Src : ConcatSrcs) { if (Src < 0) ConcatOps.push_back(DAG.getUNDEF(SrcVT)); else if (Src == 0) ConcatOps.push_back(Src1); else ConcatOps.push_back(Src2); } setValue(&I, DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ConcatOps)); return; } } unsigned PaddedMaskNumElts = alignTo(MaskNumElts, SrcNumElts); unsigned NumConcat = PaddedMaskNumElts / SrcNumElts; EVT PaddedVT = EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), PaddedMaskNumElts); // Pad both vectors with undefs to make them the same length as the mask. 
SDValue UndefVal = DAG.getUNDEF(SrcVT); SmallVector<SDValue, 8> MOps1(NumConcat, UndefVal); SmallVector<SDValue, 8> MOps2(NumConcat, UndefVal); MOps1[0] = Src1; MOps2[0] = Src2; Src1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps1); Src2 = DAG.getNode(ISD::CONCAT_VECTORS, DL, PaddedVT, MOps2); // Readjust mask for new input vector length. SmallVector<int, 8> MappedOps(PaddedMaskNumElts, -1); for (unsigned i = 0; i != MaskNumElts; ++i) { int Idx = Mask[i]; if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts - PaddedMaskNumElts; MappedOps[i] = Idx; } SDValue Result = DAG.getVectorShuffle(PaddedVT, DL, Src1, Src2, MappedOps); // If the concatenated vector was padded, extract a subvector with the // correct number of elements. if (MaskNumElts != PaddedMaskNumElts) Result = DAG.getNode( ISD::EXTRACT_SUBVECTOR, DL, VT, Result, DAG.getConstant(0, DL, TLI.getVectorIdxTy(DAG.getDataLayout()))); setValue(&I, Result); return; } if (SrcNumElts > MaskNumElts) { // Analyze the access pattern of the vector to see if we can extract // two subvectors and do the shuffle. int StartIdx[2] = { -1, -1 }; // StartIdx to extract from bool CanExtract = true; for (int Idx : Mask) { unsigned Input = 0; if (Idx < 0) continue; if (Idx >= (int)SrcNumElts) { Input = 1; Idx -= SrcNumElts; } // If all the indices come from the same MaskNumElts sized portion of // the sources we can use extract. Also make sure the extract wouldn't // extract past the end of the source. int NewStartIdx = alignDown(Idx, MaskNumElts); if (NewStartIdx + MaskNumElts > SrcNumElts || (StartIdx[Input] >= 0 && StartIdx[Input] != NewStartIdx)) CanExtract = false; // Make sure we always update StartIdx as we use it to track if all // elements are undef. StartIdx[Input] = NewStartIdx; } if (StartIdx[0] < 0 && StartIdx[1] < 0) { setValue(&I, DAG.getUNDEF(VT)); // Vectors are not used. 
return; } if (CanExtract) { // Extract appropriate subvector and generate a vector shuffle for (unsigned Input = 0; Input < 2; ++Input) { SDValue &Src = Input == 0 ? Src1 : Src2; if (StartIdx[Input] < 0) Src = DAG.getUNDEF(VT); else { Src = DAG.getNode( ISD::EXTRACT_SUBVECTOR, DL, VT, Src, DAG.getConstant(StartIdx[Input], DL, TLI.getVectorIdxTy(DAG.getDataLayout()))); } } // Calculate new mask. SmallVector<int, 8> MappedOps(Mask.begin(), Mask.end()); for (int &Idx : MappedOps) { if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts + StartIdx[1] - MaskNumElts; else if (Idx >= 0) Idx -= StartIdx[0]; } setValue(&I, DAG.getVectorShuffle(VT, DL, Src1, Src2, MappedOps)); return; } } // We can't use either concat vectors or extract subvectors so fall back to // replacing the shuffle with extract and build vector. // to insert and build vector. EVT EltVT = VT.getVectorElementType(); EVT IdxVT = TLI.getVectorIdxTy(DAG.getDataLayout()); SmallVector<SDValue,8> Ops; for (int Idx : Mask) { SDValue Res; if (Idx < 0) { Res = DAG.getUNDEF(EltVT); } else { SDValue &Src = Idx < (int)SrcNumElts ? 
Src1 : Src2; if (Idx >= (int)SrcNumElts) Idx -= SrcNumElts; Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Src, DAG.getConstant(Idx, DL, IdxVT)); } Ops.push_back(Res); } setValue(&I, DAG.getBuildVector(VT, DL, Ops)); } void SelectionDAGBuilder::visitInsertValue(const User &I) { ArrayRef<unsigned> Indices; if (const InsertValueInst *IV = dyn_cast<InsertValueInst>(&I)) Indices = IV->getIndices(); else Indices = cast<ConstantExpr>(&I)->getIndices(); const Value *Op0 = I.getOperand(0); const Value *Op1 = I.getOperand(1); Type *AggTy = I.getType(); Type *ValTy = Op1->getType(); bool IntoUndef = isa<UndefValue>(Op0); bool FromUndef = isa<UndefValue>(Op1); unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); SmallVector<EVT, 4> AggValueVTs; ComputeValueVTs(TLI, DAG.getDataLayout(), AggTy, AggValueVTs); SmallVector<EVT, 4> ValValueVTs; ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs); unsigned NumAggValues = AggValueVTs.size(); unsigned NumValValues = ValValueVTs.size(); SmallVector<SDValue, 4> Values(NumAggValues); // Ignore an insertvalue that produces an empty object if (!NumAggValues) { setValue(&I, DAG.getUNDEF(MVT(MVT::Other))); return; } SDValue Agg = getValue(Op0); unsigned i = 0; // Copy the beginning value(s) from the original aggregate. for (; i != LinearIndex; ++i) Values[i] = IntoUndef ? DAG.getUNDEF(AggValueVTs[i]) : SDValue(Agg.getNode(), Agg.getResNo() + i); // Copy values from the inserted value(s). if (NumValValues) { SDValue Val = getValue(Op1); for (; i != LinearIndex + NumValValues; ++i) Values[i] = FromUndef ? DAG.getUNDEF(AggValueVTs[i]) : SDValue(Val.getNode(), Val.getResNo() + i - LinearIndex); } // Copy remaining value(s) from the original aggregate. for (; i != NumAggValues; ++i) Values[i] = IntoUndef ? 
DAG.getUNDEF(AggValueVTs[i]) : SDValue(Agg.getNode(), Agg.getResNo() + i); setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), DAG.getVTList(AggValueVTs), Values)); } void SelectionDAGBuilder::visitExtractValue(const User &I) { ArrayRef<unsigned> Indices; if (const ExtractValueInst *EV = dyn_cast<ExtractValueInst>(&I)) Indices = EV->getIndices(); else Indices = cast<ConstantExpr>(&I)->getIndices(); const Value *Op0 = I.getOperand(0); Type *AggTy = Op0->getType(); Type *ValTy = I.getType(); bool OutOfUndef = isa<UndefValue>(Op0); unsigned LinearIndex = ComputeLinearIndex(AggTy, Indices); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); SmallVector<EVT, 4> ValValueVTs; ComputeValueVTs(TLI, DAG.getDataLayout(), ValTy, ValValueVTs); unsigned NumValValues = ValValueVTs.size(); // Ignore a extractvalue that produces an empty object if (!NumValValues) { setValue(&I, DAG.getUNDEF(MVT(MVT::Other))); return; } SmallVector<SDValue, 4> Values(NumValValues); SDValue Agg = getValue(Op0); // Copy out the selected value(s). for (unsigned i = LinearIndex; i != LinearIndex + NumValValues; ++i) Values[i - LinearIndex] = OutOfUndef ? DAG.getUNDEF(Agg.getNode()->getValueType(Agg.getResNo() + i)) : SDValue(Agg.getNode(), Agg.getResNo() + i); setValue(&I, DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(), DAG.getVTList(ValValueVTs), Values)); } void SelectionDAGBuilder::visitGetElementPtr(const User &I) { Value *Op0 = I.getOperand(0); // Note that the pointer operand may be a vector of pointers. Take the scalar // element which holds a pointer. unsigned AS = Op0->getType()->getScalarType()->getPointerAddressSpace(); SDValue N = getValue(Op0); SDLoc dl = getCurSDLoc(); auto &TLI = DAG.getTargetLoweringInfo(); MVT PtrTy = TLI.getPointerTy(DAG.getDataLayout(), AS); MVT PtrMemTy = TLI.getPointerMemTy(DAG.getDataLayout(), AS); // Normalize Vector GEP - all scalar operands should be converted to the // splat vector. unsigned VectorWidth = I.getType()->isVectorTy() ? 
I.getType()->getVectorNumElements() : 0; if (VectorWidth && !N.getValueType().isVector()) { LLVMContext &Context = *DAG.getContext(); EVT VT = EVT::getVectorVT(Context, N.getValueType(), VectorWidth); N = DAG.getSplatBuildVector(VT, dl, N); } for (gep_type_iterator GTI = gep_type_begin(&I), E = gep_type_end(&I); GTI != E; ++GTI) { const Value *Idx = GTI.getOperand(); if (StructType *StTy = GTI.getStructTypeOrNull()) { unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue(); if (Field) { // N = N + Offset uint64_t Offset = DL->getStructLayout(StTy)->getElementOffset(Field); // In an inbounds GEP with an offset that is nonnegative even when // interpreted as signed, assume there is no unsigned overflow. SDNodeFlags Flags; if (int64_t(Offset) >= 0 && cast<GEPOperator>(I).isInBounds()) Flags.setNoUnsignedWrap(true); N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, DAG.getConstant(Offset, dl, N.getValueType()), Flags); } } else { unsigned IdxSize = DAG.getDataLayout().getIndexSizeInBits(AS); MVT IdxTy = MVT::getIntegerVT(IdxSize); APInt ElementSize(IdxSize, DL->getTypeAllocSize(GTI.getIndexedType())); // If this is a scalar constant or a splat vector of constants, // handle it quickly. const auto *C = dyn_cast<Constant>(Idx); if (C && isa<VectorType>(C->getType())) C = C->getSplatValue(); if (const auto *CI = dyn_cast_or_null<ConstantInt>(C)) { if (CI->isZero()) continue; APInt Offs = ElementSize * CI->getValue().sextOrTrunc(IdxSize); LLVMContext &Context = *DAG.getContext(); SDValue OffsVal = VectorWidth ? DAG.getConstant(Offs, dl, EVT::getVectorVT(Context, IdxTy, VectorWidth)) : DAG.getConstant(Offs, dl, IdxTy); // In an inbounds GEP with an offset that is nonnegative even when // interpreted as signed, assume there is no unsigned overflow. 
SDNodeFlags Flags; if (Offs.isNonNegative() && cast<GEPOperator>(I).isInBounds()) Flags.setNoUnsignedWrap(true); OffsVal = DAG.getSExtOrTrunc(OffsVal, dl, N.getValueType()); N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, OffsVal, Flags); continue; } // N = N + Idx * ElementSize; SDValue IdxN = getValue(Idx); if (!IdxN.getValueType().isVector() && VectorWidth) { EVT VT = EVT::getVectorVT(*Context, IdxN.getValueType(), VectorWidth); IdxN = DAG.getSplatBuildVector(VT, dl, IdxN); } // If the index is smaller or larger than intptr_t, truncate or extend // it. IdxN = DAG.getSExtOrTrunc(IdxN, dl, N.getValueType()); // If this is a multiply by a power of two, turn it into a shl // immediately. This is a very common case. if (ElementSize != 1) { if (ElementSize.isPowerOf2()) { unsigned Amt = ElementSize.logBase2(); IdxN = DAG.getNode(ISD::SHL, dl, N.getValueType(), IdxN, DAG.getConstant(Amt, dl, IdxN.getValueType())); } else { SDValue Scale = DAG.getConstant(ElementSize.getZExtValue(), dl, IdxN.getValueType()); IdxN = DAG.getNode(ISD::MUL, dl, N.getValueType(), IdxN, Scale); } } N = DAG.getNode(ISD::ADD, dl, N.getValueType(), N, IdxN); } } if (PtrMemTy != PtrTy && !cast<GEPOperator>(I).isInBounds()) N = DAG.getPtrExtendInReg(N, dl, PtrMemTy); setValue(&I, N); } void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) { // If this is a fixed sized alloca in the entry block of the function, // allocate it statically on the stack. if (FuncInfo.StaticAllocaMap.count(&I)) return; // getValue will auto-populate this. 
SDLoc dl = getCurSDLoc(); Type *Ty = I.getAllocatedType(); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); auto &DL = DAG.getDataLayout(); uint64_t TySize = DL.getTypeAllocSize(Ty); unsigned Align = std::max((unsigned)DL.getPrefTypeAlignment(Ty), I.getAlignment()); SDValue AllocSize = getValue(I.getArraySize()); EVT IntPtr = TLI.getPointerTy(DAG.getDataLayout(), DL.getAllocaAddrSpace()); if (AllocSize.getValueType() != IntPtr) AllocSize = DAG.getZExtOrTrunc(AllocSize, dl, IntPtr); AllocSize = DAG.getNode(ISD::MUL, dl, IntPtr, AllocSize, DAG.getConstant(TySize, dl, IntPtr)); // Handle alignment. If the requested alignment is less than or equal to // the stack alignment, ignore it. If the size is greater than or equal to // the stack alignment, we note this in the DYNAMIC_STACKALLOC node. unsigned StackAlign = DAG.getSubtarget().getFrameLowering()->getStackAlignment(); if (Align <= StackAlign) Align = 0; // Round the size of the allocation up to the stack alignment size // by add SA-1 to the size. This doesn't overflow because we're computing // an address inside an alloca. SDNodeFlags Flags; Flags.setNoUnsignedWrap(true); AllocSize = DAG.getNode(ISD::ADD, dl, AllocSize.getValueType(), AllocSize, DAG.getConstant(StackAlign - 1, dl, IntPtr), Flags); // Mask out the low bits for alignment purposes. 
  // (Continuation of visitAlloca: align the rounded size downward.)
  AllocSize = DAG.getNode(ISD::AND, dl, AllocSize.getValueType(), AllocSize,
                          DAG.getConstant(~(uint64_t)(StackAlign - 1), dl,
                                          IntPtr));

  SDValue Ops[] = {getRoot(), AllocSize, DAG.getConstant(Align, dl, IntPtr)};
  SDVTList VTs = DAG.getVTList(AllocSize.getValueType(), MVT::Other);
  SDValue DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, dl, VTs, Ops);
  setValue(&I, DSA);
  // Chain the alloca so later memory operations stay ordered after it.
  DAG.setRoot(DSA.getValue(1));

  assert(FuncInfo.MF->getFrameInfo().hasVarSizedObjects());
}

/// visitLoad - Lower an IR load.  Atomic and swifterror loads are diverted
/// to their dedicated visitors; aggregate loads are split into one load per
/// register-legal value (up to MaxParallelChains chains at a time).
void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
  if (I.isAtomic())
    return visitAtomicLoad(I);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const Value *SV = I.getOperand(0);
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(SV)) {
      if (Arg->hasSwiftErrorAttr())
        return visitLoadFromSwiftError(I);
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
      if (Alloca->isSwiftError())
        return visitLoadFromSwiftError(I);
    }
  }

  SDValue Ptr = getValue(SV);
  Type *Ty = I.getType();

  bool isVolatile = I.isVolatile();
  bool isNonTemporal = I.hasMetadata(LLVMContext::MD_nontemporal);
  bool isInvariant = I.hasMetadata(LLVMContext::MD_invariant_load);
  bool isDereferenceable =
      isDereferenceablePointer(SV, I.getType(), DAG.getDataLayout());
  unsigned Alignment = I.getAlignment();

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);

  // Split the loaded type into legal pieces with their in-memory types and
  // byte offsets.
  SmallVector<EVT, 4> ValueVTs, MemVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(TLI, DAG.getDataLayout(), Ty, ValueVTs, &MemVTs, &Offsets);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0)
    return;

  SDValue Root;
  bool ConstantMemory = false;
  if (isVolatile || NumValues > MaxParallelChains)
    // Serialize volatile loads with other side effects.
    Root = getRoot();
  else if (AA &&
           AA->pointsToConstantMemory(MemoryLocation(
               SV,
               LocationSize::precise(
                   DAG.getDataLayout().getTypeStoreSize(Ty)),
               AAInfo))) {
    // Do not serialize (non-volatile) loads of constant memory with anything.
    Root = DAG.getEntryNode();
    ConstantMemory = true;
  } else {
    // Do not serialize non-volatile loads against each other.
    Root = DAG.getRoot();
  }

  SDLoc dl = getCurSDLoc();

  if (isVolatile)
    Root = TLI.prepareVolatileOrAtomicLoad(Root, dl, DAG);

  // An aggregate load cannot wrap around the address space, so offsets to its
  // parts don't wrap either.
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);

  SmallVector<SDValue, 4> Values(NumValues);
  SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
  EVT PtrVT = Ptr.getValueType();

  unsigned ChainI = 0;
  for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
    // Serializing loads here may result in excessive register pressure, and
    // TokenFactor places arbitrary choke points on the scheduler. SD scheduling
    // could recover a bit by hoisting nodes upward in the chain by recognizing
    // they are side-effect free or do not alias. The optimizer should really
    // avoid this case by converting large object/array copies to llvm.memcpy
    // (MaxParallelChains should always remain as failsafe).
    if (ChainI == MaxParallelChains) {
      assert(PendingLoads.empty() && "PendingLoads must be serialized first");
      SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  makeArrayRef(Chains.data(), ChainI));
      Root = Chain;
      ChainI = 0;
    }
    // Address of this piece: base pointer plus its byte offset.
    SDValue A = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
                            DAG.getConstant(Offsets[i], dl, PtrVT), Flags);
    auto MMOFlags = MachineMemOperand::MONone;
    if (isVolatile)
      MMOFlags |= MachineMemOperand::MOVolatile;
    if (isNonTemporal)
      MMOFlags |= MachineMemOperand::MONonTemporal;
    if (isInvariant)
      MMOFlags |= MachineMemOperand::MOInvariant;
    if (isDereferenceable)
      MMOFlags |= MachineMemOperand::MODereferenceable;
    MMOFlags |= TLI.getMMOFlags(I);

    SDValue L = DAG.getLoad(MemVTs[i], dl, Root, A,
                            MachinePointerInfo(SV, Offsets[i]), Alignment,
                            MMOFlags, AAInfo, Ranges);
    Chains[ChainI] = L.getValue(1);

    // Widen from the in-memory type to the in-register type if they differ.
    if (MemVTs[i] != ValueVTs[i])
      L = DAG.getZExtOrTrunc(L, dl, ValueVTs[i]);

    Values[i] = L;
  }

  if (!ConstantMemory) {
    SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                makeArrayRef(Chains.data(), ChainI));
    if (isVolatile)
      DAG.setRoot(Chain);
    else
      PendingLoads.push_back(Chain);
  }

  setValue(&I, DAG.getNode(ISD::MERGE_VALUES, dl,
                           DAG.getVTList(ValueVTs), Values));
}

/// visitStoreToSwiftError - Lower a store to a swifterror slot as a copy
/// into the swifterror virtual register rather than a memory store.
void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
  assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
         "call visitStoreToSwiftError when backend supports swifterror");

  SmallVector<EVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  const Value *SrcV = I.getOperand(0);
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
                  SrcV->getType(), ValueVTs, &Offsets);
  assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
         "expect a single EVT for swifterror");

  SDValue Src = getValue(SrcV);
  // Create a virtual register, then update the virtual register.
  Register VReg =
      SwiftError.getOrCreateVRegDefAt(&I, FuncInfo.MBB, I.getPointerOperand());
  // Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
  // Chain can be getRoot or getControlRoot.
  // (Continuation of visitStoreToSwiftError: emit the register copy and
  // chain it.)
  SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
                                      SDValue(Src.getNode(), Src.getResNo()));
  DAG.setRoot(CopyNode);
}

/// visitLoadFromSwiftError - Lower a load of a swifterror slot as a copy
/// from the swifterror virtual register rather than a memory load.
void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
  assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
         "call visitLoadFromSwiftError when backend supports swifterror");

  assert(!I.isVolatile() &&
         !I.hasMetadata(LLVMContext::MD_nontemporal) &&
         !I.hasMetadata(LLVMContext::MD_invariant_load) &&
         "Support volatile, non temporal, invariant for load_from_swift_error");

  const Value *SV = I.getOperand(0);
  Type *Ty = I.getType();
  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  assert(
      (!AA ||
       !AA->pointsToConstantMemory(MemoryLocation(
           SV,
           LocationSize::precise(DAG.getDataLayout().getTypeStoreSize(Ty)),
           AAInfo))) &&
      "load_from_swift_error should not be constant memory");

  SmallVector<EVT, 4> ValueVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(), Ty,
                  ValueVTs, &Offsets);
  assert(ValueVTs.size() == 1 && Offsets[0] == 0 &&
         "expect a single EVT for swifterror");

  // Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
  SDValue L = DAG.getCopyFromReg(
      getRoot(), getCurSDLoc(),
      SwiftError.getOrCreateVRegUseAt(&I, FuncInfo.MBB, SV), ValueVTs[0]);

  setValue(&I, L);
}

/// visitStore - Lower an IR store.  Atomic and swifterror stores are
/// diverted to their dedicated visitors; aggregate stores are split into
/// one store per register-legal value.
void SelectionDAGBuilder::visitStore(const StoreInst &I) {
  if (I.isAtomic())
    return visitAtomicStore(I);

  const Value *SrcV = I.getOperand(0);
  const Value *PtrV = I.getOperand(1);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.supportSwiftError()) {
    // Swifterror values can come from either a function parameter with
    // swifterror attribute or an alloca with swifterror attribute.
    if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
      if (Arg->hasSwiftErrorAttr())
        return visitStoreToSwiftError(I);
    }

    if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
      if (Alloca->isSwiftError())
        return visitStoreToSwiftError(I);
    }
  }

  SmallVector<EVT, 4> ValueVTs, MemVTs;
  SmallVector<uint64_t, 4> Offsets;
  ComputeValueVTs(DAG.getTargetLoweringInfo(), DAG.getDataLayout(),
                  SrcV->getType(), ValueVTs, &MemVTs, &Offsets);
  unsigned NumValues = ValueVTs.size();
  if (NumValues == 0)
    return;

  // Get the lowered operands. Note that we do this after
  // checking if NumResults is zero, because with zero results
  // the operands won't have values in the map.
  SDValue Src = getValue(SrcV);
  SDValue Ptr = getValue(PtrV);

  SDValue Root = getRoot();
  SmallVector<SDValue, 4> Chains(std::min(MaxParallelChains, NumValues));
  SDLoc dl = getCurSDLoc();
  EVT PtrVT = Ptr.getValueType();
  unsigned Alignment = I.getAlignment();
  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);

  auto MMOFlags = MachineMemOperand::MONone;
  if (I.isVolatile())
    MMOFlags |= MachineMemOperand::MOVolatile;
  if (I.hasMetadata(LLVMContext::MD_nontemporal))
    MMOFlags |= MachineMemOperand::MONonTemporal;
  MMOFlags |= TLI.getMMOFlags(I);

  // An aggregate load cannot wrap around the address space, so offsets to its
  // parts don't wrap either.
  SDNodeFlags Flags;
  Flags.setNoUnsignedWrap(true);

  unsigned ChainI = 0;
  for (unsigned i = 0; i != NumValues; ++i, ++ChainI) {
    // See visitLoad comments.
    if (ChainI == MaxParallelChains) {
      SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  makeArrayRef(Chains.data(), ChainI));
      Root = Chain;
      ChainI = 0;
    }
    // Address of this piece: base pointer plus its byte offset.
    SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, Ptr,
                              DAG.getConstant(Offsets[i], dl, PtrVT), Flags);
    SDValue Val = SDValue(Src.getNode(), Src.getResNo() + i);
    // Narrow to the in-memory type if it differs from the register type.
    if (MemVTs[i] != ValueVTs[i])
      Val = DAG.getPtrExtOrTrunc(Val, dl, MemVTs[i]);
    SDValue St =
        DAG.getStore(Root, dl, Val, Add, MachinePointerInfo(PtrV, Offsets[i]),
                     Alignment, MMOFlags, AAInfo);
    Chains[ChainI] = St;
  }

  SDValue StoreNode = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                                  makeArrayRef(Chains.data(), ChainI));
  DAG.setRoot(StoreNode);
}

/// visitMaskedStore - Lower @llvm.masked.store / @llvm.masked.compressstore
/// intrinsic calls to a masked-store DAG node.  \p IsCompressing selects
/// the compressing form (different operand order, no alignment operand).
void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
                                           bool IsCompressing) {
  SDLoc sdl = getCurSDLoc();

  // Unpack the operands of the plain masked-store form.
  auto getMaskedStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
                               unsigned& Alignment) {
    // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
    Src0 = I.getArgOperand(0);
    Ptr = I.getArgOperand(1);
    Alignment = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
    Mask = I.getArgOperand(3);
  };
  // Unpack the operands of the compressing form (no alignment operand;
  // 0 means "use the natural alignment of the type").
  auto getCompressingStoreOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
                                    unsigned& Alignment) {
    // llvm.masked.compressstore.*(Src0, Ptr, Mask)
    Src0 = I.getArgOperand(0);
    Ptr = I.getArgOperand(1);
    Mask = I.getArgOperand(2);
    Alignment = 0;
  };

  Value  *PtrOperand, *MaskOperand, *Src0Operand;
  unsigned Alignment;
  if (IsCompressing)
    getCompressingStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
  else
    getMaskedStoreOps(PtrOperand, MaskOperand, Src0Operand, Alignment);

  SDValue Ptr = getValue(PtrOperand);
  SDValue Src0 = getValue(Src0Operand);
  SDValue Mask = getValue(MaskOperand);

  EVT VT = Src0.getValueType();
  if (!Alignment)
    Alignment = DAG.getEVTAlignment(VT);

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);

  MachineMemOperand *MMO =
    DAG.getMachineFunction().
getMachineMemOperand(MachinePointerInfo(PtrOperand), MachineMemOperand::MOStore, VT.getStoreSize(), Alignment, AAInfo); SDValue StoreNode = DAG.getMaskedStore(getRoot(), sdl, Src0, Ptr, Mask, VT, MMO, false /* Truncating */, IsCompressing); DAG.setRoot(StoreNode); setValue(&I, StoreNode); } // Get a uniform base for the Gather/Scatter intrinsic. // The first argument of the Gather/Scatter intrinsic is a vector of pointers. // We try to represent it as a base pointer + vector of indices. // Usually, the vector of pointers comes from a 'getelementptr' instruction. // The first operand of the GEP may be a single pointer or a vector of pointers // Example: // %gep.ptr = getelementptr i32, <8 x i32*> %vptr, <8 x i32> %ind // or // %gep.ptr = getelementptr i32, i32* %ptr, <8 x i32> %ind // %res = call <8 x i32> @llvm.masked.gather.v8i32(<8 x i32*> %gep.ptr, .. // // When the first GEP operand is a single pointer - it is the uniform base we // are looking for. If first operand of the GEP is a splat vector - we // extract the splat value and use it as a uniform base. // In all other cases the function returns 'false'. static bool getUniformBase(const Value *&Ptr, SDValue &Base, SDValue &Index, ISD::MemIndexType &IndexType, SDValue &Scale, SelectionDAGBuilder *SDB) { SelectionDAG& DAG = SDB->DAG; LLVMContext &Context = *DAG.getContext(); assert(Ptr->getType()->isVectorTy() && "Uexpected pointer type"); const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr); if (!GEP) return false; const Value *GEPPtr = GEP->getPointerOperand(); if (!GEPPtr->getType()->isVectorTy()) Ptr = GEPPtr; else if (!(Ptr = getSplatValue(GEPPtr))) return false; unsigned FinalIndex = GEP->getNumOperands() - 1; Value *IndexVal = GEP->getOperand(FinalIndex); // Ensure all the other indices are 0. 
for (unsigned i = 1; i < FinalIndex; ++i) { auto *C = dyn_cast<Constant>(GEP->getOperand(i)); if (!C) return false; if (isa<VectorType>(C->getType())) C = C->getSplatValue(); auto *CI = dyn_cast_or_null<ConstantInt>(C); if (!CI || !CI->isZero()) return false; } // The operands of the GEP may be defined in another basic block. // In this case we'll not find nodes for the operands. if (!SDB->findValue(Ptr) || !SDB->findValue(IndexVal)) return false; const TargetLowering &TLI = DAG.getTargetLoweringInfo(); const DataLayout &DL = DAG.getDataLayout(); Scale = DAG.getTargetConstant(DL.getTypeAllocSize(GEP->getResultElementType()), SDB->getCurSDLoc(), TLI.getPointerTy(DL)); Base = SDB->getValue(Ptr); Index = SDB->getValue(IndexVal); IndexType = ISD::SIGNED_SCALED; if (!Index.getValueType().isVector()) { unsigned GEPWidth = GEP->getType()->getVectorNumElements(); EVT VT = EVT::getVectorVT(Context, Index.getValueType(), GEPWidth); Index = DAG.getSplatBuildVector(VT, SDLoc(Index), Index); } return true; } void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) { SDLoc sdl = getCurSDLoc(); // llvm.masked.scatter.*(Src0, Ptrs, alignemt, Mask) const Value *Ptr = I.getArgOperand(1); SDValue Src0 = getValue(I.getArgOperand(0)); SDValue Mask = getValue(I.getArgOperand(3)); EVT VT = Src0.getValueType(); unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(2)))->getZExtValue(); if (!Alignment) Alignment = DAG.getEVTAlignment(VT); const TargetLowering &TLI = DAG.getTargetLoweringInfo(); AAMDNodes AAInfo; I.getAAMetadata(AAInfo); SDValue Base; SDValue Index; ISD::MemIndexType IndexType; SDValue Scale; const Value *BasePtr = Ptr; bool UniformBase = getUniformBase(BasePtr, Base, Index, IndexType, Scale, this); const Value *MemOpBasePtr = UniformBase ? BasePtr : nullptr; MachineMemOperand *MMO = DAG.getMachineFunction(). 
    // (Continuation of visitMaskedScatter: build the MMO and emit the node.)
    getMachineMemOperand(MachinePointerInfo(MemOpBasePtr),
                         MachineMemOperand::MOStore, VT.getStoreSize(),
                         Alignment, AAInfo);
  // No uniform base: use a zero base with the full pointer vector as index
  // and a unit scale.
  if (!UniformBase) {
    Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
    Index = getValue(Ptr);
    IndexType = ISD::SIGNED_SCALED;
    Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
  }
  SDValue Ops[] = { getRoot(), Src0, Mask, Base, Index, Scale };
  SDValue Scatter = DAG.getMaskedScatter(DAG.getVTList(MVT::Other), VT, sdl,
                                         Ops, MMO, IndexType);
  DAG.setRoot(Scatter);
  setValue(&I, Scatter);
}

/// visitMaskedLoad - Lower @llvm.masked.load / @llvm.masked.expandload
/// intrinsic calls to a masked-load DAG node.  \p IsExpanding selects the
/// expanding form (different operand order, no alignment operand).
void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
  SDLoc sdl = getCurSDLoc();

  // Unpack the operands of the plain masked-load form.
  auto getMaskedLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
                              unsigned& Alignment) {
    // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
    Ptr = I.getArgOperand(0);
    Alignment = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue();
    Mask = I.getArgOperand(2);
    Src0 = I.getArgOperand(3);
  };
  // Unpack the operands of the expanding form (no alignment operand;
  // 0 means "use the natural alignment of the type").
  auto getExpandingLoadOps = [&](Value* &Ptr, Value* &Mask, Value* &Src0,
                                 unsigned& Alignment) {
    // @llvm.masked.expandload.*(Ptr, Mask, Src0)
    Ptr = I.getArgOperand(0);
    Alignment = 0;
    Mask = I.getArgOperand(1);
    Src0 = I.getArgOperand(2);
  };

  Value  *PtrOperand, *MaskOperand, *Src0Operand;
  unsigned Alignment;
  if (IsExpanding)
    getExpandingLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);
  else
    getMaskedLoadOps(PtrOperand, MaskOperand, Src0Operand, Alignment);

  SDValue Ptr = getValue(PtrOperand);
  SDValue Src0 = getValue(Src0Operand);
  SDValue Mask = getValue(MaskOperand);

  EVT VT = Src0.getValueType();
  if (!Alignment)
    Alignment = DAG.getEVTAlignment(VT);

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);

  // Do not serialize masked loads of constant memory with anything.
  bool AddToChain =
      !AA || !AA->pointsToConstantMemory(MemoryLocation(
                 PtrOperand,
                 LocationSize::precise(
                     DAG.getDataLayout().getTypeStoreSize(I.getType())),
                 AAInfo));
  SDValue InChain = AddToChain ? DAG.getRoot() : DAG.getEntryNode();

  MachineMemOperand *MMO =
    DAG.getMachineFunction().
    getMachineMemOperand(MachinePointerInfo(PtrOperand),
                         MachineMemOperand::MOLoad, VT.getStoreSize(),
                         Alignment, AAInfo, Ranges);

  SDValue Load = DAG.getMaskedLoad(VT, sdl, InChain, Ptr, Mask, Src0, VT, MMO,
                                   ISD::NON_EXTLOAD, IsExpanding);
  if (AddToChain)
    PendingLoads.push_back(Load.getValue(1));
  setValue(&I, Load);
}

/// visitMaskedGather - Lower an @llvm.masked.gather intrinsic call to a
/// masked-gather DAG node, factoring out a uniform base when possible.
void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
  SDLoc sdl = getCurSDLoc();

  // @llvm.masked.gather.*(Ptrs, alignment, Mask, Src0)
  const Value *Ptr = I.getArgOperand(0);
  SDValue Src0 = getValue(I.getArgOperand(3));
  SDValue Mask = getValue(I.getArgOperand(2));

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  unsigned Alignment = (cast<ConstantInt>(I.getArgOperand(1)))->getZExtValue();
  if (!Alignment)
    Alignment = DAG.getEVTAlignment(VT);

  AAMDNodes AAInfo;
  I.getAAMetadata(AAInfo);
  const MDNode *Ranges = I.getMetadata(LLVMContext::MD_range);

  SDValue Root = DAG.getRoot();
  SDValue Base;
  SDValue Index;
  ISD::MemIndexType IndexType;
  SDValue Scale;
  const Value *BasePtr = Ptr;
  bool UniformBase = getUniformBase(BasePtr, Base, Index, IndexType, Scale,
                                    this);
  bool ConstantMemory = false;
  if (UniformBase && AA &&
      AA->pointsToConstantMemory(
          MemoryLocation(BasePtr,
                         LocationSize::precise(
                             DAG.getDataLayout().getTypeStoreSize(I.getType())),
                         AAInfo))) {
    // Do not serialize (non-volatile) loads of constant memory with anything.
    Root = DAG.getEntryNode();
    ConstantMemory = true;
  }

  MachineMemOperand *MMO =
    DAG.getMachineFunction().
    getMachineMemOperand(MachinePointerInfo(UniformBase ?
                                            BasePtr : nullptr),
                         MachineMemOperand::MOLoad, VT.getStoreSize(),
                         Alignment, AAInfo, Ranges);

  // No uniform base: use a zero base with the full pointer vector as index
  // and a unit scale.
  if (!UniformBase) {
    Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
    Index = getValue(Ptr);
    IndexType = ISD::SIGNED_SCALED;
    Scale = DAG.getTargetConstant(1, sdl, TLI.getPointerTy(DAG.getDataLayout()));
  }
  SDValue Ops[] = { Root, Src0, Mask, Base, Index, Scale };
  SDValue Gather = DAG.getMaskedGather(DAG.getVTList(VT, MVT::Other), VT, sdl,
                                       Ops, MMO, IndexType);

  SDValue OutChain = Gather.getValue(1);
  if (!ConstantMemory)
    PendingLoads.push_back(OutChain);
  setValue(&I, Gather);
}

/// visitAtomicCmpXchg - Lower a cmpxchg instruction to an
/// ATOMIC_CMP_SWAP_WITH_SUCCESS node (result value + success flag + chain).
void SelectionDAGBuilder::visitAtomicCmpXchg(const AtomicCmpXchgInst &I) {
  SDLoc dl = getCurSDLoc();
  AtomicOrdering SuccessOrdering = I.getSuccessOrdering();
  AtomicOrdering FailureOrdering = I.getFailureOrdering();
  SyncScope::ID SSID = I.getSyncScopeID();

  SDValue InChain = getRoot();

  MVT MemVT = getValue(I.getCompareOperand()).getSimpleValueType();
  SDVTList VTs = DAG.getVTList(MemVT, MVT::i1, MVT::Other);

  auto Alignment = DAG.getEVTAlignment(MemVT);

  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
  if (I.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;
  Flags |= DAG.getTargetLoweringInfo().getMMOFlags(I);

  MachineFunction &MF = DAG.getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
                            MemVT.getStoreSize(), Alignment, AAMDNodes(),
                            nullptr, SSID, SuccessOrdering, FailureOrdering);

  SDValue L = DAG.getAtomicCmpSwap(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS,
                                   dl, MemVT, VTs, InChain,
                                   getValue(I.getPointerOperand()),
                                   getValue(I.getCompareOperand()),
                                   getValue(I.getNewValOperand()), MMO);

  SDValue OutChain = L.getValue(2);

  setValue(&I, L);
  DAG.setRoot(OutChain);
}

/// visitAtomicRMW - Lower an atomicrmw instruction to the matching
/// ATOMIC_LOAD_* (or ATOMIC_SWAP) node.
void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
  SDLoc dl = getCurSDLoc();
  ISD::NodeType NT;
  // Map the IR operation onto the corresponding ISD opcode.
  switch (I.getOperation()) {
  default: llvm_unreachable("Unknown atomicrmw operation");
  case AtomicRMWInst::Xchg: NT = ISD::ATOMIC_SWAP; break;
  case AtomicRMWInst::Add:  NT = ISD::ATOMIC_LOAD_ADD; break;
  case AtomicRMWInst::Sub:  NT = ISD::ATOMIC_LOAD_SUB; break;
  case AtomicRMWInst::And:  NT = ISD::ATOMIC_LOAD_AND; break;
  case AtomicRMWInst::Nand: NT = ISD::ATOMIC_LOAD_NAND; break;
  case AtomicRMWInst::Or:   NT = ISD::ATOMIC_LOAD_OR; break;
  case AtomicRMWInst::Xor:  NT = ISD::ATOMIC_LOAD_XOR; break;
  case AtomicRMWInst::Max:  NT = ISD::ATOMIC_LOAD_MAX; break;
  case AtomicRMWInst::Min:  NT = ISD::ATOMIC_LOAD_MIN; break;
  case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
  case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
  case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
  case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
  }
  AtomicOrdering Ordering = I.getOrdering();
  SyncScope::ID SSID = I.getSyncScopeID();

  SDValue InChain = getRoot();

  auto MemVT = getValue(I.getValOperand()).getSimpleValueType();
  auto Alignment = DAG.getEVTAlignment(MemVT);

  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
  if (I.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;
  Flags |= DAG.getTargetLoweringInfo().getMMOFlags(I);

  MachineFunction &MF = DAG.getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
                            MemVT.getStoreSize(), Alignment, AAMDNodes(),
                            nullptr, SSID, Ordering);

  SDValue L =
    DAG.getAtomic(NT, dl, MemVT, InChain,
                  getValue(I.getPointerOperand()), getValue(I.getValOperand()),
                  MMO);

  SDValue OutChain = L.getValue(1);

  setValue(&I, L);
  DAG.setRoot(OutChain);
}

/// visitFence - Lower a fence instruction to an ATOMIC_FENCE node carrying
/// the ordering and sync-scope as constant operands.
void SelectionDAGBuilder::visitFence(const FenceInst &I) {
  SDLoc dl = getCurSDLoc();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Ops[3];
  Ops[0] = getRoot();
  Ops[1] = DAG.getConstant((unsigned)I.getOrdering(), dl,
                           TLI.getFenceOperandTy(DAG.getDataLayout()));
  Ops[2] = DAG.getConstant(I.getSyncScopeID(), dl,
                           TLI.getFenceOperandTy(DAG.getDataLayout()));
  DAG.setRoot(DAG.getNode(ISD::ATOMIC_FENCE, dl, MVT::Other, Ops));
}

// NOTE: only the "void" return type of the next definition falls in this
// chunk's line; the remainder of its signature follows immediately below.
void
// visitAtomicLoad - Lower an atomic (or ordered) IR load either to a plain
// load SDNode (when the target opts in) or to an ATOMIC_LOAD node.
// (The "void" return type of this definition is on the preceding line.)
SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
  SDLoc dl = getCurSDLoc();
  AtomicOrdering Order = I.getOrdering();
  SyncScope::ID SSID = I.getSyncScopeID();

  SDValue InChain = getRoot();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  EVT MemVT = TLI.getMemValueType(DAG.getDataLayout(), I.getType());

  if (!TLI.supportsUnalignedAtomics() &&
      I.getAlignment() < MemVT.getSizeInBits() / 8)
    report_fatal_error("Cannot generate unaligned atomic load");

  auto Flags = MachineMemOperand::MOLoad;
  if (I.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;
  if (I.hasMetadata(LLVMContext::MD_invariant_load))
    Flags |= MachineMemOperand::MOInvariant;
  if (isDereferenceablePointer(I.getPointerOperand(), I.getType(),
                               DAG.getDataLayout()))
    Flags |= MachineMemOperand::MODereferenceable;

  Flags |= TLI.getMMOFlags(I);

  MachineMemOperand *MMO =
      DAG.getMachineFunction().
      getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                           Flags, MemVT.getStoreSize(),
                           I.getAlignment() ? I.getAlignment() :
                                              DAG.getEVTAlignment(MemVT),
                           AAMDNodes(), nullptr, SSID, Order);

  InChain = TLI.prepareVolatileOrAtomicLoad(InChain, dl, DAG);

  SDValue Ptr = getValue(I.getPointerOperand());

  if (TLI.lowerAtomicLoadAsLoadSDNode(I)) {
    // TODO: Once this is better exercised by tests, it should be merged with
    // the normal path for loads to prevent future divergence.
    SDValue L = DAG.getLoad(MemVT, dl, InChain, Ptr, MMO);
    if (MemVT != VT)
      L = DAG.getPtrExtOrTrunc(L, dl, VT);

    setValue(&I, L);
    // An unordered atomic load does not need to update the root.
    if (!I.isUnordered()) {
      SDValue OutChain = L.getValue(1);
      DAG.setRoot(OutChain);
    }
    return;
  }

  SDValue L = DAG.getAtomic(ISD::ATOMIC_LOAD, dl, MemVT, MemVT, InChain,
                            Ptr, MMO);

  SDValue OutChain = L.getValue(1);
  if (MemVT != VT)
    L = DAG.getPtrExtOrTrunc(L, dl, VT);

  setValue(&I, L);
  DAG.setRoot(OutChain);
}

/// visitAtomicStore - Lower an atomic (or ordered) IR store either to a
/// plain store SDNode (when the target opts in) or to an ATOMIC_STORE node.
void SelectionDAGBuilder::visitAtomicStore(const StoreInst &I) {
  SDLoc dl = getCurSDLoc();

  AtomicOrdering Ordering = I.getOrdering();
  SyncScope::ID SSID = I.getSyncScopeID();

  SDValue InChain = getRoot();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT MemVT =
      TLI.getMemValueType(DAG.getDataLayout(), I.getValueOperand()->getType());

  if (I.getAlignment() < MemVT.getSizeInBits() / 8)
    report_fatal_error("Cannot generate unaligned atomic store");

  auto Flags = MachineMemOperand::MOStore;
  if (I.isVolatile())
    Flags |= MachineMemOperand::MOVolatile;
  Flags |= TLI.getMMOFlags(I);

  MachineFunction &MF = DAG.getMachineFunction();
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()), Flags,
                            MemVT.getStoreSize(), I.getAlignment(), AAMDNodes(),
                            nullptr, SSID, Ordering);

  SDValue Val = getValue(I.getValueOperand());
  // Narrow the stored value to the in-memory type if needed.
  if (Val.getValueType() != MemVT)
    Val = DAG.getPtrExtOrTrunc(Val, dl, MemVT);
  SDValue Ptr = getValue(I.getPointerOperand());

  if (TLI.lowerAtomicStoreAsStoreSDNode(I)) {
    // TODO: Once this is better exercised by tests, it should be merged with
    // the normal path for stores to prevent future divergence.
    SDValue S = DAG.getStore(InChain, dl, Val, Ptr, MMO);
    DAG.setRoot(S);
    return;
  }
  SDValue OutChain = DAG.getAtomic(ISD::ATOMIC_STORE, dl, MemVT, InChain,
                                   Ptr, Val, MMO);

  DAG.setRoot(OutChain);
}

/// visitTargetIntrinsic - Lower a call of a target intrinsic to an INTRINSIC
/// node.
void SelectionDAGBuilder::visitTargetIntrinsic(const CallInst &I,
                                               unsigned Intrinsic) {
  // Ignore the callsite's attributes.
  // A specific call site may be marked with
  // readnone, but the lowering code will expect the chain based on the
  // definition.
  const Function *F = I.getCalledFunction();
  bool HasChain = !F->doesNotAccessMemory();
  bool OnlyLoad = HasChain && F->onlyReadsMemory();

  // Build the operand list.
  SmallVector<SDValue, 8> Ops;
  if (HasChain) {  // If this intrinsic has side-effects, chainify it.
    if (OnlyLoad) {
      // We don't need to serialize loads against other loads.
      Ops.push_back(DAG.getRoot());
    } else {
      Ops.push_back(getRoot());
    }
  }

  // Info is set by getTgtMemIntrinsic
  TargetLowering::IntrinsicInfo Info;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool IsTgtIntrinsic = TLI.getTgtMemIntrinsic(Info, I,
                                               DAG.getMachineFunction(),
                                               Intrinsic);

  // Add the intrinsic ID as an integer operand if it's not a target intrinsic.
  if (!IsTgtIntrinsic || Info.opc == ISD::INTRINSIC_VOID ||
      Info.opc == ISD::INTRINSIC_W_CHAIN)
    Ops.push_back(DAG.getTargetConstant(Intrinsic, getCurSDLoc(),
                                        TLI.getPointerTy(DAG.getDataLayout())));

  // Add all operands of the call to the operand list.
  for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
    const Value *Arg = I.getArgOperand(i);
    if (!I.paramHasAttr(i, Attribute::ImmArg)) {
      Ops.push_back(getValue(Arg));
      continue;
    }

    // Use TargetConstant instead of a regular constant for immarg.
    EVT VT = TLI.getValueType(*DL, Arg->getType(), true);
    if (const ConstantInt *CI = dyn_cast<ConstantInt>(Arg)) {
      assert(CI->getBitWidth() <= 64 &&
             "large intrinsic immediates not handled");
      Ops.push_back(DAG.getTargetConstant(*CI, SDLoc(), VT));
    } else {
      Ops.push_back(
          DAG.getTargetConstantFP(*cast<ConstantFP>(Arg), SDLoc(), VT));
    }
  }

  SmallVector<EVT, 4> ValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), I.getType(), ValueVTs);

  if (HasChain)
    ValueVTs.push_back(MVT::Other);

  SDVTList VTs = DAG.getVTList(ValueVTs);

  // Create the node.
  SDValue Result;
  if (IsTgtIntrinsic) {
    // This is target intrinsic that touches memory
    AAMDNodes AAInfo;
    I.getAAMetadata(AAInfo);
    Result = DAG.getMemIntrinsicNode(
        Info.opc, getCurSDLoc(), VTs, Ops, Info.memVT,
        MachinePointerInfo(Info.ptrVal, Info.offset),
        Info.align ? Info.align->value() : 0, Info.flags, Info.size, AAInfo);
  } else if (!HasChain) {
    Result = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, getCurSDLoc(), VTs, Ops);
  } else if (!I.getType()->isVoidTy()) {
    Result = DAG.getNode(ISD::INTRINSIC_W_CHAIN, getCurSDLoc(), VTs, Ops);
  } else {
    Result = DAG.getNode(ISD::INTRINSIC_VOID, getCurSDLoc(), VTs, Ops);
  }

  if (HasChain) {
    SDValue Chain = Result.getValue(Result.getNode()->getNumValues()-1);
    if (OnlyLoad)
      PendingLoads.push_back(Chain);
    else
      DAG.setRoot(Chain);
  }

  if (!I.getType()->isVoidTy()) {
    if (VectorType *PTy = dyn_cast<VectorType>(I.getType())) {
      EVT VT = TLI.getValueType(DAG.getDataLayout(), PTy);
      Result = DAG.getNode(ISD::BITCAST, getCurSDLoc(), VT, Result);
    } else
      // Attach an AssertZExt from !range metadata when present.
      Result = lowerRangeToAssertZExt(DAG, I, Result);

    setValue(&I, Result);
  }
}

/// GetSignificand - Get the significand and build it into a floating-point
/// number with exponent of 1:
///
///   Op = (Op & 0x007fffff) | 0x3f800000;
///
/// where Op is the hexadecimal representation of floating point value.
static SDValue GetSignificand(SelectionDAG &DAG, SDValue Op, const SDLoc &dl) {
  SDValue t1 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
                           DAG.getConstant(0x007fffff, dl, MVT::i32));
  SDValue t2 = DAG.getNode(ISD::OR, dl, MVT::i32, t1,
                           DAG.getConstant(0x3f800000, dl, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, MVT::f32, t2);
}

/// GetExponent - Get the exponent:
///
///   (float)(int)(((Op & 0x7f800000) >> 23) - 127);
///
/// where Op is the hexadecimal representation of floating point value.
static SDValue GetExponent(SelectionDAG &DAG, SDValue Op,
                           const TargetLowering &TLI, const SDLoc &dl) {
  // Extract the biased exponent field, shift it down, and remove the bias
  // (127) to get the unbiased exponent as a float.
  SDValue t0 = DAG.getNode(ISD::AND, dl, MVT::i32, Op,
                           DAG.getConstant(0x7f800000, dl, MVT::i32));
  SDValue t1 = DAG.getNode(
      ISD::SRL, dl, MVT::i32, t0,
      DAG.getConstant(23, dl, TLI.getPointerTy(DAG.getDataLayout())));
  SDValue t2 = DAG.getNode(ISD::SUB, dl, MVT::i32, t1,
                           DAG.getConstant(127, dl, MVT::i32));
  return DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, t2);
}

/// getF32Constant - Get 32-bit floating point constant, where \p Flt is the
/// raw IEEE-754 single-precision bit pattern of the desired value.
static SDValue getF32Constant(SelectionDAG &DAG, unsigned Flt,
                              const SDLoc &dl) {
  return DAG.getConstantFP(APFloat(APFloat::IEEEsingle(), APInt(32, Flt)), dl,
                           MVT::f32);
}

/// Build a limited-precision approximation of exp2(t0) for f32 by splitting
/// t0 into integer and fractional parts, approximating 2^frac with a
/// minimax polynomial selected by LimitFloatPrecision, and folding the
/// integer part back in via the exponent bits.
static SDValue getLimitedPrecisionExp2(SDValue t0, const SDLoc &dl,
                                       SelectionDAG &DAG) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?

  //   IntegerPartOfX = (int32_t)t0;
  SDValue IntegerPartOfX = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, t0);

  //   FractionalPartOfX = t0 - (float)IntegerPartOfX;
  SDValue t1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, IntegerPartOfX);
  SDValue X = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0, t1);

  //   IntegerPartOfX <<= 23;  (move into the f32 exponent field)
  IntegerPartOfX = DAG.getNode(
      ISD::SHL, dl, MVT::i32, IntegerPartOfX,
      DAG.getConstant(23, dl,
                      DAG.getTargetLoweringInfo().getPointerTy(
                          DAG.getDataLayout())));

  SDValue TwoToFractionalPartOfX;
  if (LimitFloatPrecision <= 6) {
    // For floating-point precision of 6:
    //
    //   TwoToFractionalPartOfX =
    //     0.997535578f +
    //       (0.735607626f + 0.252464424f * x) * x;
    //
    // error 0.0144103317, which is 6 bits
    // (hex operands below are the f32 bit patterns of the coefficients,
    // evaluated in Horner form)
    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                             getF32Constant(DAG, 0x3e814304, dl));
    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                             getF32Constant(DAG, 0x3f3c50c8, dl));
    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
    TwoToFractionalPartOfX =
        DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                    getF32Constant(DAG, 0x3f7f5e7e, dl));
  } else if (LimitFloatPrecision <= 12) {
    // For floating-point precision of 12:
    //
    //   TwoToFractionalPartOfX =
    //     0.999892986f +
    //       (0.696457318f +
    //         (0.224338339f + 0.792043434e-1f * x) * x) * x;
    //
    // error 0.000107046256, which is 13 to 14 bits
    // (hex operands are f32 bit patterns of the coefficients, Horner form)
    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                             getF32Constant(DAG, 0x3da235e3, dl));
    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                             getF32Constant(DAG, 0x3e65b8f3, dl));
    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
    SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                             getF32Constant(DAG, 0x3f324b07, dl));
    SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
    TwoToFractionalPartOfX =
        DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                    getF32Constant(DAG, 0x3f7ff8fd, dl));
  } else { // LimitFloatPrecision <= 18
    // For floating-point precision of 18:
    //
    //   TwoToFractionalPartOfX =
    //     0.999999982f +
    //       (0.693148872f +
    //         (0.240227044f +
    //           (0.554906021e-1f +
    //             (0.961591928e-2f +
    //               (0.136028312e-2f + 0.157059148e-3f *x)*x)*x)*x)*x)*x;
    // error 2.47208000*10^(-7), which is better than 18 bits
    SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                             getF32Constant(DAG, 0x3924b03e, dl));
    SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                             getF32Constant(DAG, 0x3ab24b87, dl));
    SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
    SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                             getF32Constant(DAG, 0x3c1d8c17, dl));
    SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
    SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                             getF32Constant(DAG, 0x3d634a1d, dl));
    SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
    SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                             getF32Constant(DAG, 0x3e75fe14, dl));
    SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
    SDValue t11 = DAG.getNode(ISD::FADD, dl, MVT::f32, t10,
                              getF32Constant(DAG, 0x3f317234, dl));
    SDValue t12 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t11, X);
    TwoToFractionalPartOfX =
        DAG.getNode(ISD::FADD, dl, MVT::f32, t12,
                    getF32Constant(DAG, 0x3f800000, dl));
  }

  // Add the exponent into the result in integer domain.
  // Adding the shifted integer part directly to the bit pattern effectively
  // multiplies the polynomial result by 2^IntegerPartOfX.
  SDValue t13 = DAG.getNode(ISD::BITCAST, dl, MVT::i32,
                            TwoToFractionalPartOfX);
  return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
                     DAG.getNode(ISD::ADD, dl, MVT::i32, t13, IntegerPartOfX));
}

/// expandExp - Lower an exp intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandExp(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                         const TargetLowering &TLI) {
  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {

    // Put the exponent in the right bit position for later addition to the
    // final result:
    //
    // #define LOG2OFe 1.4426950f
    //   t0 = Op * LOG2OFe
    // so that exp(Op) == exp2(t0).

    // TODO: What fast-math-flags should be set here?
    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, Op,
                             getF32Constant(DAG, 0x3fb8aa3b, dl));
    return getLimitedPrecisionExp2(t0, dl, DAG);
  }

  // No special expansion.
  return DAG.getNode(ISD::FEXP, dl, Op.getValueType(), Op);
}

/// expandLog - Lower a log intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                         const TargetLowering &TLI) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?

  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);

    // Scale the exponent by log(2) [0.69314718f].
    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
                                        getF32Constant(DAG, 0x3f317218, dl));

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    // Minimax polynomial approximations of log(mantissa) over [1,2];
    // hex operands are f32 bit patterns of the coefficients (Horner form).
    SDValue LogOfMantissa;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   LogOfMantissa =
      //     -1.1609546f +
      //       (1.4034025f - 0.23903021f * x) * x;
      //
      // error 0.0034276066, which is better than 8 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbe74c456, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3fb3a2b1, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                  getF32Constant(DAG, 0x3f949a29, dl));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   LogOfMantissa =
      //     -1.7417939f +
      //       (2.8212026f +
      //         (-1.4699568f +
      //           (0.44717955f - 0.56570851e-1f * x) * x) * x) * x;
      //
      // error 0.000061011436, which is 14 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbd67b6d6, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3ee4f4b8, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3fbc278b, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40348e95, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                                  getF32Constant(DAG, 0x3fdef31a, dl));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   LogOfMantissa =
      //     -2.1072184f +
      //       (4.2372794f +
      //         (-3.7029485f +
      //           (2.2781945f +
      //             (-0.87823314f +
      //               (0.19073739f - 0.17809712e-1f * x) * x) * x) * x) * x)*x;
      //
      // error 0.0000023660568, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbc91e5ac, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e4350aa, dl));
      SDValue t2 =
          DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f60d3e3, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x4011cdf0, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x406cfd1c, dl));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                               getF32Constant(DAG, 0x408797cb, dl));
      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
      LogOfMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
                                  getF32Constant(DAG, 0x4006dcab, dl));
    }

    // log(Op) = log(2^exponent * mantissa)
    //         = exponent*log(2) + log(mantissa)
    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, LogOfMantissa);
  }

  // No special expansion.
  return DAG.getNode(ISD::FLOG, dl, Op.getValueType(), Op);
}

/// expandLog2 - Lower a log2 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                          const TargetLowering &TLI) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?

  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);

    // Get the exponent.
    SDValue LogOfExponent = GetExponent(DAG, Op1, TLI, dl);

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    // Different possible minimax approximations of significand in
    // floating-point for various degrees of accuracy over [1,2].
    // Hex operands are f32 bit patterns of the coefficients (Horner form).
    SDValue Log2ofMantissa;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   Log2ofMantissa = -1.6749035f + (2.0246817f - .34484768f * x) * x;
      //
      // error 0.0049451742, which is more than 7 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbeb08fe0, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x40019463, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                   getF32Constant(DAG, 0x3fd6633d, dl));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   Log2ofMantissa =
      //     -2.51285454f +
      //       (4.07009056f +
      //         (-2.12067489f +
      //           (.645142248f - 0.816157886e-1f * x) * x) * x) * x;
      //
      // error 0.0000876136000, which is better than 13 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbda7262e, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3f25280b, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x4007b923, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40823e2f, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                                   getF32Constant(DAG, 0x4020d29c, dl));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   Log2ofMantissa =
      //     -3.0400495f +
      //       (6.1129976f +
      //         (-5.3420409f +
      //           (3.2865683f +
      //             (-1.2669343f +
      //               (0.27515199f -
      //                 0.25691327e-1f * x) * x) * x) * x) * x) * x;
      //
      // error 0.0000018516, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbcd2769e, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e8ce0b9, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl,
                               MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3fa22ae7, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FADD, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x40525723, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x40aaf200, dl));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      SDValue t9 = DAG.getNode(ISD::FADD, dl, MVT::f32, t8,
                               getF32Constant(DAG, 0x40c39dad, dl));
      SDValue t10 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t9, X);
      Log2ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t10,
                                   getF32Constant(DAG, 0x4042902c, dl));
    }

    // log2(Op) = exponent + log2(mantissa)
    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent, Log2ofMantissa);
  }

  // No special expansion.
  return DAG.getNode(ISD::FLOG2, dl, Op.getValueType(), Op);
}

/// expandLog10 - Lower a log10 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandLog10(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                           const TargetLowering &TLI) {
  // TODO: What fast-math-flags should be set on the floating-point nodes?

  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op);

    // Scale the exponent by log10(2) [0.30102999f].
    SDValue Exp = GetExponent(DAG, Op1, TLI, dl);
    SDValue LogOfExponent = DAG.getNode(ISD::FMUL, dl, MVT::f32, Exp,
                                        getF32Constant(DAG, 0x3e9a209a, dl));

    // Get the significand and build it into a floating-point number with
    // exponent of 1.
    SDValue X = GetSignificand(DAG, Op1, dl);

    // Minimax polynomial approximations of log10(mantissa) over [1,2];
    // hex operands are f32 bit patterns of the coefficients (Horner form).
    SDValue Log10ofMantissa;
    if (LimitFloatPrecision <= 6) {
      // For floating-point precision of 6:
      //
      //   Log10ofMantissa =
      //     -0.50419619f +
      //       (0.60948995f - 0.10380950f * x) * x;
      //
      // error 0.0014886165, which is 6 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0xbdd49a13, dl));
      SDValue t1 = DAG.getNode(ISD::FADD, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3f1c0789, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t2,
                                    getF32Constant(DAG, 0x3f011300, dl));
    } else if (LimitFloatPrecision <= 12) {
      // For floating-point precision of 12:
      //
      //   Log10ofMantissa =
      //     -0.64831180f +
      //       (0.91751397f +
      //         (-0.31664806f + 0.47637168e-1f * x) * x) * x;
      //
      // error 0.00019228036, which is better than 12 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3d431f31, dl));
      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3ea21fb2, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3f6ae232, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t3, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
                                    getF32Constant(DAG, 0x3f25f7c3, dl));
    } else { // LimitFloatPrecision <= 18
      // For floating-point precision of 18:
      //
      //   Log10ofMantissa =
      //     -0.84299375f +
      //       (1.5327582f +
      //         (-1.0688956f +
      //           (0.49102474f +
      //             (-0.12539807f + 0.13508273e-1f * x) * x) * x) * x) * x;
      //
      // error 0.0000037995730, which is better than 18 bits
      SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, X,
                               getF32Constant(DAG, 0x3c5d51ce, dl));
      SDValue t1 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t0,
                               getF32Constant(DAG, 0x3e00685a, dl));
      SDValue t2 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t1, X);
      SDValue t3 = DAG.getNode(ISD::FADD, dl, MVT::f32, t2,
                               getF32Constant(DAG, 0x3efb6798, dl));
      SDValue t4 = DAG.getNode(ISD::FMUL, dl,
                               MVT::f32, t3, X);
      SDValue t5 = DAG.getNode(ISD::FSUB, dl, MVT::f32, t4,
                               getF32Constant(DAG, 0x3f88d192, dl));
      SDValue t6 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t5, X);
      SDValue t7 = DAG.getNode(ISD::FADD, dl, MVT::f32, t6,
                               getF32Constant(DAG, 0x3fc4316c, dl));
      SDValue t8 = DAG.getNode(ISD::FMUL, dl, MVT::f32, t7, X);
      Log10ofMantissa = DAG.getNode(ISD::FSUB, dl, MVT::f32, t8,
                                    getF32Constant(DAG, 0x3f57ce70, dl));
    }

    // log10(Op) = exponent*log10(2) + log10(mantissa)
    return DAG.getNode(ISD::FADD, dl, MVT::f32, LogOfExponent,
                       Log10ofMantissa);
  }

  // No special expansion.
  return DAG.getNode(ISD::FLOG10, dl, Op.getValueType(), Op);
}

/// expandExp2 - Lower an exp2 intrinsic. Handles the special sequences for
/// limited-precision mode.
static SDValue expandExp2(const SDLoc &dl, SDValue Op, SelectionDAG &DAG,
                          const TargetLowering &TLI) {
  if (Op.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18)
    return getLimitedPrecisionExp2(Op, dl, DAG);

  // No special expansion.
  return DAG.getNode(ISD::FEXP2, dl, Op.getValueType(), Op);
}

/// expandPow - Lower a pow intrinsic. Handles the special sequences for
/// limited-precision mode with x == 10.0f.
static SDValue expandPow(const SDLoc &dl, SDValue LHS, SDValue RHS,
                         SelectionDAG &DAG, const TargetLowering &TLI) {
  bool IsExp10 = false;
  if (LHS.getValueType() == MVT::f32 && RHS.getValueType() == MVT::f32 &&
      LimitFloatPrecision > 0 && LimitFloatPrecision <= 18) {
    if (ConstantFPSDNode *LHSC = dyn_cast<ConstantFPSDNode>(LHS)) {
      APFloat Ten(10.0f);
      IsExp10 = LHSC->isExactlyValue(Ten);
    }
  }

  // TODO: What fast-math-flags should be set on the FMUL node?
  if (IsExp10) {
    // pow(10, x) == exp2(x * log2(10)).
    // Put the exponent in the right bit position for later addition to the
    // final result:
    //
    //   #define LOG2OF10 3.3219281f
    //   t0 = Op * LOG2OF10;
    SDValue t0 = DAG.getNode(ISD::FMUL, dl, MVT::f32, RHS,
                             getF32Constant(DAG, 0x40549a78, dl));
    return getLimitedPrecisionExp2(t0, dl, DAG);
  }

  // No special expansion.
  return DAG.getNode(ISD::FPOW, dl, LHS.getValueType(), LHS, RHS);
}

/// ExpandPowI - Expand a llvm.powi intrinsic.
static SDValue ExpandPowI(const SDLoc &DL, SDValue LHS, SDValue RHS,
                          SelectionDAG &DAG) {
  // If RHS is a constant, we can expand this out to a multiplication tree,
  // otherwise we end up lowering to a call to __powidf2 (for example).  When
  // optimizing for size, we only want to do this if the expansion would
  // produce a small number of multiplies, otherwise we do the full expansion.
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
    // Get the exponent as a positive value.
    // NOTE(review): negating in unsigned avoids UB, but for an exponent of
    // INT_MIN the value wraps back to INT_MIN — verify callers can't pass it.
    unsigned Val = RHSC->getSExtValue();
    if ((int)Val < 0) Val = -Val;

    // powi(x, 0) -> 1.0
    if (Val == 0)
      return DAG.getConstantFP(1.0, DL, LHS.getValueType());

    const Function &F = DAG.getMachineFunction().getFunction();
    if (!F.hasOptSize() ||
        // If optimizing for size, don't insert too many multiplies.
        // This inserts up to 5 multiplies.
        countPopulation(Val) + Log2_32(Val) < 7) {
      // We use the simple binary decomposition method to generate the multiply
      // sequence.  There are more optimal ways to do this (for example,
      // powi(x,15) generates one more multiply than it should), but this has
      // the benefit of being both really simple and much better than a
      // libcall.
      SDValue Res;  // Logically starts equal to 1.0
      SDValue CurSquare = LHS;
      // TODO: Intrinsics should have fast-math-flags that propagate to these
      // nodes.
      while (Val) {
        // Multiply in CurSquare (x^(2^i)) for each set bit of the exponent.
        if (Val & 1) {
          if (Res.getNode())
            Res = DAG.getNode(ISD::FMUL, DL,Res.getValueType(), Res, CurSquare);
          else
            Res = CurSquare;  // 1.0*CurSquare.
        }

        CurSquare = DAG.getNode(ISD::FMUL, DL, CurSquare.getValueType(),
                                CurSquare, CurSquare);
        Val >>= 1;
      }

      // If the original was negative, invert the result, producing
      // 1/(x*x*x).
      if (RHSC->getSExtValue() < 0)
        Res = DAG.getNode(ISD::FDIV, DL, LHS.getValueType(),
                          DAG.getConstantFP(1.0, DL, LHS.getValueType()), Res);
      return Res;
    }
  }

  // Otherwise, expand to a libcall.
  return DAG.getNode(ISD::FPOWI, DL, LHS.getValueType(), LHS, RHS);
}

// getUnderlyingArgRegs - Find underlying registers used for a truncated,
// bitcasted, or split argument. Returns a list of <Register, size in bits>
static void
getUnderlyingArgRegs(SmallVectorImpl<std::pair<unsigned, unsigned>> &Regs,
                     const SDValue &N) {
  switch (N.getOpcode()) {
  case ISD::CopyFromReg: {
    // Direct register copy: record the register and its value width.
    SDValue Op = N.getOperand(1);
    Regs.emplace_back(cast<RegisterSDNode>(Op)->getReg(),
                      Op.getValueType().getSizeInBits());
    return;
  }
  case ISD::BITCAST:
  case ISD::AssertZext:
  case ISD::AssertSext:
  case ISD::TRUNCATE:
    // Look through value-preserving wrappers.
    getUnderlyingArgRegs(Regs, N.getOperand(0));
    return;
  case ISD::BUILD_PAIR:
  case ISD::BUILD_VECTOR:
  case ISD::CONCAT_VECTORS:
    // Value assembled from pieces: collect registers from every piece.
    for (SDValue Op : N->op_values())
      getUnderlyingArgRegs(Regs, Op);
    return;
  default:
    return;
  }
}

/// If the DbgValueInst is a dbg_value of a function argument, create the
/// corresponding DBG_VALUE machine instruction for it now.  At the end of
/// instruction selection, they will be inserted to the entry BB.
bool SelectionDAGBuilder::EmitFuncArgumentDbgValue(
    const Value *V, DILocalVariable *Variable, DIExpression *Expr,
    DILocation *DL, bool IsDbgDeclare, const SDValue &N) {
  const Argument *Arg = dyn_cast<Argument>(V);
  if (!Arg)
    return false;

  if (!IsDbgDeclare) {
    // ArgDbgValues are hoisted to the beginning of the entry block. So we
    // should only emit as ArgDbgValue if the dbg.value intrinsic is found in
    // the entry block.
    bool IsInEntryBlock = FuncInfo.MBB == &FuncInfo.MF->front();
    if (!IsInEntryBlock)
      return false;

    // ArgDbgValues are hoisted to the beginning of the entry block. So we
    // should only emit as ArgDbgValue if the dbg.value intrinsic describes a
    // variable that also is a param.
    //
    // Although, if we are at the top of the entry block already, we can still
    // emit using ArgDbgValue.
    // This might catch some situations when the
    // dbg.value refers to an argument that isn't used in the entry block, so
    // any CopyToReg node would be optimized out and the only way to express
    // this DBG_VALUE is by using the physical reg (or FI) as done in this
    // method. ArgDbgValues are hoisted to the beginning of the entry block. So
    // we should only emit as ArgDbgValue if the Variable is an argument to the
    // current function, and the dbg.value intrinsic is found in the entry
    // block.
    bool VariableIsFunctionInputArg = Variable->isParameter() &&
                                      !DL->getInlinedAt();
    bool IsInPrologue = SDNodeOrder == LowestSDNodeOrder;
    if (!IsInPrologue && !VariableIsFunctionInputArg)
      return false;

    // Here we assume that a function argument on IR level only can be used to
    // describe one input parameter on source level. If we for example have
    // source code like this
    //
    //    struct A { long x, y; };
    //    void foo(struct A a, long b) {
    //      ...
    //      b = a.x;
    //      ...
    //    }
    //
    // and IR like this
    //
    //  define void @foo(i32 %a1, i32 %a2, i32 %b) {
    //  entry:
    //    call void @llvm.dbg.value(metadata i32 %a1, "a", DW_OP_LLVM_fragment
    //    call void @llvm.dbg.value(metadata i32 %a2, "a", DW_OP_LLVM_fragment
    //    call void @llvm.dbg.value(metadata i32 %b, "b",
    //    ...
    //    call void @llvm.dbg.value(metadata i32 %a1, "b"
    //    ...
    //
    // then the last dbg.value is describing a parameter "b" using a value that
    // is an argument. But since we have already used %a1 to describe a
    // parameter we should not handle that last dbg.value here (that would
    // result in an incorrect hoisting of the DBG_VALUE to the function entry).
    // Notice that we allow one dbg.value per IR level argument, to accommodate
    // for the situation with fragments above.
    // Record which IR arguments have already been used to describe a
    // parameter, so each argument describes at most one (see comment above).
    if (VariableIsFunctionInputArg) {
      unsigned ArgNo = Arg->getArgNo();
      if (ArgNo >= FuncInfo.DescribedArgs.size())
        FuncInfo.DescribedArgs.resize(ArgNo + 1, false);
      else if (!IsInPrologue && FuncInfo.DescribedArgs.test(ArgNo))
        return false;
      FuncInfo.DescribedArgs.set(ArgNo);
    }
  }

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo();

  bool IsIndirect = false;
  Optional<MachineOperand> Op;
  // Some arguments' frame index is recorded during argument lowering.
  int FI = FuncInfo.getArgumentFrameIndex(Arg);
  if (FI != std::numeric_limits<int>::max())
    Op = MachineOperand::CreateFI(FI);

  SmallVector<std::pair<unsigned, unsigned>, 8> ArgRegsAndSizes;
  if (!Op && N.getNode()) {
    // No frame index: try to describe the value via the register(s) backing N.
    getUnderlyingArgRegs(ArgRegsAndSizes, N);
    Register Reg;
    if (ArgRegsAndSizes.size() == 1)
      Reg = ArgRegsAndSizes.front().first;

    if (Reg && Reg.isVirtual()) {
      // Prefer the incoming physical register tied to this vreg, if any.
      MachineRegisterInfo &RegInfo = MF.getRegInfo();
      Register PR = RegInfo.getLiveInPhysReg(Reg);
      if (PR)
        Reg = PR;
    }
    if (Reg) {
      Op = MachineOperand::CreateReg(Reg, false);
      IsIndirect = IsDbgDeclare;
    }
  }

  if (!Op && N.getNode()) {
    // Check if frame index is available.
    SDValue LCandidate = peekThroughBitcasts(N);
    if (LoadSDNode *LNode = dyn_cast<LoadSDNode>(LCandidate.getNode()))
      if (FrameIndexSDNode *FINode =
              dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
        Op = MachineOperand::CreateFI(FINode->getIndex());
  }

  if (!Op) {
    // Create a DBG_VALUE for each decomposed value in ArgRegs to cover Reg
    auto splitMultiRegDbgValue = [&](ArrayRef<std::pair<unsigned, unsigned>>
                                         SplitRegs) {
      // Emit one fragment DBG_VALUE per constituent register, advancing the
      // fragment offset by each register's size in bits.
      unsigned Offset = 0;
      for (auto RegAndSize : SplitRegs) {
        auto FragmentExpr = DIExpression::createFragmentExpression(
            Expr, Offset, RegAndSize.second);
        if (!FragmentExpr)
          continue;
        FuncInfo.ArgDbgValues.push_back(
            BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsDbgDeclare,
                    RegAndSize.first, Variable, *FragmentExpr));
        Offset += RegAndSize.second;
      }
    };

    // Check if ValueMap has reg number.
    DenseMap<const Value *, unsigned>::const_iterator
        VMI = FuncInfo.ValueMap.find(V);
    if (VMI != FuncInfo.ValueMap.end()) {
      const auto &TLI = DAG.getTargetLoweringInfo();
      RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), VMI->second,
                       V->getType(), getABIRegCopyCC(V));
      if (RFV.occupiesMultipleRegs()) {
        // Value is spread over several registers: emit per-register fragments.
        splitMultiRegDbgValue(RFV.getRegsAndSizes());
        return true;
      }

      Op = MachineOperand::CreateReg(VMI->second, false);
      IsIndirect = IsDbgDeclare;
    } else if (ArgRegsAndSizes.size() > 1) {
      // This was split due to the calling convention, and no virtual register
      // mapping exists for the value.
      splitMultiRegDbgValue(ArgRegsAndSizes);
      return true;
    }
  }

  if (!Op)
    return false;

  assert(Variable->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  // Non-register locations (frame indices) are always indirect.
  IsIndirect = (Op->isReg()) ? IsIndirect : true;
  FuncInfo.ArgDbgValues.push_back(
      BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE), IsIndirect,
              *Op, Variable, Expr));

  return true;
}

/// Return the appropriate SDDbgValue based on N.
SDDbgValue *SelectionDAGBuilder::getDbgValue(SDValue N,
                                             DILocalVariable *Variable,
                                             DIExpression *Expr,
                                             const DebugLoc &dl,
                                             unsigned DbgSDNodeOrder) {
  if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
    // Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
    // stack slot locations.
    //
    // Consider "int x = 0; int *px = &x;". There are two kinds of interesting
    // debug values here after optimization:
    //
    //   dbg.value(i32* %px, !"int *px", !DIExpression()), and
    //   dbg.value(i32* %px, !"int x", !DIExpression(DW_OP_deref))
    //
    // Both describe the direct values of their associated variables.
    return DAG.getFrameIndexDbgValue(Variable, Expr, FISDN->getIndex(),
                                     /*IsIndirect*/ false, dl, DbgSDNodeOrder);
  }
  return DAG.getDbgValue(Variable, Expr, N.getNode(), N.getResNo(),
                         /*IsIndirect*/ false, dl, DbgSDNodeOrder);
}

// VisualStudio defines setjmp as _setjmp
// (push/pop the macro so the Intrinsic::setjmp case below compiles on MSVC).
#if defined(_MSC_VER) && defined(setjmp) && \
    !defined(setjmp_undefined_for_msvc)
#  pragma push_macro("setjmp")
#  undef setjmp
#  define setjmp_undefined_for_msvc
#endif

/// Map a fixed-point intrinsic ID to its corresponding ISD opcode.
static unsigned FixedPointIntrinsicToOpcode(unsigned Intrinsic) {
  switch (Intrinsic) {
  case Intrinsic::smul_fix:
    return ISD::SMULFIX;
  case Intrinsic::umul_fix:
    return ISD::UMULFIX;
  default:
    llvm_unreachable("Unhandled fixed point intrinsic");
  }
}

void SelectionDAGBuilder::lowerCallToExternalSymbol(const CallInst &I,
                                           const char *FunctionName) {
  assert(FunctionName && "FunctionName must not be nullptr");
  SDValue Callee = DAG.getExternalSymbol(
      FunctionName,
      DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout()));
  LowerCallTo(&I, Callee, I.isTailCall());
}

/// Lower the call to the specified intrinsic function.
void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
                                             unsigned Intrinsic) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDLoc sdl = getCurSDLoc();
  DebugLoc dl = getCurDebugLoc();
  SDValue Res;

  switch (Intrinsic) {
  default:
    // By default, turn this into a target intrinsic node.
visitTargetIntrinsic(I, Intrinsic); return; case Intrinsic::vastart: visitVAStart(I); return; case Intrinsic::vaend: visitVAEnd(I); return; case Intrinsic::vacopy: visitVACopy(I); return; case Intrinsic::returnaddress: setValue(&I, DAG.getNode(ISD::RETURNADDR, sdl, TLI.getPointerTy(DAG.getDataLayout()), getValue(I.getArgOperand(0)))); return; case Intrinsic::addressofreturnaddress: setValue(&I, DAG.getNode(ISD::ADDROFRETURNADDR, sdl, TLI.getPointerTy(DAG.getDataLayout()))); return; case Intrinsic::sponentry: setValue(&I, DAG.getNode(ISD::SPONENTRY, sdl, TLI.getFrameIndexTy(DAG.getDataLayout()))); return; case Intrinsic::frameaddress: setValue(&I, DAG.getNode(ISD::FRAMEADDR, sdl, TLI.getFrameIndexTy(DAG.getDataLayout()), getValue(I.getArgOperand(0)))); return; case Intrinsic::read_register: { Value *Reg = I.getArgOperand(0); SDValue Chain = getRoot(); SDValue RegName = DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata())); EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); Res = DAG.getNode(ISD::READ_REGISTER, sdl, DAG.getVTList(VT, MVT::Other), Chain, RegName); setValue(&I, Res); DAG.setRoot(Res.getValue(1)); return; } case Intrinsic::write_register: { Value *Reg = I.getArgOperand(0); Value *RegValue = I.getArgOperand(1); SDValue Chain = getRoot(); SDValue RegName = DAG.getMDNode(cast<MDNode>(cast<MetadataAsValue>(Reg)->getMetadata())); DAG.setRoot(DAG.getNode(ISD::WRITE_REGISTER, sdl, MVT::Other, Chain, RegName, getValue(RegValue))); return; } case Intrinsic::setjmp: lowerCallToExternalSymbol(I, &"_setjmp"[!TLI.usesUnderscoreSetJmp()]); return; case Intrinsic::longjmp: lowerCallToExternalSymbol(I, &"_longjmp"[!TLI.usesUnderscoreLongJmp()]); return; case Intrinsic::memcpy: { const auto &MCI = cast<MemCpyInst>(I); SDValue Op1 = getValue(I.getArgOperand(0)); SDValue Op2 = getValue(I.getArgOperand(1)); SDValue Op3 = getValue(I.getArgOperand(2)); // @llvm.memcpy defines 0 and 1 to both mean no alignment. 
unsigned DstAlign = std::max<unsigned>(MCI.getDestAlignment(), 1); unsigned SrcAlign = std::max<unsigned>(MCI.getSourceAlignment(), 1); unsigned Align = MinAlign(DstAlign, SrcAlign); bool isVol = MCI.isVolatile(); bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget()); // FIXME: Support passing different dest/src alignments to the memcpy DAG // node. SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, false, isTC, MachinePointerInfo(I.getArgOperand(0)), MachinePointerInfo(I.getArgOperand(1))); updateDAGForMaybeTailCall(MC); return; } case Intrinsic::memset: { const auto &MSI = cast<MemSetInst>(I); SDValue Op1 = getValue(I.getArgOperand(0)); SDValue Op2 = getValue(I.getArgOperand(1)); SDValue Op3 = getValue(I.getArgOperand(2)); // @llvm.memset defines 0 and 1 to both mean no alignment. unsigned Align = std::max<unsigned>(MSI.getDestAlignment(), 1); bool isVol = MSI.isVolatile(); bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget()); SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, isTC, MachinePointerInfo(I.getArgOperand(0))); updateDAGForMaybeTailCall(MS); return; } case Intrinsic::memmove: { const auto &MMI = cast<MemMoveInst>(I); SDValue Op1 = getValue(I.getArgOperand(0)); SDValue Op2 = getValue(I.getArgOperand(1)); SDValue Op3 = getValue(I.getArgOperand(2)); // @llvm.memmove defines 0 and 1 to both mean no alignment. unsigned DstAlign = std::max<unsigned>(MMI.getDestAlignment(), 1); unsigned SrcAlign = std::max<unsigned>(MMI.getSourceAlignment(), 1); unsigned Align = MinAlign(DstAlign, SrcAlign); bool isVol = MMI.isVolatile(); bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget()); // FIXME: Support passing different dest/src alignments to the memmove DAG // node. 
SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol, isTC, MachinePointerInfo(I.getArgOperand(0)), MachinePointerInfo(I.getArgOperand(1))); updateDAGForMaybeTailCall(MM); return; } case Intrinsic::memcpy_element_unordered_atomic: { const AtomicMemCpyInst &MI = cast<AtomicMemCpyInst>(I); SDValue Dst = getValue(MI.getRawDest()); SDValue Src = getValue(MI.getRawSource()); SDValue Length = getValue(MI.getLength()); unsigned DstAlign = MI.getDestAlignment(); unsigned SrcAlign = MI.getSourceAlignment(); Type *LengthTy = MI.getLength()->getType(); unsigned ElemSz = MI.getElementSizeInBytes(); bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget()); SDValue MC = DAG.getAtomicMemcpy(getRoot(), sdl, Dst, DstAlign, Src, SrcAlign, Length, LengthTy, ElemSz, isTC, MachinePointerInfo(MI.getRawDest()), MachinePointerInfo(MI.getRawSource())); updateDAGForMaybeTailCall(MC); return; } case Intrinsic::memmove_element_unordered_atomic: { auto &MI = cast<AtomicMemMoveInst>(I); SDValue Dst = getValue(MI.getRawDest()); SDValue Src = getValue(MI.getRawSource()); SDValue Length = getValue(MI.getLength()); unsigned DstAlign = MI.getDestAlignment(); unsigned SrcAlign = MI.getSourceAlignment(); Type *LengthTy = MI.getLength()->getType(); unsigned ElemSz = MI.getElementSizeInBytes(); bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget()); SDValue MC = DAG.getAtomicMemmove(getRoot(), sdl, Dst, DstAlign, Src, SrcAlign, Length, LengthTy, ElemSz, isTC, MachinePointerInfo(MI.getRawDest()), MachinePointerInfo(MI.getRawSource())); updateDAGForMaybeTailCall(MC); return; } case Intrinsic::memset_element_unordered_atomic: { auto &MI = cast<AtomicMemSetInst>(I); SDValue Dst = getValue(MI.getRawDest()); SDValue Val = getValue(MI.getValue()); SDValue Length = getValue(MI.getLength()); unsigned DstAlign = MI.getDestAlignment(); Type *LengthTy = MI.getLength()->getType(); unsigned ElemSz = MI.getElementSizeInBytes(); bool isTC = I.isTailCall() && 
isInTailCallPosition(&I, DAG.getTarget()); SDValue MC = DAG.getAtomicMemset(getRoot(), sdl, Dst, DstAlign, Val, Length, LengthTy, ElemSz, isTC, MachinePointerInfo(MI.getRawDest())); updateDAGForMaybeTailCall(MC); return; } case Intrinsic::dbg_addr: case Intrinsic::dbg_declare: { const auto &DI = cast<DbgVariableIntrinsic>(I); DILocalVariable *Variable = DI.getVariable(); DIExpression *Expression = DI.getExpression(); dropDanglingDebugInfo(Variable, Expression); assert(Variable && "Missing variable"); // Check if address has undef value. const Value *Address = DI.getVariableLocation(); if (!Address || isa<UndefValue>(Address) || (Address->use_empty() && !isa<Argument>(Address))) { LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); return; } bool isParameter = Variable->isParameter() || isa<Argument>(Address); // Check if this variable can be described by a frame index, typically // either as a static alloca or a byval parameter. int FI = std::numeric_limits<int>::max(); if (const auto *AI = dyn_cast<AllocaInst>(Address->stripInBoundsConstantOffsets())) { if (AI->isStaticAlloca()) { auto I = FuncInfo.StaticAllocaMap.find(AI); if (I != FuncInfo.StaticAllocaMap.end()) FI = I->second; } } else if (const auto *Arg = dyn_cast<Argument>( Address->stripInBoundsConstantOffsets())) { FI = FuncInfo.getArgumentFrameIndex(Arg); } // llvm.dbg.addr is control dependent and always generates indirect // DBG_VALUE instructions. llvm.dbg.declare is handled as a frame index in // the MachineFunction variable table. if (FI != std::numeric_limits<int>::max()) { if (Intrinsic == Intrinsic::dbg_addr) { SDDbgValue *SDV = DAG.getFrameIndexDbgValue( Variable, Expression, FI, /*IsIndirect*/ true, dl, SDNodeOrder); DAG.AddDbgValue(SDV, getRoot().getNode(), isParameter); } return; } SDValue &N = NodeMap[Address]; if (!N.getNode() && isa<Argument>(Address)) // Check unused arguments map. 
N = UnusedArgNodeMap[Address]; SDDbgValue *SDV; if (N.getNode()) { if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Address)) Address = BCI->getOperand(0); // Parameters are handled specially. auto FINode = dyn_cast<FrameIndexSDNode>(N.getNode()); if (isParameter && FINode) { // Byval parameter. We have a frame index at this point. SDV = DAG.getFrameIndexDbgValue(Variable, Expression, FINode->getIndex(), /*IsIndirect*/ true, dl, SDNodeOrder); } else if (isa<Argument>(Address)) { // Address is an argument, so try to emit its dbg value using // virtual register info from the FuncInfo.ValueMap. EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N); return; } else { SDV = DAG.getDbgValue(Variable, Expression, N.getNode(), N.getResNo(), true, dl, SDNodeOrder); } DAG.AddDbgValue(SDV, N.getNode(), isParameter); } else { // If Address is an argument then try to emit its dbg value using // virtual register info from the FuncInfo.ValueMap. if (!EmitFuncArgumentDbgValue(Address, Variable, Expression, dl, true, N)) { LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n"); } } return; } case Intrinsic::dbg_label: { const DbgLabelInst &DI = cast<DbgLabelInst>(I); DILabel *Label = DI.getLabel(); assert(Label && "Missing label"); SDDbgLabel *SDV; SDV = DAG.getDbgLabel(Label, dl, SDNodeOrder); DAG.AddDbgLabel(SDV); return; } case Intrinsic::dbg_value: { const DbgValueInst &DI = cast<DbgValueInst>(I); assert(DI.getVariable() && "Missing variable"); DILocalVariable *Variable = DI.getVariable(); DIExpression *Expression = DI.getExpression(); dropDanglingDebugInfo(Variable, Expression); const Value *V = DI.getValue(); if (!V) return; if (handleDebugValue(V, Variable, Expression, dl, DI.getDebugLoc(), SDNodeOrder)) return; // TODO: Dangling debug info will eventually either be resolved or produce // an Undef DBG_VALUE. 
However in the resolution case, a gap may appear // between the original dbg.value location and its resolved DBG_VALUE, which // we should ideally fill with an extra Undef DBG_VALUE. DanglingDebugInfoMap[V].emplace_back(&DI, dl, SDNodeOrder); return; } case Intrinsic::eh_typeid_for: { // Find the type id for the given typeinfo. GlobalValue *GV = ExtractTypeInfo(I.getArgOperand(0)); unsigned TypeID = DAG.getMachineFunction().getTypeIDFor(GV); Res = DAG.getConstant(TypeID, sdl, MVT::i32); setValue(&I, Res); return; } case Intrinsic::eh_return_i32: case Intrinsic::eh_return_i64: DAG.getMachineFunction().setCallsEHReturn(true); DAG.setRoot(DAG.getNode(ISD::EH_RETURN, sdl, MVT::Other, getControlRoot(), getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)))); return; case Intrinsic::eh_unwind_init: DAG.getMachineFunction().setCallsUnwindInit(true); return; case Intrinsic::eh_dwarf_cfa: setValue(&I, DAG.getNode(ISD::EH_DWARF_CFA, sdl, TLI.getPointerTy(DAG.getDataLayout()), getValue(I.getArgOperand(0)))); return; case Intrinsic::eh_sjlj_callsite: { MachineModuleInfo &MMI = DAG.getMachineFunction().getMMI(); ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(0)); assert(CI && "Non-constant call site value in eh.sjlj.callsite!"); assert(MMI.getCurrentCallSite() == 0 && "Overlapping call sites!"); MMI.setCurrentCallSite(CI->getZExtValue()); return; } case Intrinsic::eh_sjlj_functioncontext: { // Get and store the index of the function context. 
MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo(); AllocaInst *FnCtx = cast<AllocaInst>(I.getArgOperand(0)->stripPointerCasts()); int FI = FuncInfo.StaticAllocaMap[FnCtx]; MFI.setFunctionContextIndex(FI); return; } case Intrinsic::eh_sjlj_setjmp: { SDValue Ops[2]; Ops[0] = getRoot(); Ops[1] = getValue(I.getArgOperand(0)); SDValue Op = DAG.getNode(ISD::EH_SJLJ_SETJMP, sdl, DAG.getVTList(MVT::i32, MVT::Other), Ops); setValue(&I, Op.getValue(0)); DAG.setRoot(Op.getValue(1)); return; } case Intrinsic::eh_sjlj_longjmp: DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_LONGJMP, sdl, MVT::Other, getRoot(), getValue(I.getArgOperand(0)))); return; case Intrinsic::eh_sjlj_setup_dispatch: DAG.setRoot(DAG.getNode(ISD::EH_SJLJ_SETUP_DISPATCH, sdl, MVT::Other, getRoot())); return; case Intrinsic::masked_gather: visitMaskedGather(I); return; case Intrinsic::masked_load: visitMaskedLoad(I); return; case Intrinsic::masked_scatter: visitMaskedScatter(I); return; case Intrinsic::masked_store: visitMaskedStore(I); return; case Intrinsic::masked_expandload: visitMaskedLoad(I, true /* IsExpanding */); return; case Intrinsic::masked_compressstore: visitMaskedStore(I, true /* IsCompressing */); return; case Intrinsic::powi: setValue(&I, ExpandPowI(sdl, getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), DAG)); return; case Intrinsic::log: setValue(&I, expandLog(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); return; case Intrinsic::log2: setValue(&I, expandLog2(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); return; case Intrinsic::log10: setValue(&I, expandLog10(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); return; case Intrinsic::exp: setValue(&I, expandExp(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); return; case Intrinsic::exp2: setValue(&I, expandExp2(sdl, getValue(I.getArgOperand(0)), DAG, TLI)); return; case Intrinsic::pow: setValue(&I, expandPow(sdl, getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), DAG, TLI)); return; case Intrinsic::sqrt: case 
Intrinsic::fabs: case Intrinsic::sin: case Intrinsic::cos: case Intrinsic::floor: case Intrinsic::ceil: case Intrinsic::trunc: case Intrinsic::rint: case Intrinsic::nearbyint: case Intrinsic::round: case Intrinsic::canonicalize: { unsigned Opcode; switch (Intrinsic) { default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. case Intrinsic::sqrt: Opcode = ISD::FSQRT; break; case Intrinsic::fabs: Opcode = ISD::FABS; break; case Intrinsic::sin: Opcode = ISD::FSIN; break; case Intrinsic::cos: Opcode = ISD::FCOS; break; case Intrinsic::floor: Opcode = ISD::FFLOOR; break; case Intrinsic::ceil: Opcode = ISD::FCEIL; break; case Intrinsic::trunc: Opcode = ISD::FTRUNC; break; case Intrinsic::rint: Opcode = ISD::FRINT; break; case Intrinsic::nearbyint: Opcode = ISD::FNEARBYINT; break; case Intrinsic::round: Opcode = ISD::FROUND; break; case Intrinsic::canonicalize: Opcode = ISD::FCANONICALIZE; break; } setValue(&I, DAG.getNode(Opcode, sdl, getValue(I.getArgOperand(0)).getValueType(), getValue(I.getArgOperand(0)))); return; } case Intrinsic::lround: case Intrinsic::llround: case Intrinsic::lrint: case Intrinsic::llrint: { unsigned Opcode; switch (Intrinsic) { default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
case Intrinsic::lround: Opcode = ISD::LROUND; break; case Intrinsic::llround: Opcode = ISD::LLROUND; break; case Intrinsic::lrint: Opcode = ISD::LRINT; break; case Intrinsic::llrint: Opcode = ISD::LLRINT; break; } EVT RetVT = TLI.getValueType(DAG.getDataLayout(), I.getType()); setValue(&I, DAG.getNode(Opcode, sdl, RetVT, getValue(I.getArgOperand(0)))); return; } case Intrinsic::minnum: setValue(&I, DAG.getNode(ISD::FMINNUM, sdl, getValue(I.getArgOperand(0)).getValueType(), getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)))); return; case Intrinsic::maxnum: setValue(&I, DAG.getNode(ISD::FMAXNUM, sdl, getValue(I.getArgOperand(0)).getValueType(), getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)))); return; case Intrinsic::minimum: setValue(&I, DAG.getNode(ISD::FMINIMUM, sdl, getValue(I.getArgOperand(0)).getValueType(), getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)))); return; case Intrinsic::maximum: setValue(&I, DAG.getNode(ISD::FMAXIMUM, sdl, getValue(I.getArgOperand(0)).getValueType(), getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)))); return; case Intrinsic::copysign: setValue(&I, DAG.getNode(ISD::FCOPYSIGN, sdl, getValue(I.getArgOperand(0)).getValueType(), getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)))); return; case Intrinsic::fma: setValue(&I, DAG.getNode(ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(), getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), getValue(I.getArgOperand(2)))); return; case Intrinsic::experimental_constrained_fadd: case Intrinsic::experimental_constrained_fsub: case Intrinsic::experimental_constrained_fmul: case Intrinsic::experimental_constrained_fdiv: case Intrinsic::experimental_constrained_frem: case Intrinsic::experimental_constrained_fma: case Intrinsic::experimental_constrained_fptosi: case Intrinsic::experimental_constrained_fptoui: case Intrinsic::experimental_constrained_fptrunc: case Intrinsic::experimental_constrained_fpext: case 
Intrinsic::experimental_constrained_sqrt: case Intrinsic::experimental_constrained_pow: case Intrinsic::experimental_constrained_powi: case Intrinsic::experimental_constrained_sin: case Intrinsic::experimental_constrained_cos: case Intrinsic::experimental_constrained_exp: case Intrinsic::experimental_constrained_exp2: case Intrinsic::experimental_constrained_log: case Intrinsic::experimental_constrained_log10: case Intrinsic::experimental_constrained_log2: case Intrinsic::experimental_constrained_rint: case Intrinsic::experimental_constrained_nearbyint: case Intrinsic::experimental_constrained_maxnum: case Intrinsic::experimental_constrained_minnum: case Intrinsic::experimental_constrained_ceil: case Intrinsic::experimental_constrained_floor: case Intrinsic::experimental_constrained_round: case Intrinsic::experimental_constrained_trunc: visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(I)); return; case Intrinsic::fmuladd: { EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType()); if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict && TLI.isFMAFasterThanFMulAndFAdd(VT)) { setValue(&I, DAG.getNode(ISD::FMA, sdl, getValue(I.getArgOperand(0)).getValueType(), getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), getValue(I.getArgOperand(2)))); } else { // TODO: Intrinsic calls should have fast-math-flags. 
SDValue Mul = DAG.getNode(ISD::FMUL, sdl, getValue(I.getArgOperand(0)).getValueType(), getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1))); SDValue Add = DAG.getNode(ISD::FADD, sdl, getValue(I.getArgOperand(0)).getValueType(), Mul, getValue(I.getArgOperand(2))); setValue(&I, Add); } return; } case Intrinsic::convert_to_fp16: setValue(&I, DAG.getNode(ISD::BITCAST, sdl, MVT::i16, DAG.getNode(ISD::FP_ROUND, sdl, MVT::f16, getValue(I.getArgOperand(0)), DAG.getTargetConstant(0, sdl, MVT::i32)))); return; case Intrinsic::convert_from_fp16: setValue(&I, DAG.getNode(ISD::FP_EXTEND, sdl, TLI.getValueType(DAG.getDataLayout(), I.getType()), DAG.getNode(ISD::BITCAST, sdl, MVT::f16, getValue(I.getArgOperand(0))))); return; case Intrinsic::pcmarker: { SDValue Tmp = getValue(I.getArgOperand(0)); DAG.setRoot(DAG.getNode(ISD::PCMARKER, sdl, MVT::Other, getRoot(), Tmp)); return; } case Intrinsic::readcyclecounter: { SDValue Op = getRoot(); Res = DAG.getNode(ISD::READCYCLECOUNTER, sdl, DAG.getVTList(MVT::i64, MVT::Other), Op); setValue(&I, Res); DAG.setRoot(Res.getValue(1)); return; } case Intrinsic::bitreverse: setValue(&I, DAG.getNode(ISD::BITREVERSE, sdl, getValue(I.getArgOperand(0)).getValueType(), getValue(I.getArgOperand(0)))); return; case Intrinsic::bswap: setValue(&I, DAG.getNode(ISD::BSWAP, sdl, getValue(I.getArgOperand(0)).getValueType(), getValue(I.getArgOperand(0)))); return; case Intrinsic::cttz: { SDValue Arg = getValue(I.getArgOperand(0)); ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1)); EVT Ty = Arg.getValueType(); setValue(&I, DAG.getNode(CI->isZero() ? ISD::CTTZ : ISD::CTTZ_ZERO_UNDEF, sdl, Ty, Arg)); return; } case Intrinsic::ctlz: { SDValue Arg = getValue(I.getArgOperand(0)); ConstantInt *CI = cast<ConstantInt>(I.getArgOperand(1)); EVT Ty = Arg.getValueType(); setValue(&I, DAG.getNode(CI->isZero() ? 
ISD::CTLZ : ISD::CTLZ_ZERO_UNDEF, sdl, Ty, Arg)); return; } case Intrinsic::ctpop: { SDValue Arg = getValue(I.getArgOperand(0)); EVT Ty = Arg.getValueType(); setValue(&I, DAG.getNode(ISD::CTPOP, sdl, Ty, Arg)); return; } case Intrinsic::fshl: case Intrinsic::fshr: { bool IsFSHL = Intrinsic == Intrinsic::fshl; SDValue X = getValue(I.getArgOperand(0)); SDValue Y = getValue(I.getArgOperand(1)); SDValue Z = getValue(I.getArgOperand(2)); EVT VT = X.getValueType(); SDValue BitWidthC = DAG.getConstant(VT.getScalarSizeInBits(), sdl, VT); SDValue Zero = DAG.getConstant(0, sdl, VT); SDValue ShAmt = DAG.getNode(ISD::UREM, sdl, VT, Z, BitWidthC); auto FunnelOpcode = IsFSHL ? ISD::FSHL : ISD::FSHR; if (TLI.isOperationLegalOrCustom(FunnelOpcode, VT)) { setValue(&I, DAG.getNode(FunnelOpcode, sdl, VT, X, Y, Z)); return; } // When X == Y, this is rotate. If the data type has a power-of-2 size, we // avoid the select that is necessary in the general case to filter out // the 0-shift possibility that leads to UB. if (X == Y && isPowerOf2_32(VT.getScalarSizeInBits())) { auto RotateOpcode = IsFSHL ? ISD::ROTL : ISD::ROTR; if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) { setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, Z)); return; } // Some targets only rotate one way. Try the opposite direction. RotateOpcode = IsFSHL ? ISD::ROTR : ISD::ROTL; if (TLI.isOperationLegalOrCustom(RotateOpcode, VT)) { // Negate the shift amount because it is safe to ignore the high bits. SDValue NegShAmt = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z); setValue(&I, DAG.getNode(RotateOpcode, sdl, VT, X, NegShAmt)); return; } // fshl (rotl): (X << (Z % BW)) | (X >> ((0 - Z) % BW)) // fshr (rotr): (X << ((0 - Z) % BW)) | (X >> (Z % BW)) SDValue NegZ = DAG.getNode(ISD::SUB, sdl, VT, Zero, Z); SDValue NShAmt = DAG.getNode(ISD::UREM, sdl, VT, NegZ, BitWidthC); SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : NShAmt); SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, X, IsFSHL ? 
NShAmt : ShAmt); setValue(&I, DAG.getNode(ISD::OR, sdl, VT, ShX, ShY)); return; } // fshl: (X << (Z % BW)) | (Y >> (BW - (Z % BW))) // fshr: (X << (BW - (Z % BW))) | (Y >> (Z % BW)) SDValue InvShAmt = DAG.getNode(ISD::SUB, sdl, VT, BitWidthC, ShAmt); SDValue ShX = DAG.getNode(ISD::SHL, sdl, VT, X, IsFSHL ? ShAmt : InvShAmt); SDValue ShY = DAG.getNode(ISD::SRL, sdl, VT, Y, IsFSHL ? InvShAmt : ShAmt); SDValue Or = DAG.getNode(ISD::OR, sdl, VT, ShX, ShY); // If (Z % BW == 0), then the opposite direction shift is shift-by-bitwidth, // and that is undefined. We must compare and select to avoid UB. EVT CCVT = MVT::i1; if (VT.isVector()) CCVT = EVT::getVectorVT(*Context, CCVT, VT.getVectorNumElements()); // For fshl, 0-shift returns the 1st arg (X). // For fshr, 0-shift returns the 2nd arg (Y). SDValue IsZeroShift = DAG.getSetCC(sdl, CCVT, ShAmt, Zero, ISD::SETEQ); setValue(&I, DAG.getSelect(sdl, VT, IsZeroShift, IsFSHL ? X : Y, Or)); return; } case Intrinsic::sadd_sat: { SDValue Op1 = getValue(I.getArgOperand(0)); SDValue Op2 = getValue(I.getArgOperand(1)); setValue(&I, DAG.getNode(ISD::SADDSAT, sdl, Op1.getValueType(), Op1, Op2)); return; } case Intrinsic::uadd_sat: { SDValue Op1 = getValue(I.getArgOperand(0)); SDValue Op2 = getValue(I.getArgOperand(1)); setValue(&I, DAG.getNode(ISD::UADDSAT, sdl, Op1.getValueType(), Op1, Op2)); return; } case Intrinsic::ssub_sat: { SDValue Op1 = getValue(I.getArgOperand(0)); SDValue Op2 = getValue(I.getArgOperand(1)); setValue(&I, DAG.getNode(ISD::SSUBSAT, sdl, Op1.getValueType(), Op1, Op2)); return; } case Intrinsic::usub_sat: { SDValue Op1 = getValue(I.getArgOperand(0)); SDValue Op2 = getValue(I.getArgOperand(1)); setValue(&I, DAG.getNode(ISD::USUBSAT, sdl, Op1.getValueType(), Op1, Op2)); return; } case Intrinsic::smul_fix: case Intrinsic::umul_fix: { SDValue Op1 = getValue(I.getArgOperand(0)); SDValue Op2 = getValue(I.getArgOperand(1)); SDValue Op3 = getValue(I.getArgOperand(2)); setValue(&I, 
DAG.getNode(FixedPointIntrinsicToOpcode(Intrinsic), sdl, Op1.getValueType(), Op1, Op2, Op3)); return; } case Intrinsic::smul_fix_sat: { SDValue Op1 = getValue(I.getArgOperand(0)); SDValue Op2 = getValue(I.getArgOperand(1)); SDValue Op3 = getValue(I.getArgOperand(2)); setValue(&I, DAG.getNode(ISD::SMULFIXSAT, sdl, Op1.getValueType(), Op1, Op2, Op3)); return; } case Intrinsic::umul_fix_sat: { SDValue Op1 = getValue(I.getArgOperand(0)); SDValue Op2 = getValue(I.getArgOperand(1)); SDValue Op3 = getValue(I.getArgOperand(2)); setValue(&I, DAG.getNode(ISD::UMULFIXSAT, sdl, Op1.getValueType(), Op1, Op2, Op3)); return; } case Intrinsic::stacksave: { SDValue Op = getRoot(); Res = DAG.getNode( ISD::STACKSAVE, sdl, DAG.getVTList(TLI.getPointerTy(DAG.getDataLayout()), MVT::Other), Op); setValue(&I, Res); DAG.setRoot(Res.getValue(1)); return; } case Intrinsic::stackrestore: Res = getValue(I.getArgOperand(0)); DAG.setRoot(DAG.getNode(ISD::STACKRESTORE, sdl, MVT::Other, getRoot(), Res)); return; case Intrinsic::get_dynamic_area_offset: { SDValue Op = getRoot(); EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); EVT ResTy = TLI.getValueType(DAG.getDataLayout(), I.getType()); // Result type for @llvm.get.dynamic.area.offset should match PtrTy for // target. 
if (PtrTy.getSizeInBits() < ResTy.getSizeInBits()) report_fatal_error("Wrong result type for @llvm.get.dynamic.area.offset" " intrinsic!"); Res = DAG.getNode(ISD::GET_DYNAMIC_AREA_OFFSET, sdl, DAG.getVTList(ResTy), Op); DAG.setRoot(Op); setValue(&I, Res); return; } case Intrinsic::stackguard: { EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); MachineFunction &MF = DAG.getMachineFunction(); const Module &M = *MF.getFunction().getParent(); SDValue Chain = getRoot(); if (TLI.useLoadStackGuardNode()) { Res = getLoadStackGuard(DAG, sdl, Chain); } else { const Value *Global = TLI.getSDagStackGuard(M); unsigned Align = DL->getPrefTypeAlignment(Global->getType()); Res = DAG.getLoad(PtrTy, sdl, Chain, getValue(Global), MachinePointerInfo(Global, 0), Align, MachineMemOperand::MOVolatile); } if (TLI.useStackGuardXorFP()) Res = TLI.emitStackGuardXorFP(DAG, Res, sdl); DAG.setRoot(Chain); setValue(&I, Res); return; } case Intrinsic::stackprotector: { // Emit code into the DAG to store the stack guard onto the stack. MachineFunction &MF = DAG.getMachineFunction(); MachineFrameInfo &MFI = MF.getFrameInfo(); EVT PtrTy = TLI.getPointerTy(DAG.getDataLayout()); SDValue Src, Chain = getRoot(); if (TLI.useLoadStackGuardNode()) Src = getLoadStackGuard(DAG, sdl, Chain); else Src = getValue(I.getArgOperand(0)); // The guard's value. AllocaInst *Slot = cast<AllocaInst>(I.getArgOperand(1)); int FI = FuncInfo.StaticAllocaMap[Slot]; MFI.setStackProtectorIndex(FI); SDValue FIN = DAG.getFrameIndex(FI, PtrTy); // Store the stack protector onto the stack. Res = DAG.getStore(Chain, sdl, Src, FIN, MachinePointerInfo::getFixedStack( DAG.getMachineFunction(), FI), /* Alignment = */ 0, MachineMemOperand::MOVolatile); setValue(&I, Res); DAG.setRoot(Res); return; } case Intrinsic::objectsize: { // If we don't know by now, we're never going to know. 
ConstantInt *CI = dyn_cast<ConstantInt>(I.getArgOperand(1)); assert(CI && "Non-constant type in __builtin_object_size?"); SDValue Arg = getValue(I.getCalledValue()); EVT Ty = Arg.getValueType(); if (CI->isZero()) Res = DAG.getConstant(-1ULL, sdl, Ty); else Res = DAG.getConstant(0, sdl, Ty); setValue(&I, Res); return; } case Intrinsic::is_constant: // If this wasn't constant-folded away by now, then it's not a // constant. setValue(&I, DAG.getConstant(0, sdl, MVT::i1)); return; case Intrinsic::annotation: case Intrinsic::ptr_annotation: case Intrinsic::launder_invariant_group: case Intrinsic::strip_invariant_group: // Drop the intrinsic, but forward the value setValue(&I, getValue(I.getOperand(0))); return; case Intrinsic::assume: case Intrinsic::var_annotation: case Intrinsic::sideeffect: // Discard annotate attributes, assumptions, and artificial side-effects. return; case Intrinsic::codeview_annotation: { // Emit a label associated with this metadata. MachineFunction &MF = DAG.getMachineFunction(); MCSymbol *Label = MF.getMMI().getContext().createTempSymbol("annotation", true); Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(0))->getMetadata(); MF.addCodeViewAnnotation(Label, cast<MDNode>(MD)); Res = DAG.getLabelNode(ISD::ANNOTATION_LABEL, sdl, getRoot(), Label); DAG.setRoot(Res); return; } case Intrinsic::init_trampoline: { const Function *F = cast<Function>(I.getArgOperand(1)->stripPointerCasts()); SDValue Ops[6]; Ops[0] = getRoot(); Ops[1] = getValue(I.getArgOperand(0)); Ops[2] = getValue(I.getArgOperand(1)); Ops[3] = getValue(I.getArgOperand(2)); Ops[4] = DAG.getSrcValue(I.getArgOperand(0)); Ops[5] = DAG.getSrcValue(F); Res = DAG.getNode(ISD::INIT_TRAMPOLINE, sdl, MVT::Other, Ops); DAG.setRoot(Res); return; } case Intrinsic::adjust_trampoline: setValue(&I, DAG.getNode(ISD::ADJUST_TRAMPOLINE, sdl, TLI.getPointerTy(DAG.getDataLayout()), getValue(I.getArgOperand(0)))); return; case Intrinsic::gcroot: { assert(DAG.getMachineFunction().getFunction().hasGC() 
&& "only valid in functions with gc specified, enforced by Verifier"); assert(GFI && "implied by previous"); const Value *Alloca = I.getArgOperand(0)->stripPointerCasts(); const Constant *TypeMap = cast<Constant>(I.getArgOperand(1)); FrameIndexSDNode *FI = cast<FrameIndexSDNode>(getValue(Alloca).getNode()); GFI->addStackRoot(FI->getIndex(), TypeMap); return; } case Intrinsic::gcread: case Intrinsic::gcwrite: llvm_unreachable("GC failed to lower gcread/gcwrite intrinsics!"); case Intrinsic::flt_rounds: setValue(&I, DAG.getNode(ISD::FLT_ROUNDS_, sdl, MVT::i32)); return; case Intrinsic::expect: // Just replace __builtin_expect(exp, c) with EXP. setValue(&I, getValue(I.getArgOperand(0))); return; case Intrinsic::debugtrap: case Intrinsic::trap: { StringRef TrapFuncName = I.getAttributes() .getAttribute(AttributeList::FunctionIndex, "trap-func-name") .getValueAsString(); if (TrapFuncName.empty()) { ISD::NodeType Op = (Intrinsic == Intrinsic::trap) ? ISD::TRAP : ISD::DEBUGTRAP; DAG.setRoot(DAG.getNode(Op, sdl,MVT::Other, getRoot())); return; } TargetLowering::ArgListTy Args; TargetLowering::CallLoweringInfo CLI(DAG); CLI.setDebugLoc(sdl).setChain(getRoot()).setLibCallee( CallingConv::C, I.getType(), DAG.getExternalSymbol(TrapFuncName.data(), TLI.getPointerTy(DAG.getDataLayout())), std::move(Args)); std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); DAG.setRoot(Result.second); return; } case Intrinsic::uadd_with_overflow: case Intrinsic::sadd_with_overflow: case Intrinsic::usub_with_overflow: case Intrinsic::ssub_with_overflow: case Intrinsic::umul_with_overflow: case Intrinsic::smul_with_overflow: { ISD::NodeType Op; switch (Intrinsic) { default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
case Intrinsic::uadd_with_overflow: Op = ISD::UADDO; break; case Intrinsic::sadd_with_overflow: Op = ISD::SADDO; break; case Intrinsic::usub_with_overflow: Op = ISD::USUBO; break; case Intrinsic::ssub_with_overflow: Op = ISD::SSUBO; break; case Intrinsic::umul_with_overflow: Op = ISD::UMULO; break; case Intrinsic::smul_with_overflow: Op = ISD::SMULO; break; } SDValue Op1 = getValue(I.getArgOperand(0)); SDValue Op2 = getValue(I.getArgOperand(1)); EVT ResultVT = Op1.getValueType(); EVT OverflowVT = MVT::i1; if (ResultVT.isVector()) OverflowVT = EVT::getVectorVT( *Context, OverflowVT, ResultVT.getVectorNumElements()); SDVTList VTs = DAG.getVTList(ResultVT, OverflowVT); setValue(&I, DAG.getNode(Op, sdl, VTs, Op1, Op2)); return; } case Intrinsic::prefetch: { SDValue Ops[5]; unsigned rw = cast<ConstantInt>(I.getArgOperand(1))->getZExtValue(); auto Flags = rw == 0 ? MachineMemOperand::MOLoad :MachineMemOperand::MOStore; Ops[0] = DAG.getRoot(); Ops[1] = getValue(I.getArgOperand(0)); Ops[2] = getValue(I.getArgOperand(1)); Ops[3] = getValue(I.getArgOperand(2)); Ops[4] = getValue(I.getArgOperand(3)); SDValue Result = DAG.getMemIntrinsicNode(ISD::PREFETCH, sdl, DAG.getVTList(MVT::Other), Ops, EVT::getIntegerVT(*Context, 8), MachinePointerInfo(I.getArgOperand(0)), 0, /* align */ Flags); // Chain the prefetch in parallell with any pending loads, to stay out of // the way of later optimizations. PendingLoads.push_back(Result); Result = getRoot(); DAG.setRoot(Result); return; } case Intrinsic::lifetime_start: case Intrinsic::lifetime_end: { bool IsStart = (Intrinsic == Intrinsic::lifetime_start); // Stack coloring is not enabled in O0, discard region information. 
if (TM.getOptLevel() == CodeGenOpt::None) return; const int64_t ObjectSize = cast<ConstantInt>(I.getArgOperand(0))->getSExtValue(); Value *const ObjectPtr = I.getArgOperand(1); SmallVector<const Value *, 4> Allocas; GetUnderlyingObjects(ObjectPtr, Allocas, *DL); for (SmallVectorImpl<const Value*>::iterator Object = Allocas.begin(), E = Allocas.end(); Object != E; ++Object) { const AllocaInst *LifetimeObject = dyn_cast_or_null<AllocaInst>(*Object); // Could not find an Alloca. if (!LifetimeObject) continue; // First check that the Alloca is static, otherwise it won't have a // valid frame index. auto SI = FuncInfo.StaticAllocaMap.find(LifetimeObject); if (SI == FuncInfo.StaticAllocaMap.end()) return; const int FrameIndex = SI->second; int64_t Offset; if (GetPointerBaseWithConstantOffset( ObjectPtr, Offset, DAG.getDataLayout()) != LifetimeObject) Offset = -1; // Cannot determine offset from alloca to lifetime object. Res = DAG.getLifetimeNode(IsStart, sdl, getRoot(), FrameIndex, ObjectSize, Offset); DAG.setRoot(Res); } return; } case Intrinsic::invariant_start: // Discard region information. setValue(&I, DAG.getUNDEF(TLI.getPointerTy(DAG.getDataLayout()))); return; case Intrinsic::invariant_end: // Discard region information. return; case Intrinsic::clear_cache: /// FunctionName may be null. 
if (const char *FunctionName = TLI.getClearCacheBuiltinName()) lowerCallToExternalSymbol(I, FunctionName); return; case Intrinsic::donothing: // ignore return; case Intrinsic::experimental_stackmap: visitStackmap(I); return; case Intrinsic::experimental_patchpoint_void: case Intrinsic::experimental_patchpoint_i64: visitPatchpoint(&I); return; case Intrinsic::experimental_gc_statepoint: LowerStatepoint(ImmutableStatepoint(&I)); return; case Intrinsic::experimental_gc_result: visitGCResult(cast<GCResultInst>(I)); return; case Intrinsic::experimental_gc_relocate: visitGCRelocate(cast<GCRelocateInst>(I)); return; case Intrinsic::instrprof_increment: llvm_unreachable("instrprof failed to lower an increment"); case Intrinsic::instrprof_value_profile: llvm_unreachable("instrprof failed to lower a value profiling call"); case Intrinsic::localescape: { MachineFunction &MF = DAG.getMachineFunction(); const TargetInstrInfo *TII = DAG.getSubtarget().getInstrInfo(); // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission // is the same on all targets. for (unsigned Idx = 0, E = I.getNumArgOperands(); Idx < E; ++Idx) { Value *Arg = I.getArgOperand(Idx)->stripPointerCasts(); if (isa<ConstantPointerNull>(Arg)) continue; // Skip null pointers. They represent a hole in index space. AllocaInst *Slot = cast<AllocaInst>(Arg); assert(FuncInfo.StaticAllocaMap.count(Slot) && "can only escape static allocas"); int FI = FuncInfo.StaticAllocaMap[Slot]; MCSymbol *FrameAllocSym = MF.getMMI().getContext().getOrCreateFrameAllocSymbol( GlobalValue::dropLLVMManglingEscape(MF.getName()), Idx); BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, dl, TII->get(TargetOpcode::LOCAL_ESCAPE)) .addSym(FrameAllocSym) .addFrameIndex(FI); } return; } case Intrinsic::localrecover: { // i8* @llvm.localrecover(i8* %fn, i8* %fp, i32 %idx) MachineFunction &MF = DAG.getMachineFunction(); MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout(), 0); // Get the symbol that defines the frame offset. 
auto *Fn = cast<Function>(I.getArgOperand(0)->stripPointerCasts()); auto *Idx = cast<ConstantInt>(I.getArgOperand(2)); unsigned IdxVal = unsigned(Idx->getLimitedValue(std::numeric_limits<int>::max())); MCSymbol *FrameAllocSym = MF.getMMI().getContext().getOrCreateFrameAllocSymbol( GlobalValue::dropLLVMManglingEscape(Fn->getName()), IdxVal); // Create a MCSymbol for the label to avoid any target lowering // that would make this PC relative. SDValue OffsetSym = DAG.getMCSymbol(FrameAllocSym, PtrVT); SDValue OffsetVal = DAG.getNode(ISD::LOCAL_RECOVER, sdl, PtrVT, OffsetSym); // Add the offset to the FP. Value *FP = I.getArgOperand(1); SDValue FPVal = getValue(FP); SDValue Add = DAG.getNode(ISD::ADD, sdl, PtrVT, FPVal, OffsetVal); setValue(&I, Add); return; } case Intrinsic::eh_exceptionpointer: case Intrinsic::eh_exceptioncode: { // Get the exception pointer vreg, copy from it, and resize it to fit. const auto *CPI = cast<CatchPadInst>(I.getArgOperand(0)); MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout()); const TargetRegisterClass *PtrRC = TLI.getRegClassFor(PtrVT); unsigned VReg = FuncInfo.getCatchPadExceptionPointerVReg(CPI, PtrRC); SDValue N = DAG.getCopyFromReg(DAG.getEntryNode(), getCurSDLoc(), VReg, PtrVT); if (Intrinsic == Intrinsic::eh_exceptioncode) N = DAG.getZExtOrTrunc(N, getCurSDLoc(), MVT::i32); setValue(&I, N); return; } case Intrinsic::xray_customevent: { // Here we want to make sure that the intrinsic behaves as if it has a // specific calling convention, and only for x86_64. // FIXME: Support other platforms later. const auto &Triple = DAG.getTarget().getTargetTriple(); if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux()) return; SDLoc DL = getCurSDLoc(); SmallVector<SDValue, 8> Ops; // We want to say that we always want the arguments in registers. 
SDValue LogEntryVal = getValue(I.getArgOperand(0)); SDValue StrSizeVal = getValue(I.getArgOperand(1)); SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); SDValue Chain = getRoot(); Ops.push_back(LogEntryVal); Ops.push_back(StrSizeVal); Ops.push_back(Chain); // We need to enforce the calling convention for the callsite, so that // argument ordering is enforced correctly, and that register allocation can // see that some registers may be assumed clobbered and have to preserve // them across calls to the intrinsic. MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHABLE_EVENT_CALL, DL, NodeTys, Ops); SDValue patchableNode = SDValue(MN, 0); DAG.setRoot(patchableNode); setValue(&I, patchableNode); return; } case Intrinsic::xray_typedevent: { // Here we want to make sure that the intrinsic behaves as if it has a // specific calling convention, and only for x86_64. // FIXME: Support other platforms later. const auto &Triple = DAG.getTarget().getTargetTriple(); if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux()) return; SDLoc DL = getCurSDLoc(); SmallVector<SDValue, 8> Ops; // We want to say that we always want the arguments in registers. // It's unclear to me how manipulating the selection DAG here forces callers // to provide arguments in registers instead of on the stack. SDValue LogTypeId = getValue(I.getArgOperand(0)); SDValue LogEntryVal = getValue(I.getArgOperand(1)); SDValue StrSizeVal = getValue(I.getArgOperand(2)); SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue); SDValue Chain = getRoot(); Ops.push_back(LogTypeId); Ops.push_back(LogEntryVal); Ops.push_back(StrSizeVal); Ops.push_back(Chain); // We need to enforce the calling convention for the callsite, so that // argument ordering is enforced correctly, and that register allocation can // see that some registers may be assumed clobbered and have to preserve // them across calls to the intrinsic. 
MachineSDNode *MN = DAG.getMachineNode( TargetOpcode::PATCHABLE_TYPED_EVENT_CALL, DL, NodeTys, Ops); SDValue patchableNode = SDValue(MN, 0); DAG.setRoot(patchableNode); setValue(&I, patchableNode); return; } case Intrinsic::experimental_deoptimize: LowerDeoptimizeCall(&I); return; case Intrinsic::experimental_vector_reduce_v2_fadd: case Intrinsic::experimental_vector_reduce_v2_fmul: case Intrinsic::experimental_vector_reduce_add: case Intrinsic::experimental_vector_reduce_mul: case Intrinsic::experimental_vector_reduce_and: case Intrinsic::experimental_vector_reduce_or: case Intrinsic::experimental_vector_reduce_xor: case Intrinsic::experimental_vector_reduce_smax: case Intrinsic::experimental_vector_reduce_smin: case Intrinsic::experimental_vector_reduce_umax: case Intrinsic::experimental_vector_reduce_umin: case Intrinsic::experimental_vector_reduce_fmax: case Intrinsic::experimental_vector_reduce_fmin: visitVectorReduce(I, Intrinsic); return; case Intrinsic::icall_branch_funnel: { SmallVector<SDValue, 16> Ops; Ops.push_back(getValue(I.getArgOperand(0))); int64_t Offset; auto *Base = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset( I.getArgOperand(1), Offset, DAG.getDataLayout())); if (!Base) report_fatal_error( "llvm.icall.branch.funnel operand must be a GlobalValue"); Ops.push_back(DAG.getTargetGlobalAddress(Base, getCurSDLoc(), MVT::i64, 0)); struct BranchFunnelTarget { int64_t Offset; SDValue Target; }; SmallVector<BranchFunnelTarget, 8> Targets; for (unsigned Op = 1, N = I.getNumArgOperands(); Op != N; Op += 2) { auto *ElemBase = dyn_cast<GlobalObject>(GetPointerBaseWithConstantOffset( I.getArgOperand(Op), Offset, DAG.getDataLayout())); if (ElemBase != Base) report_fatal_error("all llvm.icall.branch.funnel operands must refer " "to the same GlobalValue"); SDValue Val = getValue(I.getArgOperand(Op + 1)); auto *GA = dyn_cast<GlobalAddressSDNode>(Val); if (!GA) report_fatal_error( "llvm.icall.branch.funnel operand must be a GlobalValue"); 
Targets.push_back({Offset, DAG.getTargetGlobalAddress( GA->getGlobal(), getCurSDLoc(), Val.getValueType(), GA->getOffset())}); } llvm::sort(Targets, [](const BranchFunnelTarget &T1, const BranchFunnelTarget &T2) { return T1.Offset < T2.Offset; }); for (auto &T : Targets) { Ops.push_back(DAG.getTargetConstant(T.Offset, getCurSDLoc(), MVT::i32)); Ops.push_back(T.Target); } Ops.push_back(DAG.getRoot()); // Chain SDValue N(DAG.getMachineNode(TargetOpcode::ICALL_BRANCH_FUNNEL, getCurSDLoc(), MVT::Other, Ops), 0); DAG.setRoot(N); setValue(&I, N); HasTailCall = true; return; } case Intrinsic::wasm_landingpad_index: // Information this intrinsic contained has been transferred to // MachineFunction in SelectionDAGISel::PrepareEHLandingPad. We can safely // delete it now. return; case Intrinsic::aarch64_settag: case Intrinsic::aarch64_settag_zero: { const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); bool ZeroMemory = Intrinsic == Intrinsic::aarch64_settag_zero; SDValue Val = TSI.EmitTargetCodeForSetTag( DAG, getCurSDLoc(), getRoot(), getValue(I.getArgOperand(0)), getValue(I.getArgOperand(1)), MachinePointerInfo(I.getArgOperand(0)), ZeroMemory); DAG.setRoot(Val); setValue(&I, Val); return; } case Intrinsic::ptrmask: { SDValue Ptr = getValue(I.getOperand(0)); SDValue Const = getValue(I.getOperand(1)); EVT DestVT = EVT(DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout())); setValue(&I, DAG.getNode(ISD::AND, getCurSDLoc(), DestVT, Ptr, DAG.getZExtOrTrunc(Const, getCurSDLoc(), DestVT))); return; } } } void SelectionDAGBuilder::visitConstrainedFPIntrinsic( const ConstrainedFPIntrinsic &FPI) { SDLoc sdl = getCurSDLoc(); unsigned Opcode; switch (FPI.getIntrinsicID()) { default: llvm_unreachable("Impossible intrinsic"); // Can't reach here. 
case Intrinsic::experimental_constrained_fadd: Opcode = ISD::STRICT_FADD; break; case Intrinsic::experimental_constrained_fsub: Opcode = ISD::STRICT_FSUB; break; case Intrinsic::experimental_constrained_fmul: Opcode = ISD::STRICT_FMUL; break; case Intrinsic::experimental_constrained_fdiv: Opcode = ISD::STRICT_FDIV; break; case Intrinsic::experimental_constrained_frem: Opcode = ISD::STRICT_FREM; break; case Intrinsic::experimental_constrained_fma: Opcode = ISD::STRICT_FMA; break; case Intrinsic::experimental_constrained_fptosi: Opcode = ISD::STRICT_FP_TO_SINT; break; case Intrinsic::experimental_constrained_fptoui: Opcode = ISD::STRICT_FP_TO_UINT; break; case Intrinsic::experimental_constrained_fptrunc: Opcode = ISD::STRICT_FP_ROUND; break; case Intrinsic::experimental_constrained_fpext: Opcode = ISD::STRICT_FP_EXTEND; break; case Intrinsic::experimental_constrained_sqrt: Opcode = ISD::STRICT_FSQRT; break; case Intrinsic::experimental_constrained_pow: Opcode = ISD::STRICT_FPOW; break; case Intrinsic::experimental_constrained_powi: Opcode = ISD::STRICT_FPOWI; break; case Intrinsic::experimental_constrained_sin: Opcode = ISD::STRICT_FSIN; break; case Intrinsic::experimental_constrained_cos: Opcode = ISD::STRICT_FCOS; break; case Intrinsic::experimental_constrained_exp: Opcode = ISD::STRICT_FEXP; break; case Intrinsic::experimental_constrained_exp2: Opcode = ISD::STRICT_FEXP2; break; case Intrinsic::experimental_constrained_log: Opcode = ISD::STRICT_FLOG; break; case Intrinsic::experimental_constrained_log10: Opcode = ISD::STRICT_FLOG10; break; case Intrinsic::experimental_constrained_log2: Opcode = ISD::STRICT_FLOG2; break; case Intrinsic::experimental_constrained_rint: Opcode = ISD::STRICT_FRINT; break; case Intrinsic::experimental_constrained_nearbyint: Opcode = ISD::STRICT_FNEARBYINT; break; case Intrinsic::experimental_constrained_maxnum: Opcode = ISD::STRICT_FMAXNUM; break; case Intrinsic::experimental_constrained_minnum: Opcode = ISD::STRICT_FMINNUM; break; case 
Intrinsic::experimental_constrained_ceil: Opcode = ISD::STRICT_FCEIL; break; case Intrinsic::experimental_constrained_floor: Opcode = ISD::STRICT_FFLOOR; break; case Intrinsic::experimental_constrained_round: Opcode = ISD::STRICT_FROUND; break; case Intrinsic::experimental_constrained_trunc: Opcode = ISD::STRICT_FTRUNC; break; } const TargetLowering &TLI = DAG.getTargetLoweringInfo(); SDValue Chain = getRoot(); SmallVector<EVT, 4> ValueVTs; ComputeValueVTs(TLI, DAG.getDataLayout(), FPI.getType(), ValueVTs); ValueVTs.push_back(MVT::Other); // Out chain SDVTList VTs = DAG.getVTList(ValueVTs); SDValue Result; if (Opcode == ISD::STRICT_FP_ROUND) Result = DAG.getNode(Opcode, sdl, VTs, { Chain, getValue(FPI.getArgOperand(0)), DAG.getTargetConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout())) }); else if (FPI.isUnaryOp()) Result = DAG.getNode(Opcode, sdl, VTs, { Chain, getValue(FPI.getArgOperand(0)) }); else if (FPI.isTernaryOp()) Result = DAG.getNode(Opcode, sdl, VTs, { Chain, getValue(FPI.getArgOperand(0)), getValue(FPI.getArgOperand(1)), getValue(FPI.getArgOperand(2)) }); else Result = DAG.getNode(Opcode, sdl, VTs, { Chain, getValue(FPI.getArgOperand(0)), getValue(FPI.getArgOperand(1)) }); if (FPI.getExceptionBehavior() != ConstrainedFPIntrinsic::ExceptionBehavior::ebIgnore) { SDNodeFlags Flags; Flags.setFPExcept(true); Result->setFlags(Flags); } assert(Result.getNode()->getNumValues() == 2); SDValue OutChain = Result.getValue(1); DAG.setRoot(OutChain); SDValue FPResult = Result.getValue(0); setValue(&FPI, FPResult); } std::pair<SDValue, SDValue> SelectionDAGBuilder::lowerInvokable(TargetLowering::CallLoweringInfo &CLI, const BasicBlock *EHPadBB) { MachineFunction &MF = DAG.getMachineFunction(); MachineModuleInfo &MMI = MF.getMMI(); MCSymbol *BeginLabel = nullptr; if (EHPadBB) { // Insert a label before the invoke call to mark the try range. This can be // used to detect deletion of the invoke via the MachineModuleInfo. 
BeginLabel = MMI.getContext().createTempSymbol(); // For SjLj, keep track of which landing pads go with which invokes // so as to maintain the ordering of pads in the LSDA. unsigned CallSiteIndex = MMI.getCurrentCallSite(); if (CallSiteIndex) { MF.setCallSiteBeginLabel(BeginLabel, CallSiteIndex); LPadToCallSiteMap[FuncInfo.MBBMap[EHPadBB]].push_back(CallSiteIndex); // Now that the call site is handled, stop tracking it. MMI.setCurrentCallSite(0); } // Both PendingLoads and PendingExports must be flushed here; // this call might not return. (void)getRoot(); DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getControlRoot(), BeginLabel)); CLI.setChain(getRoot()); } const TargetLowering &TLI = DAG.getTargetLoweringInfo(); std::pair<SDValue, SDValue> Result = TLI.LowerCallTo(CLI); assert((CLI.IsTailCall || Result.second.getNode()) && "Non-null chain expected with non-tail call!"); assert((Result.second.getNode() || !Result.first.getNode()) && "Null value expected with tail call!"); if (!Result.second.getNode()) { // As a special case, a null chain means that a tail call has been emitted // and the DAG root is already updated. HasTailCall = true; // Since there's no actual continuation from this block, nothing can be // relying on us setting vregs for them. PendingExports.clear(); } else { DAG.setRoot(Result.second); } if (EHPadBB) { // Insert a label at the end of the invoke call to mark the try range. This // can be used to detect deletion of the invoke via the MachineModuleInfo. MCSymbol *EndLabel = MMI.getContext().createTempSymbol(); DAG.setRoot(DAG.getEHLabel(getCurSDLoc(), getRoot(), EndLabel)); // Inform MachineModuleInfo of range. auto Pers = classifyEHPersonality(FuncInfo.Fn->getPersonalityFn()); // There is a platform (e.g. wasm) that uses funclet style IR but does not // actually use outlined funclets and their LSDA info style. 
if (MF.hasEHFunclets() && isFuncletEHPersonality(Pers)) { assert(CLI.CS); WinEHFuncInfo *EHInfo = DAG.getMachineFunction().getWinEHFuncInfo(); EHInfo->addIPToStateRange(cast<InvokeInst>(CLI.CS.getInstruction()), BeginLabel, EndLabel); } else if (!isScopedEHPersonality(Pers)) { MF.addInvoke(FuncInfo.MBBMap[EHPadBB], BeginLabel, EndLabel); } } return Result; } void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool isTailCall, const BasicBlock *EHPadBB) { auto &DL = DAG.getDataLayout(); FunctionType *FTy = CS.getFunctionType(); Type *RetTy = CS.getType(); TargetLowering::ArgListTy Args; Args.reserve(CS.arg_size()); const Value *SwiftErrorVal = nullptr; const TargetLowering &TLI = DAG.getTargetLoweringInfo(); // We can't tail call inside a function with a swifterror argument. Lowering // does not support this yet. It would have to move into the swifterror // register before the call. auto *Caller = CS.getInstruction()->getParent()->getParent(); if (TLI.supportSwiftError() && Caller->getAttributes().hasAttrSomewhere(Attribute::SwiftError)) isTailCall = false; for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i) { TargetLowering::ArgListEntry Entry; const Value *V = *i; // Skip empty types if (V->getType()->isEmptyTy()) continue; SDValue ArgNode = getValue(V); Entry.Node = ArgNode; Entry.Ty = V->getType(); Entry.setAttributes(&CS, i - CS.arg_begin()); // Use swifterror virtual register as input to the call. if (Entry.IsSwiftError && TLI.supportSwiftError()) { SwiftErrorVal = V; // We find the virtual register for the actual swifterror argument. // Instead of using the Value, we use the virtual register instead. Entry.Node = DAG.getRegister( SwiftError.getOrCreateVRegUseAt(CS.getInstruction(), FuncInfo.MBB, V), EVT(TLI.getPointerTy(DL))); } Args.push_back(Entry); // If we have an explicit sret argument that is an Instruction, (i.e., it // might point to function-local memory), we can't meaningfully tail-call. 
if (Entry.IsSRet && isa<Instruction>(V)) isTailCall = false; } // Check if target-independent constraints permit a tail call here. // Target-dependent constraints are checked within TLI->LowerCallTo. if (isTailCall && !isInTailCallPosition(CS, DAG.getTarget())) isTailCall = false; // Disable tail calls if there is an swifterror argument. Targets have not // been updated to support tail calls. if (TLI.supportSwiftError() && SwiftErrorVal) isTailCall = false; TargetLowering::CallLoweringInfo CLI(DAG); CLI.setDebugLoc(getCurSDLoc()) .setChain(getRoot()) .setCallee(RetTy, FTy, Callee, std::move(Args), CS) .setTailCall(isTailCall) .setConvergent(CS.isConvergent()); std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB); if (Result.first.getNode()) { const Instruction *Inst = CS.getInstruction(); Result.first = lowerRangeToAssertZExt(DAG, *Inst, Result.first); setValue(Inst, Result.first); } // The last element of CLI.InVals has the SDValue for swifterror return. // Here we copy it to a virtual register and update SwiftErrorMap for // book-keeping. if (SwiftErrorVal && TLI.supportSwiftError()) { // Get the last element of InVals. SDValue Src = CLI.InVals.back(); Register VReg = SwiftError.getOrCreateVRegDefAt( CS.getInstruction(), FuncInfo.MBB, SwiftErrorVal); SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src); DAG.setRoot(CopyNode); } } static SDValue getMemCmpLoad(const Value *PtrVal, MVT LoadVT, SelectionDAGBuilder &Builder) { // Check to see if this load can be trivially constant folded, e.g. if the // input is from a string literal. if (const Constant *LoadInput = dyn_cast<Constant>(PtrVal)) { // Cast pointer to the type we really want to load. 
Type *LoadTy = Type::getIntNTy(PtrVal->getContext(), LoadVT.getScalarSizeInBits()); if (LoadVT.isVector()) LoadTy = VectorType::get(LoadTy, LoadVT.getVectorNumElements()); LoadInput = ConstantExpr::getBitCast(const_cast<Constant *>(LoadInput), PointerType::getUnqual(LoadTy)); if (const Constant *LoadCst = ConstantFoldLoadFromConstPtr( const_cast<Constant *>(LoadInput), LoadTy, *Builder.DL)) return Builder.getValue(LoadCst); } // Otherwise, we have to emit the load. If the pointer is to unfoldable but // still constant memory, the input chain can be the entry node. SDValue Root; bool ConstantMemory = false; // Do not serialize (non-volatile) loads of constant memory with anything. if (Builder.AA && Builder.AA->pointsToConstantMemory(PtrVal)) { Root = Builder.DAG.getEntryNode(); ConstantMemory = true; } else { // Do not serialize non-volatile loads against each other. Root = Builder.DAG.getRoot(); } SDValue Ptr = Builder.getValue(PtrVal); SDValue LoadVal = Builder.DAG.getLoad(LoadVT, Builder.getCurSDLoc(), Root, Ptr, MachinePointerInfo(PtrVal), /* Alignment = */ 1); if (!ConstantMemory) Builder.PendingLoads.push_back(LoadVal.getValue(1)); return LoadVal; } /// Record the value for an instruction that produces an integer result, /// converting the type where necessary. void SelectionDAGBuilder::processIntegerCallValue(const Instruction &I, SDValue Value, bool IsSigned) { EVT VT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(), I.getType(), true); if (IsSigned) Value = DAG.getSExtOrTrunc(Value, getCurSDLoc(), VT); else Value = DAG.getZExtOrTrunc(Value, getCurSDLoc(), VT); setValue(&I, Value); } /// See if we can lower a memcmp call into an optimized form. If so, return /// true and lower it. Otherwise return false, and it will be lowered like a /// normal call. /// The caller already checked that \p I calls the appropriate LibFunc with a /// correct prototype. 
bool SelectionDAGBuilder::visitMemCmpCall(const CallInst &I) {
  const Value *LHS = I.getArgOperand(0), *RHS = I.getArgOperand(1);
  const Value *Size = I.getArgOperand(2);
  const ConstantInt *CSize = dyn_cast<ConstantInt>(Size);

  // memcmp(x, y, 0) == 0: fold a constant-zero-length compare directly to 0.
  if (CSize && CSize->getZExtValue() == 0) {
    EVT CallVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
                                                          I.getType(), true);
    setValue(&I, DAG.getConstant(0, getCurSDLoc(), CallVT));
    return true;
  }

  // First give the target a chance to emit its own optimized memcmp sequence.
  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForMemcmp(
      DAG, getCurSDLoc(), DAG.getRoot(), getValue(LHS), getValue(RHS),
      getValue(Size), MachinePointerInfo(LHS), MachinePointerInfo(RHS));
  if (Res.first.getNode()) {
    // memcmp returns a signed int; Res.second is the updated load chain.
    processIntegerCallValue(I, Res.first, true);
    PendingLoads.push_back(Res.second);
    return true;
  }

  // memcmp(S1,S2,2) != 0 -> (*(short*)LHS != *(short*)RHS)  != 0
  // memcmp(S1,S2,4) != 0 -> (*(int*)LHS != *(int*)RHS)  != 0
  // The wide-load trick below only preserves the ==/!= result, so it is only
  // valid when the call's result is used purely for a zero-equality test and
  // the length is a compile-time constant.
  if (!CSize || !isOnlyUsedInZeroEqualityComparison(&I))
    return false;

  // If the target has a fast compare for the given size, it will return a
  // preferred load type for that size. Require that the load VT is legal and
  // that the target supports unaligned loads of that type. Otherwise, return
  // INVALID.
  auto hasFastLoadsAndCompare = [&](unsigned NumBits) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT LVT = TLI.hasFastEqualityCompare(NumBits);
    if (LVT != MVT::INVALID_SIMPLE_VALUE_TYPE) {
      // TODO: Handle 5 byte compare as 4-byte + 1 byte.
      // TODO: Handle 8 byte compare on x86-32 as two 32-bit loads.
      // TODO: Check alignment of src and dest ptrs.
      unsigned DstAS = LHS->getType()->getPointerAddressSpace();
      unsigned SrcAS = RHS->getType()->getPointerAddressSpace();
      if (!TLI.isTypeLegal(LVT) ||
          !TLI.allowsMisalignedMemoryAccesses(LVT, SrcAS) ||
          !TLI.allowsMisalignedMemoryAccesses(LVT, DstAS))
        LVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
    }
    return LVT;
  };

  // This turns into unaligned loads. We only do this if the target natively
  // supports the MVT we'll be loading or if it is small enough (<= 4) that
  // we'll only produce a small number of byte loads.
  MVT LoadVT;
  unsigned NumBitsToCompare = CSize->getZExtValue() * 8;
  switch (NumBitsToCompare) {
  default:
    return false;
  case 16:
    LoadVT = MVT::i16;
    break;
  case 32:
    LoadVT = MVT::i32;
    break;
  case 64:
  case 128:
  case 256:
    // Wider compares only when the target advertises a fast equality compare
    // for this width (see hasFastLoadsAndCompare above).
    LoadVT = hasFastLoadsAndCompare(NumBitsToCompare);
    break;
  }

  if (LoadVT == MVT::INVALID_SIMPLE_VALUE_TYPE)
    return false;

  SDValue LoadL = getMemCmpLoad(LHS, LoadVT, *this);
  SDValue LoadR = getMemCmpLoad(RHS, LoadVT, *this);

  // Bitcast to a wide integer type if the loads are vectors.
  if (LoadVT.isVector()) {
    EVT CmpVT = EVT::getIntegerVT(LHS->getContext(), LoadVT.getSizeInBits());
    LoadL = DAG.getBitcast(CmpVT, LoadL);
    LoadR = DAG.getBitcast(CmpVT, LoadR);
  }

  // The zero-equality result is just (LoadL != LoadR); zero-extend it back to
  // the call's integer result type.
  SDValue Cmp = DAG.getSetCC(getCurSDLoc(), MVT::i1, LoadL, LoadR, ISD::SETNE);
  processIntegerCallValue(I, Cmp, false);
  return true;
}

/// See if we can lower a memchr call into an optimized form.  If so, return
/// true and lower it.  Otherwise return false, and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitMemChrCall(const CallInst &I) {
  const Value *Src = I.getArgOperand(0);
  const Value *Char = I.getArgOperand(1);
  const Value *Length = I.getArgOperand(2);

  // Only lowered if the target provides an optimized memchr expansion.
  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
    TSI.EmitTargetCodeForMemchr(DAG, getCurSDLoc(), DAG.getRoot(),
                                getValue(Src), getValue(Char), getValue(Length),
                                MachinePointerInfo(Src));
  if (Res.first.getNode()) {
    // Res.first is the resulting pointer; keep the load chain pending.
    setValue(&I, Res.first);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}

/// See if we can lower a mempcpy call into an optimized form.  If so, return
/// true and lower it.  Otherwise return false, and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a /// correct prototype. bool SelectionDAGBuilder::visitMemPCpyCall(const CallInst &I) { SDValue Dst = getValue(I.getArgOperand(0)); SDValue Src = getValue(I.getArgOperand(1)); SDValue Size = getValue(I.getArgOperand(2)); unsigned DstAlign = DAG.InferPtrAlignment(Dst); unsigned SrcAlign = DAG.InferPtrAlignment(Src); unsigned Align = std::min(DstAlign, SrcAlign); if (Align == 0) // Alignment of one or both could not be inferred. Align = 1; // 0 and 1 both specify no alignment, but 0 is reserved. bool isVol = false; SDLoc sdl = getCurSDLoc(); // In the mempcpy context we need to pass in a false value for isTailCall // because the return pointer needs to be adjusted by the size of // the copied memory. SDValue MC = DAG.getMemcpy(getRoot(), sdl, Dst, Src, Size, Align, isVol, false, /*isTailCall=*/false, MachinePointerInfo(I.getArgOperand(0)), MachinePointerInfo(I.getArgOperand(1))); assert(MC.getNode() != nullptr && "** memcpy should not be lowered as TailCall in mempcpy context **"); DAG.setRoot(MC); // Check if Size needs to be truncated or extended. Size = DAG.getSExtOrTrunc(Size, sdl, Dst.getValueType()); // Adjust return pointer to point just past the last dst byte. SDValue DstPlusSize = DAG.getNode(ISD::ADD, sdl, Dst.getValueType(), Dst, Size); setValue(&I, DstPlusSize); return true; } /// See if we can lower a strcpy call into an optimized form. If so, return /// true and lower it, otherwise return false and it will be lowered like a /// normal call. /// The caller already checked that \p I calls the appropriate LibFunc with a /// correct prototype. 
bool SelectionDAGBuilder::visitStrCpyCall(const CallInst &I, bool isStpcpy) { const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForStrcpy(DAG, getCurSDLoc(), getRoot(), getValue(Arg0), getValue(Arg1), MachinePointerInfo(Arg0), MachinePointerInfo(Arg1), isStpcpy); if (Res.first.getNode()) { setValue(&I, Res.first); DAG.setRoot(Res.second); return true; } return false; } /// See if we can lower a strcmp call into an optimized form. If so, return /// true and lower it, otherwise return false and it will be lowered like a /// normal call. /// The caller already checked that \p I calls the appropriate LibFunc with a /// correct prototype. bool SelectionDAGBuilder::visitStrCmpCall(const CallInst &I) { const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1); const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo(); std::pair<SDValue, SDValue> Res = TSI.EmitTargetCodeForStrcmp(DAG, getCurSDLoc(), DAG.getRoot(), getValue(Arg0), getValue(Arg1), MachinePointerInfo(Arg0), MachinePointerInfo(Arg1)); if (Res.first.getNode()) { processIntegerCallValue(I, Res.first, true); PendingLoads.push_back(Res.second); return true; } return false; } /// See if we can lower a strlen call into an optimized form. If so, return /// true and lower it, otherwise return false and it will be lowered like a /// normal call. /// The caller already checked that \p I calls the appropriate LibFunc with a /// correct prototype. 
bool SelectionDAGBuilder::visitStrLenCall(const CallInst &I) {
  const Value *Arg0 = I.getArgOperand(0);

  // Let the target try to emit an optimized strlen sequence; on failure the
  // call is lowered like an ordinary libcall.
  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
    TSI.EmitTargetCodeForStrlen(DAG, getCurSDLoc(), DAG.getRoot(),
                                getValue(Arg0), MachinePointerInfo(Arg0));
  if (Res.first.getNode()) {
    // strlen's result is an unsigned length: extend/truncate without sign.
    processIntegerCallValue(I, Res.first, false);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}

/// See if we can lower a strnlen call into an optimized form.  If so, return
/// true and lower it, otherwise return false and it will be lowered like a
/// normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitStrNLenCall(const CallInst &I) {
  const Value *Arg0 = I.getArgOperand(0), *Arg1 = I.getArgOperand(1);

  const SelectionDAGTargetInfo &TSI = DAG.getSelectionDAGInfo();
  std::pair<SDValue, SDValue> Res =
    TSI.EmitTargetCodeForStrnlen(DAG, getCurSDLoc(), DAG.getRoot(),
                                 getValue(Arg0), getValue(Arg1),
                                 MachinePointerInfo(Arg0));
  if (Res.first.getNode()) {
    // Unsigned length result, as for strlen above.
    processIntegerCallValue(I, Res.first, false);
    PendingLoads.push_back(Res.second);
    return true;
  }

  return false;
}

/// See if we can lower a unary floating-point operation into an SDNode with
/// the specified Opcode.  If so, return true and lower it, otherwise return
/// false and it will be lowered like a normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitUnaryFloatCall(const CallInst &I,
                                              unsigned Opcode) {
  // We already checked this call's prototype; verify it doesn't modify errno.
  if (!I.onlyReadsMemory())
    return false;

  SDValue Tmp = getValue(I.getArgOperand(0));
  setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), Tmp.getValueType(), Tmp));
  return true;
}

/// See if we can lower a binary floating-point operation into an SDNode with
/// the specified Opcode.  If so, return true and lower it.  Otherwise return
/// false, and it will be lowered like a normal call.
/// The caller already checked that \p I calls the appropriate LibFunc with a
/// correct prototype.
bool SelectionDAGBuilder::visitBinaryFloatCall(const CallInst &I,
                                               unsigned Opcode) {
  // We already checked this call's prototype; verify it doesn't modify errno.
  if (!I.onlyReadsMemory())
    return false;

  SDValue Tmp0 = getValue(I.getArgOperand(0));
  SDValue Tmp1 = getValue(I.getArgOperand(1));
  EVT VT = Tmp0.getValueType();
  setValue(&I, DAG.getNode(Opcode, getCurSDLoc(), VT, Tmp0, Tmp1));
  return true;
}

/// Lower a call instruction: dispatches to inline asm handling, intrinsic
/// lowering, recognized libc/libm fast paths, and finally LowerCallTo for
/// ordinary calls.
void SelectionDAGBuilder::visitCall(const CallInst &I) {
  // Handle inline assembly differently.
  if (isa<InlineAsm>(I.getCalledValue())) {
    visitInlineAsm(&I);
    return;
  }

  if (Function *F = I.getCalledFunction()) {
    if (F->isDeclaration()) {
      // Is this an LLVM intrinsic or a target-specific intrinsic?
      unsigned IID = F->getIntrinsicID();
      if (!IID)
        if (const TargetIntrinsicInfo *II = TM.getIntrinsicInfo())
          IID = II->getIntrinsicID(F);

      if (IID) {
        visitIntrinsicCall(I, IID);
        return;
      }
    }

    // Check for well-known libc/libm calls.  If the function is internal, it
    // can't be a library call.  Don't do the check if marked as nobuiltin for
    // some reason or the call site requires strict floating point semantics.
    LibFunc Func;
    if (!I.isNoBuiltin() && !I.isStrictFP() && !F->hasLocalLinkage() &&
        F->hasName() && LibInfo->getLibFunc(*F, Func) &&
        LibInfo->hasOptimizedCodeGen(Func)) {
      switch (Func) {
      default: break;
      case LibFunc_copysign:
      case LibFunc_copysignf:
      case LibFunc_copysignl:
        // We already checked this call's prototype; verify it doesn't modify
        // errno.
        if (I.onlyReadsMemory()) {
          SDValue LHS = getValue(I.getArgOperand(0));
          SDValue RHS = getValue(I.getArgOperand(1));
          setValue(&I, DAG.getNode(ISD::FCOPYSIGN, getCurSDLoc(),
                                   LHS.getValueType(), LHS, RHS));
          return;
        }
        break;
      case LibFunc_fabs:
      case LibFunc_fabsf:
      case LibFunc_fabsl:
        if (visitUnaryFloatCall(I, ISD::FABS))
          return;
        break;
      case LibFunc_fmin:
      case LibFunc_fminf:
      case LibFunc_fminl:
        if (visitBinaryFloatCall(I, ISD::FMINNUM))
          return;
        break;
      case LibFunc_fmax:
      case LibFunc_fmaxf:
      case LibFunc_fmaxl:
        if (visitBinaryFloatCall(I, ISD::FMAXNUM))
          return;
        break;
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_sinl:
        if (visitUnaryFloatCall(I, ISD::FSIN))
          return;
        break;
      case LibFunc_cos:
      case LibFunc_cosf:
      case LibFunc_cosl:
        if (visitUnaryFloatCall(I, ISD::FCOS))
          return;
        break;
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
      case LibFunc_sqrtl:
      case LibFunc_sqrt_finite:
      case LibFunc_sqrtf_finite:
      case LibFunc_sqrtl_finite:
        if (visitUnaryFloatCall(I, ISD::FSQRT))
          return;
        break;
      case LibFunc_floor:
      case LibFunc_floorf:
      case LibFunc_floorl:
        if (visitUnaryFloatCall(I, ISD::FFLOOR))
          return;
        break;
      case LibFunc_nearbyint:
      case LibFunc_nearbyintf:
      case LibFunc_nearbyintl:
        if (visitUnaryFloatCall(I, ISD::FNEARBYINT))
          return;
        break;
      case LibFunc_ceil:
      case LibFunc_ceilf:
      case LibFunc_ceill:
        if (visitUnaryFloatCall(I, ISD::FCEIL))
          return;
        break;
      case LibFunc_rint:
      case LibFunc_rintf:
      case LibFunc_rintl:
        if (visitUnaryFloatCall(I, ISD::FRINT))
          return;
        break;
      case LibFunc_round:
      case LibFunc_roundf:
      case LibFunc_roundl:
        if (visitUnaryFloatCall(I, ISD::FROUND))
          return;
        break;
      case LibFunc_trunc:
      case LibFunc_truncf:
      case LibFunc_truncl:
        if (visitUnaryFloatCall(I, ISD::FTRUNC))
          return;
        break;
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log2l:
        if (visitUnaryFloatCall(I, ISD::FLOG2))
          return;
        break;
      case LibFunc_exp2:
      case LibFunc_exp2f:
      case LibFunc_exp2l:
        if (visitUnaryFloatCall(I, ISD::FEXP2))
          return;
        break;
      case LibFunc_memcmp:
        if (visitMemCmpCall(I))
          return;
        break;
      case LibFunc_mempcpy:
        if (visitMemPCpyCall(I))
          return;
        break;
      case LibFunc_memchr:
        if (visitMemChrCall(I))
          return;
        break;
      case LibFunc_strcpy:
        if (visitStrCpyCall(I, false))
          return;
        break;
      case LibFunc_stpcpy:
        if (visitStrCpyCall(I, true))
          return;
        break;
      case LibFunc_strcmp:
        if (visitStrCmpCall(I))
          return;
        break;
      case LibFunc_strlen:
        if (visitStrLenCall(I))
          return;
        break;
      case LibFunc_strnlen:
        if (visitStrNLenCall(I))
          return;
        break;
      }
    }
  }

  // Deopt bundles are lowered in LowerCallSiteWithDeoptBundle, and we don't
  // have to do anything here to lower funclet bundles.
  assert(!I.hasOperandBundlesOtherThan(
             {LLVMContext::OB_deopt, LLVMContext::OB_funclet}) &&
         "Cannot lower calls with arbitrary operand bundles!");

  SDValue Callee = getValue(I.getCalledValue());

  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    LowerCallSiteWithDeoptBundle(&I, Callee, nullptr);
  else
    // Check if we can potentially perform a tail call.  More detailed
    // checking is done within LowerCallTo, after more information about the
    // call is known.
    LowerCallTo(&I, Callee, I.isTailCall());
}

namespace {

/// AsmOperandInfo - This contains information for each constraint that we are
/// lowering.
class SDISelAsmOperandInfo : public TargetLowering::AsmOperandInfo {
public:
  /// CallOperand - If this is the result output operand or a clobber
  /// this is null, otherwise it is the incoming operand to the CallInst.
  /// This gets modified as the asm is processed.
  SDValue CallOperand;

  /// AssignedRegs - If this is a register or register class operand, this
  /// contains the set of register corresponding to the operand.
  RegsForValue AssignedRegs;

  explicit SDISelAsmOperandInfo(const TargetLowering::AsmOperandInfo &info)
    : TargetLowering::AsmOperandInfo(info), CallOperand(nullptr, 0) {
  }

  /// Whether or not this operand accesses memory
  bool hasMemory(const TargetLowering &TLI) const {
    // Indirect operand accesses access memory.
    if (isIndirect) return true;

    // Otherwise the operand is memory-based iff any of its constraint codes
    // is classified as a memory constraint by the target.
    for (const auto &Code : Codes)
      if (TLI.getConstraintType(Code) == TargetLowering::C_Memory)
        return true;

    return false;
  }

  /// getCallOperandValEVT - Return the EVT of the Value* that this operand
  /// corresponds to.  If there is no Value* for this operand, it returns
  /// MVT::Other.
  EVT getCallOperandValEVT(LLVMContext &Context, const TargetLowering &TLI,
                           const DataLayout &DL) const {
    if (!CallOperandVal) return MVT::Other;

    // Basic-block operands (asm goto targets) are modeled as pointers.
    if (isa<BasicBlock>(CallOperandVal))
      return TLI.getPointerTy(DL);

    llvm::Type *OpTy = CallOperandVal->getType();

    // FIXME: code duplicated from TargetLowering::ParseConstraints().
    // If this is an indirect operand, the operand is a pointer to the
    // accessed type.
    if (isIndirect) {
      PointerType *PtrTy = dyn_cast<PointerType>(OpTy);
      if (!PtrTy)
        report_fatal_error("Indirect operand for inline asm not a pointer!");
      OpTy = PtrTy->getElementType();
    }

    // Look for vector wrapped in a struct. e.g. { <16 x i8> }.
    if (StructType *STy = dyn_cast<StructType>(OpTy))
      if (STy->getNumElements() == 1)
        OpTy = STy->getElementType(0);

    // If OpTy is not a single value, it may be a struct/union that we
    // can tile with integers.
    if (!OpTy->isSingleValueType() && OpTy->isSized()) {
      unsigned BitSize = DL.getTypeSizeInBits(OpTy);
      switch (BitSize) {
      default: break;
      case 1:
      case 8:
      case 16:
      case 32:
      case 64:
      case 128:
        OpTy = IntegerType::get(Context, BitSize);
        break;
      }
    }

    return TLI.getValueType(DL, OpTy, true);
  }
};

using SDISelAsmOperandInfoVector = SmallVector<SDISelAsmOperandInfo, 16>;

} // end anonymous namespace

/// Make sure that the output operand \p OpInfo and its corresponding input
/// operand \p MatchingOpInfo have compatible constraint types (otherwise error
/// out).
static void patchMatchingInput(const SDISelAsmOperandInfo &OpInfo,
                               SDISelAsmOperandInfo &MatchingOpInfo,
                               SelectionDAG &DAG) {
  if (OpInfo.ConstraintVT == MatchingOpInfo.ConstraintVT)
    return;

  const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
  const auto &TLI = DAG.getTargetLoweringInfo();

  std::pair<unsigned, const TargetRegisterClass *> MatchRC =
      TLI.getRegForInlineAsmConstraint(TRI, OpInfo.ConstraintCode,
                                       OpInfo.ConstraintVT);
  std::pair<unsigned, const TargetRegisterClass *> InputRC =
      TLI.getRegForInlineAsmConstraint(TRI, MatchingOpInfo.ConstraintCode,
                                       MatchingOpInfo.ConstraintVT);
  // The pair is only compatible if both sides agree on integer-ness and map
  // to the same register class.
  if ((OpInfo.ConstraintVT.isInteger() !=
       MatchingOpInfo.ConstraintVT.isInteger()) ||
      (MatchRC.second != InputRC.second)) {
    // FIXME: error out in a more elegant fashion
    report_fatal_error("Unsupported asm: input constraint"
                       " with a matching output constraint of"
                       " incompatible type!");
  }
  MatchingOpInfo.ConstraintVT = OpInfo.ConstraintVT;
}

/// Get a direct memory input to behave well as an indirect operand.
/// This may introduce stores, hence the need for a \p Chain.
/// \return The (possibly updated) chain.
static SDValue getAddressForMemoryInput(SDValue Chain, const SDLoc &Location,
                                        SDISelAsmOperandInfo &OpInfo,
                                        SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // If we don't have an indirect input, put it in the constpool if we can,
  // otherwise spill it to a stack slot.
  // TODO: This isn't quite right. We need to handle these according to
  // the addressing mode that the constraint wants. Also, this may take
  // an additional register for the computation and we don't want that
  // either.

  // If the operand is a float, integer, or vector constant, spill to a
  // constant pool entry to get its address.
  const Value *OpVal = OpInfo.CallOperandVal;
  if (isa<ConstantFP>(OpVal) || isa<ConstantInt>(OpVal) ||
      isa<ConstantVector>(OpVal) || isa<ConstantDataVector>(OpVal)) {
    OpInfo.CallOperand = DAG.getConstantPool(
        cast<Constant>(OpVal), TLI.getPointerTy(DAG.getDataLayout()));
    return Chain;
  }

  // Otherwise, create a stack slot and emit a store to it before the asm.
  Type *Ty = OpVal->getType();
  auto &DL = DAG.getDataLayout();
  uint64_t TySize = DL.getTypeAllocSize(Ty);
  unsigned Align = DL.getPrefTypeAlignment(Ty);
  MachineFunction &MF = DAG.getMachineFunction();
  int SSFI = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getFrameIndexTy(DL));
  Chain = DAG.getTruncStore(Chain, Location, OpInfo.CallOperand, StackSlot,
                            MachinePointerInfo::getFixedStack(MF, SSFI),
                            TLI.getMemValueType(DL, Ty));
  // The operand now refers to the spill slot's address.
  OpInfo.CallOperand = StackSlot;

  return Chain;
}

/// GetRegistersForValue - Assign registers (virtual or physical) for the
/// specified operand.  We prefer to assign virtual registers, to allow the
/// register allocator to handle the assignment process.  However, if the asm
/// uses features that we can't model on machineinstrs, we have SDISel do the
/// allocation.  This produces generally horrible, but correct, code.
///
///   OpInfo describes the operand
///   RefOpInfo describes the matching operand if any, the operand otherwise
static void GetRegistersForValue(SelectionDAG &DAG, const SDLoc &DL,
                                 SDISelAsmOperandInfo &OpInfo,
                                 SDISelAsmOperandInfo &RefOpInfo) {
  LLVMContext &Context = *DAG.getContext();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  SmallVector<unsigned, 4> Regs;
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();

  // No work to do for memory operations.
  if (OpInfo.ConstraintType == TargetLowering::C_Memory)
    return;

  // If this is a constraint for a single physreg, or a constraint for a
  // register class, find it.
  unsigned AssignedReg;
  const TargetRegisterClass *RC;
  std::tie(AssignedReg, RC) = TLI.getRegForInlineAsmConstraint(
      &TRI, RefOpInfo.ConstraintCode, RefOpInfo.ConstraintVT);
  // RC is unset only on failure. Return immediately.
  if (!RC)
    return;

  // Get the actual register value type.  This is important, because the user
  // may have asked for (e.g.) the AX register in i32 type.  We need to
  // remember that AX is actually i16 to get the right extension.
  const MVT RegVT = *TRI.legalclasstypes_begin(*RC);

  if (OpInfo.ConstraintVT != MVT::Other) {
    // If this is an FP operand in an integer register (or visa versa), or more
    // generally if the operand value disagrees with the register class we plan
    // to stick it in, fix the operand type.
    //
    // If this is an input value, the bitcast to the new type is done now.
    // Bitcast for output value is done at the end of visitInlineAsm().
    if ((OpInfo.Type == InlineAsm::isOutput ||
         OpInfo.Type == InlineAsm::isInput) &&
        !TRI.isTypeLegalForClass(*RC, OpInfo.ConstraintVT)) {
      // Try to convert to the first EVT that the reg class contains.  If the
      // types are identical size, use a bitcast to convert (e.g. two differing
      // vector types).  Note: output bitcast is done at the end of
      // visitInlineAsm().
      if (RegVT.getSizeInBits() == OpInfo.ConstraintVT.getSizeInBits()) {
        // Exclude indirect inputs while they are unsupported because the code
        // to perform the load is missing and thus OpInfo.CallOperand still
        // refers to the input address rather than the pointed-to value.
        if (OpInfo.Type == InlineAsm::isInput && !OpInfo.isIndirect)
          OpInfo.CallOperand =
              DAG.getNode(ISD::BITCAST, DL, RegVT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = RegVT;
        // If the operand is an FP value and we want it in integer registers,
        // use the corresponding integer type. This turns an f64 value into
        // i64, which can be passed with two i32 values on a 32-bit machine.
      } else if (RegVT.isInteger() && OpInfo.ConstraintVT.isFloatingPoint()) {
        MVT VT = MVT::getIntegerVT(OpInfo.ConstraintVT.getSizeInBits());
        if (OpInfo.Type == InlineAsm::isInput)
          OpInfo.CallOperand =
              DAG.getNode(ISD::BITCAST, DL, VT, OpInfo.CallOperand);
        OpInfo.ConstraintVT = VT;
      }
    }
  }

  // No need to allocate a matching input constraint since the constraint it's
  // matching to has already been allocated.
  if (OpInfo.isMatchingInputConstraint())
    return;

  EVT ValueVT = OpInfo.ConstraintVT;
  if (OpInfo.ConstraintVT == MVT::Other)
    ValueVT = RegVT;

  // Initialize NumRegs.
  unsigned NumRegs = 1;
  if (OpInfo.ConstraintVT != MVT::Other)
    NumRegs = TLI.getNumRegisters(Context, OpInfo.ConstraintVT);

  // If this is a constraint for a specific physical register, like {r17},
  // assign it now.

  // If this associated to a specific register, initialize iterator to correct
  // place. If virtual, make sure we have enough registers

  // Initialize iterator if necessary
  TargetRegisterClass::iterator I = RC->begin();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Do not check for single registers.
  if (AssignedReg) {
    // Advance the iterator to the requested physreg; it must be in RC.
    for (; *I != AssignedReg; ++I)
      assert(I != RC->end() && "AssignedReg should be member of RC");
  }

  for (; NumRegs; --NumRegs, ++I) {
    assert(I != RC->end() && "Ran out of registers to allocate!");
    Register R = AssignedReg ? Register(*I) : RegInfo.createVirtualRegister(RC);
    Regs.push_back(R);
  }

  OpInfo.AssignedRegs = RegsForValue(Regs, RegVT, ValueVT);
}

/// Return the index in \p AsmNodeOperands of the flag-word operand that
/// starts inline-asm operand number \p OperandNo.
static unsigned
findMatchingInlineAsmOperand(unsigned OperandNo,
                             const std::vector<SDValue> &AsmNodeOperands) {
  // Scan until we find the definition we already emitted of this operand.
  unsigned CurOp = InlineAsm::Op_FirstOperand;
  for (; OperandNo; --OperandNo) {
    // Advance to the next operand.
    unsigned OpFlag =
        cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
    assert((InlineAsm::isRegDefKind(OpFlag) ||
            InlineAsm::isRegDefEarlyClobberKind(OpFlag) ||
            InlineAsm::isMemKind(OpFlag)) &&
           "Skipped past definitions?");
    CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1;
  }
  return CurOp;
}

namespace {

/// Accumulates the "extra info" flag bits for an INLINEASM node (side
/// effects, align-stack, convergent, dialect, may-load/may-store).  The
/// final word is emitted as operand 3 of the node by visitInlineAsm.
class ExtraFlags {
  unsigned Flags = 0;

public:
  explicit ExtraFlags(ImmutableCallSite CS) {
    const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
    if (IA->hasSideEffects())
      Flags |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      Flags |= InlineAsm::Extra_IsAlignStack;
    if (CS.isConvergent())
      Flags |= InlineAsm::Extra_IsConvergent;
    Flags |= IA->getDialect() * InlineAsm::Extra_AsmDialect;
  }

  /// Fold one operand's memory behavior into the flag word.
  void update(const TargetLowering::AsmOperandInfo &OpInfo) {
    // Ideally, we would only check against memory constraints.  However, the
    // meaning of an Other constraint can be target-specific and we can't easily
    // reason about it.  Therefore, be conservative and set MayLoad/MayStore
    // for Other constraints as well.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
        OpInfo.ConstraintType == TargetLowering::C_Other) {
      if (OpInfo.Type == InlineAsm::isInput)
        Flags |= InlineAsm::Extra_MayLoad;
      else if (OpInfo.Type == InlineAsm::isOutput)
        Flags |= InlineAsm::Extra_MayStore;
      else if (OpInfo.Type == InlineAsm::isClobber)
        Flags |= (InlineAsm::Extra_MayLoad | InlineAsm::Extra_MayStore);
    }
  }

  unsigned get() const { return Flags; }
};

} // end anonymous namespace

/// visitInlineAsm - Handle a call to an InlineAsm object.
void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
  const InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());

  /// ConstraintOperands - Information about all of the constraints.
  SDISelAsmOperandInfoVector ConstraintOperands;

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  TargetLowering::AsmOperandInfoVector TargetConstraints = TLI.ParseConstraints(
      DAG.getDataLayout(), DAG.getSubtarget().getRegisterInfo(), CS);

  // First Pass: Calculate HasSideEffects and ExtraFlags (AlignStack,
  // AsmDialect, MayLoad, MayStore).
  bool HasSideEffect = IA->hasSideEffects();
  ExtraFlags ExtraInfo(CS);

  unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
  unsigned ResNo = 0; // ResNo - The result number of the next output.
  for (auto &T : TargetConstraints) {
    ConstraintOperands.push_back(SDISelAsmOperandInfo(T));
    SDISelAsmOperandInfo &OpInfo = ConstraintOperands.back();

    // Compute the value type for each operand.
    if (OpInfo.Type == InlineAsm::isInput ||
        (OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect)) {
      OpInfo.CallOperandVal = const_cast<Value *>(CS.getArgument(ArgNo++));

      // Process the call argument. BasicBlocks are labels, currently appearing
      // only in asm's.
      const Instruction *I = CS.getInstruction();
      if (isa<CallBrInst>(I) &&
          (ArgNo - 1) >= (cast<CallBrInst>(I)->getNumArgOperands() -
                          cast<CallBrInst>(I)->getNumIndirectDests())) {
        // callbr indirect-destination arguments are block addresses.
        const auto *BA = cast<BlockAddress>(OpInfo.CallOperandVal);
        EVT VT = TLI.getValueType(DAG.getDataLayout(), BA->getType(), true);
        OpInfo.CallOperand = DAG.getTargetBlockAddress(BA, VT);
      } else if (const auto *BB = dyn_cast<BasicBlock>(OpInfo.CallOperandVal)) {
        OpInfo.CallOperand = DAG.getBasicBlock(FuncInfo.MBBMap[BB]);
      } else {
        OpInfo.CallOperand = getValue(OpInfo.CallOperandVal);
      }

      OpInfo.ConstraintVT =
          OpInfo
              .getCallOperandValEVT(*DAG.getContext(), TLI, DAG.getDataLayout())
              .getSimpleVT();
    } else if (OpInfo.Type == InlineAsm::isOutput && !OpInfo.isIndirect) {
      // The return value of the call is this value.  As such, there is no
      // corresponding argument.
      assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
      if (StructType *STy = dyn_cast<StructType>(CS.getType())) {
        OpInfo.ConstraintVT = TLI.getSimpleValueType(
            DAG.getDataLayout(), STy->getElementType(ResNo));
      } else {
        assert(ResNo == 0 && "Asm only has one result!");
        OpInfo.ConstraintVT =
            TLI.getSimpleValueType(DAG.getDataLayout(), CS.getType());
      }
      ++ResNo;
    } else {
      OpInfo.ConstraintVT = MVT::Other;
    }

    if (!HasSideEffect)
      HasSideEffect = OpInfo.hasMemory(TLI);

    // Determine if this InlineAsm MayLoad or MayStore based on the constraints.
    // FIXME: Could we compute this on OpInfo rather than T?

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(T, SDValue());

    if (T.ConstraintType == TargetLowering::C_Immediate &&
        OpInfo.CallOperand && !isa<ConstantSDNode>(OpInfo.CallOperand))
      // We've delayed emitting a diagnostic like the "n" constraint because
      // inlining could cause an integer showing up.
      return emitInlineAsmError(
          CS, "constraint '" + Twine(T.ConstraintCode) + "' expects an "
                  "integer constant expression");

    ExtraInfo.update(T);
  }

  // We won't need to flush pending loads if this asm doesn't touch
  // memory and is nonvolatile.
  SDValue Flag, Chain = (HasSideEffect) ? getRoot() : DAG.getRoot();

  bool IsCallBr = isa<CallBrInst>(CS.getInstruction());
  if (IsCallBr) {
    // If this is a callbr we need to flush pending exports since inlineasm_br
    // is a terminator. We need to do this before nodes are glued to
    // the inlineasm_br node.
    Chain = getControlRoot();
  }

  // Second pass over the constraints: compute which constraint option to use.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    // If this is an output operand with a matching input operand, look up the
    // matching input. If their types mismatch, e.g. one is an integer, the
    // other is floating point, or their sizes are different, flag it as an
    // error.
    if (OpInfo.hasMatchingInput()) {
      SDISelAsmOperandInfo &Input = ConstraintOperands[OpInfo.MatchingInput];
      patchMatchingInput(OpInfo, Input, DAG);
    }

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, OpInfo.CallOperand, &DAG);

    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        OpInfo.Type == InlineAsm::isClobber)
      continue;

    // If this is a memory input, and if the operand is not indirect, do what we
    // need to provide an address for the memory input.
    if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
        !OpInfo.isIndirect) {
      assert((OpInfo.isMultipleAlternative ||
              (OpInfo.Type == InlineAsm::isInput)) &&
             "Can only indirectify direct input operands!");

      // Memory operands really want the address of the value.
      Chain = getAddressForMemoryInput(Chain, getCurSDLoc(), OpInfo, DAG);

      // There is no longer a Value* corresponding to this operand.
      OpInfo.CallOperandVal = nullptr;

      // It is now an indirect operand.
      OpInfo.isIndirect = true;
    }
  }

  // AsmNodeOperands - The operands for the ISD::INLINEASM node.
  std::vector<SDValue> AsmNodeOperands;
  AsmNodeOperands.push_back(SDValue()); // reserve space for input chain
  AsmNodeOperands.push_back(DAG.getTargetExternalSymbol(
      IA->getAsmString().c_str(), TLI.getPointerTy(DAG.getDataLayout())));

  // If we have a !srcloc metadata node associated with it, we want to attach
  // this to the ultimately generated inline asm machineinstr.  To do this, we
  // pass in the third operand as this (potentially null) inline asm MDNode.
  const MDNode *SrcLoc = CS.getInstruction()->getMetadata("srcloc");
  AsmNodeOperands.push_back(DAG.getMDNode(SrcLoc));

  // Remember the HasSideEffect, AlignStack, AsmDialect, MayLoad and MayStore
  // bits as operand 3.
  AsmNodeOperands.push_back(DAG.getTargetConstant(
      ExtraInfo.get(), getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));

  // Third pass: Loop over operands to prepare DAG-level operands. As part of
  // this, assign virtual and physical registers for inputs and output.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    // Assign Registers.
    SDISelAsmOperandInfo &RefOpInfo =
        OpInfo.isMatchingInputConstraint()
            ? ConstraintOperands[OpInfo.getMatchedOperand()]
            : OpInfo;
    GetRegistersForValue(DAG, getCurSDLoc(), OpInfo, RefOpInfo);

    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.ConstraintType == TargetLowering::C_Memory ||
          ((OpInfo.ConstraintType == TargetLowering::C_Immediate ||
            OpInfo.ConstraintType == TargetLowering::C_Other) &&
           OpInfo.isIndirect)) {
        unsigned ConstraintID =
            TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::Constraint_Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM node to know about this output.
        unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID);
        AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(),
                                                        MVT::i32));
        AsmNodeOperands.push_back(OpInfo.CallOperand);
        break;
      } else if (((OpInfo.ConstraintType == TargetLowering::C_Immediate ||
                   OpInfo.ConstraintType == TargetLowering::C_Other) &&
                  !OpInfo.isIndirect) ||
                 OpInfo.ConstraintType == TargetLowering::C_Register ||
                 OpInfo.ConstraintType == TargetLowering::C_RegisterClass) {
        // Otherwise, this outputs to a register (directly for C_Register /
        // C_RegisterClass, and a target-defined fashion for
        // C_Immediate/C_Other). Find a register that we can use.
        if (OpInfo.AssignedRegs.Regs.empty()) {
          emitInlineAsmError(
              CS, "couldn't allocate output register for constraint '" +
                      Twine(OpInfo.ConstraintCode) + "'");
          return;
        }

        // Add information to the INLINEASM node to know that this register is
        // set.
        OpInfo.AssignedRegs.AddInlineAsmOperands(
            OpInfo.isEarlyClobber ? InlineAsm::Kind_RegDefEarlyClobber
                                  : InlineAsm::Kind_RegDef,
            false, 0, getCurSDLoc(), DAG, AsmNodeOperands);
      }
      break;

    case InlineAsm::isInput: {
      SDValue InOperandVal = OpInfo.CallOperand;

      if (OpInfo.isMatchingInputConstraint()) {
        // If this is required to match an output register we have already set,
        // just use its register.
        auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(),
                                                  AsmNodeOperands);
        unsigned OpFlag =
            cast<ConstantSDNode>(AsmNodeOperands[CurOp])->getZExtValue();
        if (InlineAsm::isRegDefKind(OpFlag) ||
            InlineAsm::isRegDefEarlyClobberKind(OpFlag)) {
          // Add (OpFlag&0xffff)>>3 registers to MatchedRegs.
          if (OpInfo.isIndirect) {
            // This happens on gcc/testsuite/gcc.dg/pr8788-1.c
            emitInlineAsmError(CS, "inline asm not supported yet:"
                                   " don't know how to handle tied "
                                   "indirect register inputs");
            return;
          }

          MVT RegVT = AsmNodeOperands[CurOp+1].getSimpleValueType();
          SmallVector<unsigned, 4> Regs;

          if (const TargetRegisterClass *RC = TLI.getRegClassFor(RegVT)) {
            unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag);
            MachineRegisterInfo &RegInfo =
                DAG.getMachineFunction().getRegInfo();
            for (unsigned i = 0; i != NumRegs; ++i)
              Regs.push_back(RegInfo.createVirtualRegister(RC));
          } else {
            emitInlineAsmError(CS, "inline asm error: This value type register "
                                   "class is not natively supported!");
            return;
          }

          RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType());

          SDLoc dl = getCurSDLoc();
          // Use the produced MatchedRegs object to copy the input value into
          // the registers tied to the matched output.
          MatchedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
                                    CS.getInstruction());
          MatchedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse,
                                           true, OpInfo.getMatchedOperand(), dl,
                                           DAG, AsmNodeOperands);
          break;
        }

        assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!");
        assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 &&
               "Unexpected number of operands");
        // Add information to the INLINEASM node to know about this input.
        // See InlineAsm.h isUseOperandTiedToDef.
        OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag);
        OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag,
                                                     OpInfo.getMatchedOperand());
        AsmNodeOperands.push_back(DAG.getTargetConstant(
            OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
        AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]);
        break;
      }

      // Treat indirect 'X' constraint as memory.
      if ((OpInfo.ConstraintType == TargetLowering::C_Immediate ||
           OpInfo.ConstraintType == TargetLowering::C_Other) &&
          OpInfo.isIndirect)
        OpInfo.ConstraintType = TargetLowering::C_Memory;

      if (OpInfo.ConstraintType == TargetLowering::C_Immediate ||
          OpInfo.ConstraintType == TargetLowering::C_Other) {
        std::vector<SDValue> Ops;
        TLI.LowerAsmOperandForConstraint(InOperandVal, OpInfo.ConstraintCode,
                                          Ops, DAG);
        if (Ops.empty()) {
          if (OpInfo.ConstraintType == TargetLowering::C_Immediate)
            if (isa<ConstantSDNode>(InOperandVal)) {
              emitInlineAsmError(CS, "value out of range for constraint '" +
                                         Twine(OpInfo.ConstraintCode) + "'");
              return;
            }

          emitInlineAsmError(CS, "invalid operand for inline asm constraint '" +
                                     Twine(OpInfo.ConstraintCode) + "'");
          return;
        }

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType =
            InlineAsm::getFlagWord(InlineAsm::Kind_Imm, Ops.size());
        AsmNodeOperands.push_back(DAG.getTargetConstant(
            ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout())));
        AsmNodeOperands.insert(AsmNodeOperands.end(), Ops.begin(), Ops.end());
        break;
      }

      if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
        assert(OpInfo.isIndirect && "Operand must be indirect to be a mem!");
        assert(InOperandVal.getValueType() ==
                   TLI.getPointerTy(DAG.getDataLayout()) &&
               "Memory operands expect pointer values");

        unsigned ConstraintID =
            TLI.getInlineAsmMemConstraint(OpInfo.ConstraintCode);
        assert(ConstraintID != InlineAsm::Constraint_Unknown &&
               "Failed to convert memory constraint code to constraint id.");

        // Add information to the INLINEASM node to know about this input.
        unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind_Mem, 1);
        ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID);
        AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType,
                                                        getCurSDLoc(),
                                                        MVT::i32));
        AsmNodeOperands.push_back(InOperandVal);
        break;
      }

      assert((OpInfo.ConstraintType == TargetLowering::C_RegisterClass ||
              OpInfo.ConstraintType == TargetLowering::C_Register ||
              OpInfo.ConstraintType == TargetLowering::C_Immediate) &&
             "Unknown constraint type!");

      // TODO: Support this.
      if (OpInfo.isIndirect) {
        emitInlineAsmError(
            CS, "Don't know how to handle indirect register inputs yet "
                "for constraint '" +
                    Twine(OpInfo.ConstraintCode) + "'");
        return;
      }

      // Copy the input into the appropriate registers.
      if (OpInfo.AssignedRegs.Regs.empty()) {
        emitInlineAsmError(CS, "couldn't allocate input reg for constraint '" +
                                   Twine(OpInfo.ConstraintCode) + "'");
        return;
      }

      SDLoc dl = getCurSDLoc();

      OpInfo.AssignedRegs.getCopyToRegs(InOperandVal, DAG, dl, Chain, &Flag,
                                        CS.getInstruction());

      OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_RegUse, false, 0,
                                               dl, DAG, AsmNodeOperands);
      break;
    }
    case InlineAsm::isClobber:
      // Add the clobbered value to the operand list, so that the register
      // allocator is aware that the physreg got clobbered.
      if (!OpInfo.AssignedRegs.Regs.empty())
        OpInfo.AssignedRegs.AddInlineAsmOperands(InlineAsm::Kind_Clobber,
                                                 false, 0, getCurSDLoc(), DAG,
                                                 AsmNodeOperands);
      break;
    }
  }

  // Finish up input operands.  Set the input chain and add the flag last.
  AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
  if (Flag.getNode()) AsmNodeOperands.push_back(Flag);

  unsigned ISDOpc = IsCallBr ? ISD::INLINEASM_BR : ISD::INLINEASM;
  Chain = DAG.getNode(ISDOpc, getCurSDLoc(),
                      DAG.getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
  Flag = Chain.getValue(1);

  // Do additional work to generate outputs.

  SmallVector<EVT, 1> ResultVTs;
  SmallVector<SDValue, 1> ResultValues;
  SmallVector<SDValue, 8> OutChains;

  llvm::Type *CSResultType = CS.getType();
  ArrayRef<Type *> ResultTypes;
  if (StructType *StructResult = dyn_cast<StructType>(CSResultType))
    ResultTypes = StructResult->elements();
  else if (!CSResultType->isVoidTy())
    ResultTypes = makeArrayRef(CSResultType);

  auto CurResultType = ResultTypes.begin();
  auto handleRegAssign = [&](SDValue V) {
    assert(CurResultType != ResultTypes.end() && "Unexpected value");
    assert((*CurResultType)->isSized() && "Unexpected unsized type");
    EVT ResultVT = TLI.getValueType(DAG.getDataLayout(), *CurResultType);
    ++CurResultType;
    // If the type of the inline asm call site return value is different but has
    // same size as the type of the asm output bitcast it.  One example of this
    // is for vectors with different width / number of elements.  This can
    // happen for register classes that can contain multiple different value
    // types.  The preg or vreg allocated may not have the same VT as was
    // expected.
    //
    // This can also happen for a return value that disagrees with the register
    // class it is put in, eg. a double in a general-purpose register on a
    // 32-bit machine.
    if (ResultVT != V.getValueType() &&
        ResultVT.getSizeInBits() == V.getValueSizeInBits())
      V = DAG.getNode(ISD::BITCAST, getCurSDLoc(), ResultVT, V);
    else if (ResultVT != V.getValueType() && ResultVT.isInteger() &&
             V.getValueType().isInteger()) {
      // If a result value was tied to an input value, the computed result
      // may have a wider width than the expected result.  Extract the
      // relevant portion.
      V = DAG.getNode(ISD::TRUNCATE, getCurSDLoc(), ResultVT, V);
    }
    assert(ResultVT == V.getValueType() && "Asm result value mismatch!");
    ResultVTs.push_back(ResultVT);
    ResultValues.push_back(V);
  };

  // Deal with output operands.
  for (SDISelAsmOperandInfo &OpInfo : ConstraintOperands) {
    if (OpInfo.Type == InlineAsm::isOutput) {
      SDValue Val;
      // Skip trivial output operands.
      if (OpInfo.AssignedRegs.Regs.empty())
        continue;

      switch (OpInfo.ConstraintType) {
      case TargetLowering::C_Register:
      case TargetLowering::C_RegisterClass:
        Val = OpInfo.AssignedRegs.getCopyFromRegs(
            DAG, FuncInfo, getCurSDLoc(), Chain, &Flag, CS.getInstruction());
        break;
      case TargetLowering::C_Immediate:
      case TargetLowering::C_Other:
        Val = TLI.LowerAsmOutputForConstraint(Chain, Flag, getCurSDLoc(),
                                              OpInfo, DAG);
        break;
      case TargetLowering::C_Memory:
        break; // Already handled.
      case TargetLowering::C_Unknown:
        assert(false && "Unexpected unknown constraint");
      }

      // Indirect output manifest as stores. Record output chains.
      if (OpInfo.isIndirect) {
        const Value *Ptr = OpInfo.CallOperandVal;
        assert(Ptr && "Expected value CallOperandVal for indirect asm operand");
        SDValue Store = DAG.getStore(Chain, getCurSDLoc(), Val, getValue(Ptr),
                                     MachinePointerInfo(Ptr));
        OutChains.push_back(Store);
      } else {
        // generate CopyFromRegs to associated registers.
        assert(!CS.getType()->isVoidTy() && "Bad inline asm!");
        if (Val.getOpcode() == ISD::MERGE_VALUES) {
          for (const SDValue &V : Val->op_values())
            handleRegAssign(V);
        } else
          handleRegAssign(Val);
      }
    }
  }

  // Set results.
  if (!ResultValues.empty()) {
    assert(CurResultType == ResultTypes.end() &&
           "Mismatch in number of ResultTypes");
    assert(ResultValues.size() == ResultTypes.size() &&
           "Mismatch in number of output operands in asm result");

    SDValue V = DAG.getNode(ISD::MERGE_VALUES, getCurSDLoc(),
                            DAG.getVTList(ResultVTs), ResultValues);
    setValue(CS.getInstruction(), V);
  }

  // Collect store chains.
  if (!OutChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, getCurSDLoc(), MVT::Other, OutChains);

  // Only Update Root if inline assembly has a memory effect.
  if (ResultValues.empty() || HasSideEffect || !OutChains.empty() || IsCallBr)
    DAG.setRoot(Chain);
}

/// Report an inline-asm lowering failure: emit a diagnostic on the call
/// instruction, then give the call's results UNDEF values so the DAG is left
/// in a valid state.
void SelectionDAGBuilder::emitInlineAsmError(ImmutableCallSite CS,
                                             const Twine &Message) {
  LLVMContext &Ctx = *DAG.getContext();
  Ctx.emitError(CS.getInstruction(), Message);

  // Make sure we leave the DAG in a valid state
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SmallVector<EVT, 1> ValueVTs;
  ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);

  if (ValueVTs.empty())
    return;

  SmallVector<SDValue, 1> Ops;
  for (unsigned i = 0, e = ValueVTs.size(); i != e; ++i)
    Ops.push_back(DAG.getUNDEF(ValueVTs[i]));

  setValue(CS.getInstruction(), DAG.getMergeValues(Ops, getCurSDLoc()));
}

void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VASTART, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
}

void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  const DataLayout &DL = DAG.getDataLayout();
  SDValue V = DAG.getVAArg(
      TLI.getMemValueType(DAG.getDataLayout(), I.getType()), getCurSDLoc(),
      getRoot(), getValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
      DL.getABITypeAlignment(I.getType()));
  DAG.setRoot(V.getValue(1));

  // Pointers are loaded at memory width and may need to be shrunk/extended to
  // the target's pointer value type.
  if (I.getType()->isPointerTy())
    V = DAG.getPtrExtOrTrunc(
        V, getCurSDLoc(), TLI.getValueType(DAG.getDataLayout(), I.getType()));
  setValue(&I, V);
}

void SelectionDAGBuilder::visitVAEnd(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VAEND, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(0))));
}

void SelectionDAGBuilder::visitVACopy(const CallInst &I) {
  DAG.setRoot(DAG.getNode(ISD::VACOPY, getCurSDLoc(),
                          MVT::Other, getRoot(),
                          getValue(I.getArgOperand(0)),
                          getValue(I.getArgOperand(1)),
                          DAG.getSrcValue(I.getArgOperand(0)),
                          DAG.getSrcValue(I.getArgOperand(1))));
}

/// If \p I carries !range metadata describing a range that starts at zero,
/// wrap \p Op in an AssertZext of the smallest integer type that covers the
/// range, so later combines can exploit the known-zero high bits.  Returns
/// \p Op unchanged when the metadata gives no usable information.
SDValue SelectionDAGBuilder::lowerRangeToAssertZExt(SelectionDAG &DAG,
                                                    const Instruction &I,
                                                    SDValue Op) {
  const MDNode *Range = I.getMetadata(LLVMContext::MD_range);
  if (!Range)
    return Op;

  ConstantRange CR = getConstantRangeFromMetadata(*Range);
  if (CR.isFullSet() || CR.isEmptySet() || CR.isUpperWrapped())
    return Op;

  APInt Lo = CR.getUnsignedMin();
  if (!Lo.isMinValue())
    return Op;

  APInt Hi = CR.getUnsignedMax();
  unsigned Bits = std::max(Hi.getActiveBits(),
                           static_cast<unsigned>(IntegerType::MIN_INT_BITS));

  EVT SmallVT = EVT::getIntegerVT(*DAG.getContext(), Bits);

  SDLoc SL = getCurSDLoc();

  SDValue ZExt = DAG.getNode(ISD::AssertZext, SL, Op.getValueType(), Op,
                             DAG.getValueType(SmallVT));
  unsigned NumVals = Op.getNode()->getNumValues();
  if (NumVals == 1)
    return ZExt;

  SmallVector<SDValue, 4> Ops;

  // Preserve any secondary results (e.g. a chain) of the original node.
  Ops.push_back(ZExt);
  for (unsigned I = 1; I != NumVals; ++I)
    Ops.push_back(Op.getValue(I));

  return DAG.getMergeValues(Ops, SL);
}

/// Populate a CallLoweringInfo (into \p CLI) based on the properties of
/// the call being lowered.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
void SelectionDAGBuilder::populateCallLoweringInfo(
    TargetLowering::CallLoweringInfo &CLI, const CallBase *Call,
    unsigned ArgIdx, unsigned NumArgs, SDValue Callee, Type *ReturnTy,
    bool IsPatchPoint) {
  TargetLowering::ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    const Value *V = Call->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    TargetLowering::ArgListEntry Entry;
    Entry.Node = getValue(V);
    Entry.Ty = V->getType();
    Entry.setAttributes(Call, ArgI);
    Args.push_back(Entry);
  }

  CLI.setDebugLoc(getCurSDLoc())
      .setChain(getRoot())
      .setCallee(Call->getCallingConv(), ReturnTy, Callee, std::move(Args))
      .setDiscardResult(Call->use_empty())
      .setIsPatchPoint(IsPatchPoint);
}

/// Add a stack map intrinsic call's live variable operands to a stackmap
/// or patchpoint target node's operand list.
///
/// Constants are converted to TargetConstants purely as an optimization to
/// avoid constant materialization and register allocation.
///
/// FrameIndex operands are converted to TargetFrameIndex so that ISEL does not
/// generate address computation nodes, and so FinalizeISel can convert the
/// TargetFrameIndex into a DirectMemRefOp StackMap location. This avoids
/// address materialization and register allocation, but may also be required
/// for correctness. If a StackMap (or PatchPoint) intrinsic directly uses an
/// alloca in the entry block, then the runtime may assume that the alloca's
/// StackMap location can be read immediately after compilation and that the
/// location is valid at any point during execution (this is similar to the
/// assumption made by the llvm.gcroot intrinsic). If the alloca's location were
/// only available in a register, then the runtime would need to trap when
/// execution reaches the StackMap in order to read the alloca's location.
static void addStackMapLiveVars(ImmutableCallSite CS, unsigned StartIdx,
                                const SDLoc &DL, SmallVectorImpl<SDValue> &Ops,
                                SelectionDAGBuilder &Builder) {
  // Append every call-site argument from StartIdx onward, encoding constants
  // and frame indices specially (see the doc comment above).
  for (unsigned i = StartIdx, e = CS.arg_size(); i != e; ++i) {
    SDValue OpVal = Builder.getValue(CS.getArgument(i));
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(OpVal)) {
      // Constants are emitted as a ConstantOp marker followed by the value so
      // the StackMaps printer can recognize them.
      Ops.push_back(
          Builder.DAG.getTargetConstant(StackMaps::ConstantOp, DL, MVT::i64));
      Ops.push_back(
          Builder.DAG.getTargetConstant(C->getSExtValue(), DL, MVT::i64));
    } else if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(OpVal)) {
      const TargetLowering &TLI = Builder.DAG.getTargetLoweringInfo();
      Ops.push_back(Builder.DAG.getTargetFrameIndex(
          FI->getIndex(), TLI.getFrameIndexTy(Builder.DAG.getDataLayout())));
    } else
      Ops.push_back(OpVal);
  }
}

/// Lower llvm.experimental.stackmap directly to its target opcode.
void SelectionDAGBuilder::visitStackmap(const CallInst &CI) {
  // void @llvm.experimental.stackmap(i32 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])

  assert(CI.getType()->isVoidTy() && "Stackmap cannot return a value.");

  SDValue Chain, InFlag, Callee, NullPtr;
  SmallVector<SDValue, 32> Ops;

  SDLoc DL = getCurSDLoc();
  Callee = getValue(CI.getCalledValue());
  NullPtr = DAG.getIntPtrConstant(0, DL, true);

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // chain, flag = CALLSEQ_START(chain, 0, 0)
  // chain, flag = STACKMAP(id, nbytes, ..., chain, flag)
  // chain, flag = CALLSEQ_END(chain, 0, 0, flag)
  //
  Chain = DAG.getCALLSEQ_START(getRoot(), 0, 0, DL);
  InFlag = Chain.getValue(1);

  // Add the <id> and <numBytes> constants.
  SDValue IDVal = getValue(CI.getOperand(PatchPointOpers::IDPos));
  Ops.push_back(DAG.getTargetConstant(
                  cast<ConstantSDNode>(IDVal)->getZExtValue(), DL, MVT::i64));
  SDValue NBytesVal = getValue(CI.getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(DAG.getTargetConstant(
                  cast<ConstantSDNode>(NBytesVal)->getZExtValue(), DL,
                  MVT::i32));

  // Push live variables for the stack map, skipping the first two operands
  // (<id> and <numShadowBytes>) which were just encoded above.
  addStackMapLiveVars(&CI, 2, DL, Ops, *this);

  // We are not pushing any register mask info here on the operands list,
  // because the stackmap doesn't clobber anything.

  // Push the chain and the glue flag.
  Ops.push_back(Chain);
  Ops.push_back(InFlag);

  // Create the STACKMAP node.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDNode *SM = DAG.getMachineNode(TargetOpcode::STACKMAP, DL, NodeTys, Ops);
  Chain = SDValue(SM, 0);
  InFlag = Chain.getValue(1);

  Chain = DAG.getCALLSEQ_END(Chain, NullPtr, NullPtr, InFlag, DL);

  // Stackmaps don't generate values, so nothing goes into the NodeMap.

  // Set the root to the target-lowered call chain.
  DAG.setRoot(Chain);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();
}

/// Lower llvm.experimental.patchpoint directly to its target opcode.
void SelectionDAGBuilder::visitPatchpoint(ImmutableCallSite CS,
                                          const BasicBlock *EHPadBB) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])

  CallingConv::ID CC = CS.getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !CS->getType()->isVoidTy();
  SDLoc dl = getCurSDLoc();
  SDValue Callee = getValue(CS->getOperand(PatchPointOpers::TargetPos));

  // Handle immediate and symbolic callees.
  if (auto* ConstCallee = dyn_cast<ConstantSDNode>(Callee))
    Callee = DAG.getIntPtrConstant(ConstCallee->getZExtValue(), dl,
                                   /*isTarget=*/true);
  else if (auto* SymbolicCallee = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(SymbolicCallee->getGlobal(),
                                        SDLoc(SymbolicCallee),
                                        SymbolicCallee->getValueType(0));

  // Get the real number of arguments participating in the call <numArgs>
  SDValue NArgVal = getValue(CS.getArgument(PatchPointOpers::NArgPos));
  unsigned NumArgs = cast<ConstantSDNode>(NArgVal)->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // Intrinsics include all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(CS.arg_size() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  Type *ReturnTy =
      IsAnyRegCC ? Type::getVoidTy(*DAG.getContext()) : CS->getType();

  // Lower the patchpoint as an ordinary (possibly invokable) call first; the
  // resulting call node is then rewritten into a PATCHPOINT below.
  TargetLowering::CallLoweringInfo CLI(DAG);
  populateCallLoweringInfo(CLI, cast<CallBase>(CS.getInstruction()),
                           NumMetaOpers, NumCallArgs, Callee, ReturnTy, true);
  std::pair<SDValue, SDValue> Result = lowerInvokable(CLI, EHPadBB);

  SDNode *CallEnd = Result.second.getNode();
  if (HasDef && (CallEnd->getOpcode() == ISD::CopyFromReg))
    CallEnd = CallEnd->getOperand(0).getNode();

  // Get a call instruction from the call sequence chain.
  // Tail calls are not allowed.
  assert(CallEnd->getOpcode() == ISD::CALLSEQ_END &&
         "Expected a callseq node.");
  SDNode *Call = CallEnd->getOperand(0).getNode();
  bool HasGlue = Call->getGluedNode();

  // Replace the target specific call node with the patchable intrinsic.
  SmallVector<SDValue, 8> Ops;

  // Add the <id> and <numBytes> constants.
  SDValue IDVal = getValue(CS->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(DAG.getTargetConstant(
                  cast<ConstantSDNode>(IDVal)->getZExtValue(), dl, MVT::i64));
  SDValue NBytesVal = getValue(CS->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(DAG.getTargetConstant(
                  cast<ConstantSDNode>(NBytesVal)->getZExtValue(), dl,
                  MVT::i32));

  // Add the callee.
  Ops.push_back(Callee);

  // Adjust <numArgs> to account for any arguments that have been passed on the
  // stack instead.
  // Call Node: Chain, Target, {Args}, RegMask, [Glue]
  unsigned NumCallRegArgs = Call->getNumOperands() - (HasGlue ? 4 : 3);
  NumCallRegArgs = IsAnyRegCC ? NumArgs : NumCallRegArgs;
  Ops.push_back(DAG.getTargetConstant(NumCallRegArgs, dl, MVT::i32));

  // Add the calling convention
  Ops.push_back(DAG.getTargetConstant((unsigned)CC, dl, MVT::i32));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC)
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i)
      Ops.push_back(getValue(CS.getArgument(i)));

  // Push the arguments from the call instruction up to the register mask.
  SDNode::op_iterator e = HasGlue ? Call->op_end()-2 : Call->op_end()-1;
  Ops.append(Call->op_begin() + 2, e);

  // Push live variables for the stack map.
  addStackMapLiveVars(CS, NumMetaOpers + NumArgs, dl, Ops, *this);

  // Push the register mask info.
  if (HasGlue)
    Ops.push_back(*(Call->op_end()-2));
  else
    Ops.push_back(*(Call->op_end()-1));

  // Push the chain (this is originally the first operand of the call, but
  // becomes now the last or second to last operand).
  Ops.push_back(*(Call->op_begin()));

  // Push the glue flag (last operand).
  if (HasGlue)
    Ops.push_back(*(Call->op_end()-1));

  SDVTList NodeTys;
  if (IsAnyRegCC && HasDef) {
    // Create the return types based on the intrinsic definition
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    SmallVector<EVT, 3> ValueVTs;
    ComputeValueVTs(TLI, DAG.getDataLayout(), CS->getType(), ValueVTs);
    assert(ValueVTs.size() == 1 && "Expected only one return value type.");

    // There is always a chain and a glue type at the end
    ValueVTs.push_back(MVT::Other);
    ValueVTs.push_back(MVT::Glue);
    NodeTys = DAG.getVTList(ValueVTs);
  } else
    NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  // Replace the target specific call node with a PATCHPOINT node.
  MachineSDNode *MN = DAG.getMachineNode(TargetOpcode::PATCHPOINT, dl,
                                         NodeTys, Ops);

  // Update the NodeMap.
  if (HasDef) {
    if (IsAnyRegCC)
      setValue(CS.getInstruction(), SDValue(MN, 0));
    else
      setValue(CS.getInstruction(), Result.first);
  }

  // Fixup the consumers of the intrinsic. The chain and glue may be used in the
  // call sequence. Furthermore the location of the chain and glue can change
  // when the AnyReg calling convention is used and the intrinsic returns a
  // value.
  if (IsAnyRegCC && HasDef) {
    SDValue From[] = {SDValue(Call, 0), SDValue(Call, 1)};
    SDValue To[] = {SDValue(MN, 1), SDValue(MN, 2)};
    DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
  } else
    DAG.ReplaceAllUsesWith(Call, MN);
  DAG.DeleteNode(Call);

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();
}

/// Lower llvm.experimental.vector.reduce.* intrinsics to their corresponding
/// ISD::VECREDUCE_* nodes (or, for the v2 fadd/fmul forms, to a scalar
/// FADD/FMUL of the start value with the reduction when reassociation is
/// allowed, and to the strict in-order node otherwise).
void SelectionDAGBuilder::visitVectorReduce(const CallInst &I,
                                            unsigned Intrinsic) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Op1 = getValue(I.getArgOperand(0));
  SDValue Op2;
  // The v2 fadd/fmul forms take a scalar start value plus the vector.
  if (I.getNumArgOperands() > 1)
    Op2 = getValue(I.getArgOperand(1));
  SDLoc dl = getCurSDLoc();
  EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
  SDValue Res;
  FastMathFlags FMF;
  if (isa<FPMathOperator>(I))
    FMF = I.getFastMathFlags();

  switch (Intrinsic) {
  case Intrinsic::experimental_vector_reduce_v2_fadd:
    if (FMF.allowReassoc())
      Res = DAG.getNode(ISD::FADD, dl, VT, Op1,
                        DAG.getNode(ISD::VECREDUCE_FADD, dl, VT, Op2));
    else
      Res = DAG.getNode(ISD::VECREDUCE_STRICT_FADD, dl, VT, Op1, Op2);
    break;
  case Intrinsic::experimental_vector_reduce_v2_fmul:
    if (FMF.allowReassoc())
      Res = DAG.getNode(ISD::FMUL, dl, VT, Op1,
                        DAG.getNode(ISD::VECREDUCE_FMUL, dl, VT, Op2));
    else
      Res = DAG.getNode(ISD::VECREDUCE_STRICT_FMUL, dl, VT, Op1, Op2);
    break;
  case Intrinsic::experimental_vector_reduce_add:
    Res = DAG.getNode(ISD::VECREDUCE_ADD, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_mul:
    Res = DAG.getNode(ISD::VECREDUCE_MUL, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_and:
    Res = DAG.getNode(ISD::VECREDUCE_AND, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_or:
    Res = DAG.getNode(ISD::VECREDUCE_OR, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_xor:
    Res = DAG.getNode(ISD::VECREDUCE_XOR, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_smax:
    Res = DAG.getNode(ISD::VECREDUCE_SMAX, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_smin:
    Res = DAG.getNode(ISD::VECREDUCE_SMIN, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_umax:
    Res = DAG.getNode(ISD::VECREDUCE_UMAX, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_umin:
    Res = DAG.getNode(ISD::VECREDUCE_UMIN, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_fmax:
    Res = DAG.getNode(ISD::VECREDUCE_FMAX, dl, VT, Op1);
    break;
  case Intrinsic::experimental_vector_reduce_fmin:
    Res = DAG.getNode(ISD::VECREDUCE_FMIN, dl, VT, Op1);
    break;
  default:
    llvm_unreachable("Unhandled vector reduce intrinsic");
  }
  setValue(&I, Res);
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(TargetLowering::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

/// TargetLowering::LowerCallTo - This is the default LowerCallTo
/// implementation, which just calls LowerCall.
/// FIXME: When all targets are
/// migrated to using LowerCall, this hook should be integrated into SDISel.
std::pair<SDValue, SDValue>
TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
  // Handle the incoming return values from the call.
  CLI.Ins.clear();
  Type *OrigRetTy = CLI.RetTy;
  SmallVector<EVT, 4> RetTys;
  SmallVector<uint64_t, 4> Offsets;
  auto &DL = CLI.DAG.getDataLayout();
  ComputeValueVTs(*this, DL, CLI.RetTy, RetTys, &Offsets);

  if (CLI.IsPostTypeLegalization) {
    // If we are lowering a libcall after legalization, split the return type.
    SmallVector<EVT, 4> OldRetTys;
    SmallVector<uint64_t, 4> OldOffsets;
    RetTys.swap(OldRetTys);
    Offsets.swap(OldOffsets);

    // Expand each post-legalization return VT into its legal register parts,
    // recording the byte offset of every part.
    for (size_t i = 0, e = OldRetTys.size(); i != e; ++i) {
      EVT RetVT = OldRetTys[i];
      uint64_t Offset = OldOffsets[i];
      MVT RegisterVT = getRegisterType(CLI.RetTy->getContext(), RetVT);
      unsigned NumRegs = getNumRegisters(CLI.RetTy->getContext(), RetVT);
      unsigned RegisterVTByteSZ = RegisterVT.getSizeInBits() / 8;
      RetTys.append(NumRegs, RegisterVT);
      for (unsigned j = 0; j != NumRegs; ++j)
        Offsets.push_back(Offset + j * RegisterVTByteSZ);
    }
  }

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, *this, DL);

  bool CanLowerReturn =
      this->CanLowerReturn(CLI.CallConv, CLI.DAG.getMachineFunction(),
                           CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  SDValue DemoteStackSlot;
  int DemoteStackIdx = -100;
  if (!CanLowerReturn) {
    // Demote the return value: pass a pointer to a stack slot as a hidden
    // first (sret) argument and make the call itself return void.
    // FIXME: equivalent assert?
    // assert(!CS.hasInAllocaArgument() &&
    //        "sret demotion is incompatible with inalloca");
    uint64_t TySize = DL.getTypeAllocSize(CLI.RetTy);
    unsigned Align = DL.getPrefTypeAlignment(CLI.RetTy);
    MachineFunction &MF = CLI.DAG.getMachineFunction();
    DemoteStackIdx = MF.getFrameInfo().CreateStackObject(TySize, Align, false);
    Type *StackSlotPtrType = PointerType::get(CLI.RetTy,
                                              DL.getAllocaAddrSpace());

    DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
    ArgListEntry Entry;
    Entry.Node = DemoteStackSlot;
    Entry.Ty = StackSlotPtrType;
    Entry.IsSExt = false;
    Entry.IsZExt = false;
    Entry.IsInReg = false;
    Entry.IsSRet = true;
    Entry.IsNest = false;
    Entry.IsByVal = false;
    Entry.IsReturned = false;
    Entry.IsSwiftSelf = false;
    Entry.IsSwiftError = false;
    Entry.Alignment = Align;
    CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
    CLI.NumFixedArgs += 1;
    CLI.RetTy = Type::getVoidTy(CLI.RetTy->getContext());

    // sret demotion isn't compatible with tail-calls, since the sret argument
    // points into the callers stack frame.
    CLI.IsTailCall = false;
  } else {
    // Build the ISD::InputArg list describing each register the return value
    // will arrive in.
    bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
        CLI.RetTy, CLI.CallConv, CLI.IsVarArg);
    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
      ISD::ArgFlagsTy Flags;
      if (NeedsRegBlock) {
        Flags.setInConsecutiveRegs();
        if (I == RetTys.size() - 1)
          Flags.setInConsecutiveRegsLast();
      }
      EVT VT = RetTys[I];
      MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                     CLI.CallConv, VT);
      unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                       CLI.CallConv, VT);
      for (unsigned i = 0; i != NumRegs; ++i) {
        ISD::InputArg MyFlags;
        MyFlags.Flags = Flags;
        MyFlags.VT = RegisterVT;
        MyFlags.ArgVT = VT;
        MyFlags.Used = CLI.IsReturnValueUsed;
        if (CLI.RetTy->isPointerTy()) {
          MyFlags.Flags.setPointer();
          MyFlags.Flags.setPointerAddrSpace(
              cast<PointerType>(CLI.RetTy)->getAddressSpace());
        }
        if (CLI.RetSExt)
          MyFlags.Flags.setSExt();
        if (CLI.RetZExt)
          MyFlags.Flags.setZExt();
        if (CLI.IsInReg)
          MyFlags.Flags.setInReg();
        CLI.Ins.push_back(MyFlags);
      }
    }
  }

  // We push in swifterror return as the last element of CLI.Ins.
  ArgListTy &Args = CLI.getArgs();
  if (supportSwiftError()) {
    for (unsigned i = 0, e = Args.size(); i != e; ++i) {
      if (Args[i].IsSwiftError) {
        ISD::InputArg MyFlags;
        MyFlags.VT = getPointerTy(DL);
        MyFlags.ArgVT = EVT(getPointerTy(DL));
        MyFlags.Flags.setSwiftError();
        CLI.Ins.push_back(MyFlags);
      }
    }
  }

  // Handle all of the outgoing arguments.
  CLI.Outs.clear();
  CLI.OutVals.clear();
  for (unsigned i = 0, e = Args.size(); i != e; ++i) {
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(*this, DL, Args[i].Ty, ValueVTs);
    // FIXME: Split arguments if CLI.IsPostTypeLegalization
    Type *FinalType = Args[i].Ty;
    if (Args[i].IsByVal)
      FinalType = cast<PointerType>(Args[i].Ty)->getElementType();
    bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);
    for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
         ++Value) {
      EVT VT = ValueVTs[Value];
      Type *ArgTy = VT.getTypeForEVT(CLI.RetTy->getContext());
      SDValue Op = SDValue(Args[i].Node.getNode(),
                           Args[i].Node.getResNo() + Value);
      ISD::ArgFlagsTy Flags;

      // Certain targets (such as MIPS), may have a different ABI alignment
      // for a type depending on the context. Give the target a chance to
      // specify the alignment it wants.
      unsigned OriginalAlignment = getABIAlignmentForCallingConv(ArgTy, DL);

      if (Args[i].Ty->isPointerTy()) {
        Flags.setPointer();
        Flags.setPointerAddrSpace(
            cast<PointerType>(Args[i].Ty)->getAddressSpace());
      }
      if (Args[i].IsZExt)
        Flags.setZExt();
      if (Args[i].IsSExt)
        Flags.setSExt();
      if (Args[i].IsInReg) {
        // If we are using vectorcall calling convention, a structure that is
        // passed InReg - is surely an HVA
        if (CLI.CallConv == CallingConv::X86_VectorCall &&
            isa<StructType>(FinalType)) {
          // The first value of a structure is marked
          if (0 == Value)
            Flags.setHvaStart();
          Flags.setHva();
        }
        // Set InReg Flag
        Flags.setInReg();
      }
      if (Args[i].IsSRet)
        Flags.setSRet();
      if (Args[i].IsSwiftSelf)
        Flags.setSwiftSelf();
      if (Args[i].IsSwiftError)
        Flags.setSwiftError();
      if (Args[i].IsByVal)
        Flags.setByVal();
      if (Args[i].IsInAlloca) {
        Flags.setInAlloca();
        // Set the byval flag for CCAssignFn callbacks that don't know about
        // inalloca. This way we can know how many bytes we should've allocated
        // and how many bytes a callee cleanup function will pop.  If we port
        // inalloca to more targets, we'll have to add custom inalloca handling
        // in the various CC lowering callbacks.
        Flags.setByVal();
      }
      if (Args[i].IsByVal || Args[i].IsInAlloca) {
        PointerType *Ty = cast<PointerType>(Args[i].Ty);
        Type *ElementTy = Ty->getElementType();

        unsigned FrameSize = DL.getTypeAllocSize(
            Args[i].ByValType ? Args[i].ByValType : ElementTy);
        Flags.setByValSize(FrameSize);

        // For ByVal, alignment should come from FE.  BE will guess if this
        // info is not there but there are cases it cannot get right.
        unsigned FrameAlign;
        if (Args[i].Alignment)
          FrameAlign = Args[i].Alignment;
        else
          FrameAlign = getByValTypeAlignment(ElementTy, DL);
        Flags.setByValAlign(FrameAlign);
      }
      if (Args[i].IsNest)
        Flags.setNest();
      if (NeedsRegBlock)
        Flags.setInConsecutiveRegs();
      Flags.setOrigAlign(OriginalAlignment);

      MVT PartVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                 CLI.CallConv, VT);
      unsigned NumParts = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                        CLI.CallConv, VT);
      SmallVector<SDValue, 4> Parts(NumParts);
      ISD::NodeType ExtendKind = ISD::ANY_EXTEND;

      if (Args[i].IsSExt)
        ExtendKind = ISD::SIGN_EXTEND;
      else if (Args[i].IsZExt)
        ExtendKind = ISD::ZERO_EXTEND;

      // Conservatively only handle 'returned' on non-vectors that can be
      // lowered, for now.
      if (Args[i].IsReturned && !Op.getValueType().isVector() &&
          CanLowerReturn) {
        assert((CLI.RetTy == Args[i].Ty ||
                (CLI.RetTy->isPointerTy() && Args[i].Ty->isPointerTy() &&
                 CLI.RetTy->getPointerAddressSpace() ==
                     Args[i].Ty->getPointerAddressSpace())) &&
               RetTys.size() == NumValues && "unexpected use of 'returned'");
        // Before passing 'returned' to the target lowering code, ensure that
        // either the register MVT and the actual EVT are the same size or that
        // the return value and argument are extended in the same way; in these
        // cases it's safe to pass the argument register value unchanged as the
        // return register value (although it's at the target's option whether
        // to do so)
        // TODO: allow code generation to take advantage of partially preserved
        // registers rather than clobbering the entire register when the
        // parameter extension method is not compatible with the return
        // extension method
        if ((NumParts * PartVT.getSizeInBits() == VT.getSizeInBits()) ||
            (ExtendKind != ISD::ANY_EXTEND && CLI.RetSExt == Args[i].IsSExt &&
             CLI.RetZExt == Args[i].IsZExt))
          Flags.setReturned();
      }

      getCopyToParts(CLI.DAG, CLI.DL, Op, &Parts[0], NumParts, PartVT,
                     CLI.CS.getInstruction(), CLI.CallConv, ExtendKind);

      for (unsigned j = 0; j != NumParts; ++j) {
        // if it isn't first piece, alignment must be 1
        ISD::OutputArg MyFlags(Flags, Parts[j].getValueType(), VT,
                               i < CLI.NumFixedArgs,
                               i, j*Parts[j].getValueType().getStoreSize());
        if (NumParts > 1 && j == 0)
          MyFlags.Flags.setSplit();
        else if (j != 0) {
          MyFlags.Flags.setOrigAlign(1);
          if (j == NumParts - 1)
            MyFlags.Flags.setSplitEnd();
        }

        CLI.Outs.push_back(MyFlags);
        CLI.OutVals.push_back(Parts[j]);
      }

      if (NeedsRegBlock && Value == NumValues - 1)
        CLI.Outs[CLI.Outs.size() - 1].Flags.setInConsecutiveRegsLast();
    }
  }

  SmallVector<SDValue, 4> InVals;
  CLI.Chain = LowerCall(CLI, InVals);

  // Update CLI.InVals to use outside of this function.
  CLI.InVals = InVals;

  // Verify that the target's LowerCall behaved as expected.
  assert(CLI.Chain.getNode() && CLI.Chain.getValueType() == MVT::Other &&
         "LowerCall didn't return a valid chain!");
  assert((!CLI.IsTailCall || InVals.empty()) &&
         "LowerCall emitted a return value for a tail call!");
  assert((CLI.IsTailCall || InVals.size() == CLI.Ins.size()) &&
         "LowerCall didn't emit the correct number of values!");

  // For a tail call, the return value is merely live-out and there aren't
  // any nodes in the DAG representing it. Return a special value to
  // indicate that a tail call has been emitted and no more Instructions
  // should be processed in the current block.
  if (CLI.IsTailCall) {
    CLI.DAG.setRoot(CLI.Chain);
    return std::make_pair(SDValue(), SDValue());
  }

#ifndef NDEBUG
  for (unsigned i = 0, e = CLI.Ins.size(); i != e; ++i) {
    assert(InVals[i].getNode() && "LowerCall emitted a null value!");
    assert(EVT(CLI.Ins[i].VT) == InVals[i].getValueType() &&
           "LowerCall emitted a value with the wrong type!");
  }
#endif

  SmallVector<SDValue, 4> ReturnValues;
  if (!CanLowerReturn) {
    // The instruction result is the result of loading from the
    // hidden sret parameter.
    SmallVector<EVT, 1> PVTs;
    Type *PtrRetTy = OrigRetTy->getPointerTo(DL.getAllocaAddrSpace());

    ComputeValueVTs(*this, DL, PtrRetTy, PVTs);
    assert(PVTs.size() == 1 && "Pointers should fit in one register");
    EVT PtrVT = PVTs[0];

    unsigned NumValues = RetTys.size();
    ReturnValues.resize(NumValues);
    SmallVector<SDValue, 4> Chains(NumValues);

    // An aggregate return value cannot wrap around the address space, so
    // offsets to its parts don't wrap either.
    SDNodeFlags Flags;
    Flags.setNoUnsignedWrap(true);

    for (unsigned i = 0; i < NumValues; ++i) {
      SDValue Add = CLI.DAG.getNode(ISD::ADD, CLI.DL, PtrVT, DemoteStackSlot,
                                    CLI.DAG.getConstant(Offsets[i], CLI.DL,
                                                        PtrVT), Flags);
      SDValue L = CLI.DAG.getLoad(
          RetTys[i], CLI.DL, CLI.Chain, Add,
          MachinePointerInfo::getFixedStack(CLI.DAG.getMachineFunction(),
                                            DemoteStackIdx, Offsets[i]),
          /* Alignment = */ 1);
      ReturnValues[i] = L;
      Chains[i] = L.getValue(1);
    }

    CLI.Chain = CLI.DAG.getNode(ISD::TokenFactor, CLI.DL, MVT::Other, Chains);
  } else {
    // Collect the legal value parts into potentially illegal values
    // that correspond to the original function's return values.
    Optional<ISD::NodeType> AssertOp;
    if (CLI.RetSExt)
      AssertOp = ISD::AssertSext;
    else if (CLI.RetZExt)
      AssertOp = ISD::AssertZext;
    unsigned CurReg = 0;
    for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
      EVT VT = RetTys[I];
      MVT RegisterVT = getRegisterTypeForCallingConv(CLI.RetTy->getContext(),
                                                     CLI.CallConv, VT);
      unsigned NumRegs = getNumRegistersForCallingConv(CLI.RetTy->getContext(),
                                                       CLI.CallConv, VT);

      ReturnValues.push_back(getCopyFromParts(CLI.DAG, CLI.DL, &InVals[CurReg],
                                              NumRegs, RegisterVT, VT, nullptr,
                                              CLI.CallConv, AssertOp));
      CurReg += NumRegs;
    }

    // For a function returning void, there is no return value. We can't create
    // such a node, so we just return a null return value in that case. In
    // that case, nothing will actually look at the value.
    if (ReturnValues.empty())
      return std::make_pair(SDValue(), CLI.Chain);
  }

  SDValue Res = CLI.DAG.getNode(ISD::MERGE_VALUES, CLI.DL,
                                CLI.DAG.getVTList(RetTys), ReturnValues);
  return std::make_pair(Res, CLI.Chain);
}

/// Default expansion hook used by type legalization: lower the node's first
/// result via LowerOperation and, if the target produced a replacement,
/// append it to \p Results.
void TargetLowering::LowerOperationWrapper(SDNode *N,
                                           SmallVectorImpl<SDValue> &Results,
                                           SelectionDAG &DAG) const {
  if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
    Results.push_back(Res);
}

/// Base-class stub; targets that mark operations as Custom must override this.
SDValue TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  llvm_unreachable("LowerOperation not implemented for this target!");
}

/// Emit copies of the SDValue computed for \p V into virtual register \p Reg,
/// queued on PendingExports so they are flushed before the next side-effecting
/// node.
void
SelectionDAGBuilder::CopyValueToVirtualRegister(const Value *V, unsigned Reg) {
  SDValue Op = getNonRegisterValue(V);
  assert((Op.getOpcode() != ISD::CopyFromReg ||
          cast<RegisterSDNode>(Op.getOperand(1))->getReg() != Reg) &&
         "Copy from a reg to the same reg!");
  assert(!Register::isPhysicalRegister(Reg) && "Is a physreg");

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  // If this is an InlineAsm we have to match the registers required, not the
  // notional registers required by the type.

  RegsForValue RFV(V->getContext(), TLI, DAG.getDataLayout(), Reg, V->getType(),
                   None); // This is not an ABI copy.
  SDValue Chain = DAG.getEntryNode();

  ISD::NodeType ExtendType = (FuncInfo.PreferredExtendType.find(V) ==
                              FuncInfo.PreferredExtendType.end())
                                 ? ISD::ANY_EXTEND
                                 : FuncInfo.PreferredExtendType[V];
  RFV.getCopyToRegs(Op, DAG, getCurSDLoc(), Chain, nullptr, V, ExtendType);
  PendingExports.push_back(Chain);
}

#include "llvm/CodeGen/SelectionDAGISel.h"

/// isOnlyUsedInEntryBlock - If the specified argument is only used in the
/// entry block, return true. This includes arguments used by switches, since
/// the switch may expand into multiple basic blocks.
static bool isOnlyUsedInEntryBlock(const Argument *A, bool FastISel) {
  // With FastISel active, we may be splitting blocks, so force creation
  // of virtual registers for all non-dead arguments.
  if (FastISel)
    return A->use_empty();

  const BasicBlock &Entry = A->getParent()->front();
  for (const User *U : A->users())
    if (cast<Instruction>(U)->getParent() != &Entry || isa<SwitchInst>(U))
      return false; // Use not in entry block.

  return true;
}

using ArgCopyElisionMapTy =
    DenseMap<const Argument *,
             std::pair<const AllocaInst *, const StoreInst *>>;

/// Scan the entry block of the function in FuncInfo for arguments that look
/// like copies into a local alloca. Record any copied arguments in
/// ArgCopyElisionCandidates.
static void
findArgumentCopyElisionCandidates(const DataLayout &DL,
                                  FunctionLoweringInfo *FuncInfo,
                                  ArgCopyElisionMapTy &ArgCopyElisionCandidates) {
  // Record the state of every static alloca used in the entry block. Argument
  // allocas are all used in the entry block, so we need approximately as many
  // entries as we have arguments.
  enum StaticAllocaInfo { Unknown, Clobbered, Elidable };
  SmallDenseMap<const AllocaInst *, StaticAllocaInfo, 8> StaticAllocas;
  unsigned NumArgs = FuncInfo->Fn->arg_size();
  StaticAllocas.reserve(NumArgs * 2);

  // Returns a pointer to the tracked state of V if (after stripping pointer
  // casts) it is a static alloca in the StaticAllocaMap, else nullptr.
  auto GetInfoIfStaticAlloca = [&](const Value *V) -> StaticAllocaInfo * {
    if (!V)
      return nullptr;
    V = V->stripPointerCasts();
    const auto *AI = dyn_cast<AllocaInst>(V);
    if (!AI || !AI->isStaticAlloca() || !FuncInfo->StaticAllocaMap.count(AI))
      return nullptr;
    auto Iter = StaticAllocas.insert({AI, Unknown});
    return &Iter.first->second;
  };

  // Look for stores of arguments to static allocas. Look through bitcasts and
  // GEPs to handle type coercions, as long as the alloca is fully initialized
  // by the store. Any non-store use of an alloca escapes it and any subsequent
  // unanalyzed store might write it.
  // FIXME: Handle structs initialized with multiple stores.
  for (const Instruction &I : FuncInfo->Fn->getEntryBlock()) {
    // Look for stores, and handle non-store uses conservatively.
    const auto *SI = dyn_cast<StoreInst>(&I);
    if (!SI) {
      // We will look through cast uses, so ignore them completely.
if (I.isCast()) continue; // Ignore debug info intrinsics, they don't escape or store to allocas. if (isa<DbgInfoIntrinsic>(I)) continue; // This is an unknown instruction. Assume it escapes or writes to all // static alloca operands. for (const Use &U : I.operands()) { if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(U)) *Info = StaticAllocaInfo::Clobbered; } continue; } // If the stored value is a static alloca, mark it as escaped. if (StaticAllocaInfo *Info = GetInfoIfStaticAlloca(SI->getValueOperand())) *Info = StaticAllocaInfo::Clobbered; // Check if the destination is a static alloca. const Value *Dst = SI->getPointerOperand()->stripPointerCasts(); StaticAllocaInfo *Info = GetInfoIfStaticAlloca(Dst); if (!Info) continue; const AllocaInst *AI = cast<AllocaInst>(Dst); // Skip allocas that have been initialized or clobbered. if (*Info != StaticAllocaInfo::Unknown) continue; // Check if the stored value is an argument, and that this store fully // initializes the alloca. Don't elide copies from the same argument twice. const Value *Val = SI->getValueOperand()->stripPointerCasts(); const auto *Arg = dyn_cast<Argument>(Val); if (!Arg || Arg->hasInAllocaAttr() || Arg->hasByValAttr() || Arg->getType()->isEmptyTy() || DL.getTypeStoreSize(Arg->getType()) != DL.getTypeAllocSize(AI->getAllocatedType()) || ArgCopyElisionCandidates.count(Arg)) { *Info = StaticAllocaInfo::Clobbered; continue; } LLVM_DEBUG(dbgs() << "Found argument copy elision candidate: " << *AI << '\n'); // Mark this alloca and store for argument copy elision. *Info = StaticAllocaInfo::Elidable; ArgCopyElisionCandidates.insert({Arg, {AI, SI}}); // Stop scanning if we've seen all arguments. This will happen early in -O0 // builds, which is useful, because -O0 builds have large entry blocks and // many allocas. if (ArgCopyElisionCandidates.size() == NumArgs) break; } } /// Try to elide argument copies from memory into a local alloca. Succeeds if /// ArgVal is a load from a suitable fixed stack object. 
static void tryToElideArgumentCopy(
    FunctionLoweringInfo *FuncInfo, SmallVectorImpl<SDValue> &Chains,
    DenseMap<int, int> &ArgCopyElisionFrameIndexMap,
    SmallPtrSetImpl<const Instruction *> &ElidedArgCopyInstrs,
    ArgCopyElisionMapTy &ArgCopyElisionCandidates, const Argument &Arg,
    SDValue ArgVal, bool &ArgHasUses) {
  // Check if this is a load from a fixed stack object.
  auto *LNode = dyn_cast<LoadSDNode>(ArgVal);
  if (!LNode)
    return;
  auto *FINode = dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode());
  if (!FINode)
    return;

  // Check that the fixed stack object is the right size and alignment.
  // Look at the alignment that the user wrote on the alloca instead of looking
  // at the stack object.
  auto ArgCopyIter = ArgCopyElisionCandidates.find(&Arg);
  assert(ArgCopyIter != ArgCopyElisionCandidates.end());
  const AllocaInst *AI = ArgCopyIter->second.first;
  int FixedIndex = FINode->getIndex();
  int &AllocaIndex = FuncInfo->StaticAllocaMap[AI];
  int OldIndex = AllocaIndex;
  MachineFrameInfo &MFI = FuncInfo->MF->getFrameInfo();
  if (MFI.getObjectSize(FixedIndex) != MFI.getObjectSize(OldIndex)) {
    LLVM_DEBUG(
        dbgs() << " argument copy elision failed due to bad fixed stack "
                  "object size\n");
    return;
  }
  unsigned RequiredAlignment = AI->getAlignment();
  if (!RequiredAlignment) {
    // No explicit alignment on the alloca: fall back to the type's ABI
    // alignment.
    RequiredAlignment = FuncInfo->MF->getDataLayout().getABITypeAlignment(
        AI->getAllocatedType());
  }
  if (MFI.getObjectAlignment(FixedIndex) < RequiredAlignment) {
    LLVM_DEBUG(dbgs() << " argument copy elision failed: alignment of alloca "
                         "greater than stack argument alignment ("
                      << RequiredAlignment << " vs "
                      << MFI.getObjectAlignment(FixedIndex) << ")\n");
    return;
  }

  // Perform the elision. Delete the old stack object and replace its only use
  // in the variable info map. Mark the stack object as mutable.
  LLVM_DEBUG({
    dbgs() << "Eliding argument copy from " << Arg << " to " << *AI << '\n'
           << " Replacing frame index " << OldIndex << " with " << FixedIndex
           << '\n';
  });
  MFI.RemoveStackObject(OldIndex);
  MFI.setIsImmutableObjectIndex(FixedIndex, false);
  AllocaIndex = FixedIndex;
  ArgCopyElisionFrameIndexMap.insert({OldIndex, FixedIndex});
  // Keep the load's chain so the caller can token-factor it into the root.
  Chains.push_back(ArgVal.getValue(1));

  // Avoid emitting code for the store implementing the copy.
  const StoreInst *SI = ArgCopyIter->second.second;
  ElidedArgCopyInstrs.insert(SI);

  // Check for uses of the argument again so that we can avoid exporting ArgVal
  // if it isn't used by anything other than the store.
  for (const Value *U : Arg.users()) {
    if (U != SI) {
      ArgHasUses = true;
      break;
    }
  }
}

/// Lower the incoming formal arguments of F: build the ISD::InputArg
/// descriptions, call the target's LowerFormalArguments, and register the
/// resulting values (and any elided argument copies) with FuncInfo/SDB.
void SelectionDAGISel::LowerArguments(const Function &F) {
  SelectionDAG &DAG = SDB->DAG;
  SDLoc dl = SDB->getCurSDLoc();
  const DataLayout &DL = DAG.getDataLayout();
  SmallVector<ISD::InputArg, 16> Ins;

  if (!FuncInfo->CanLowerReturn) {
    // Put in an sret pointer parameter before all the other parameters.
    SmallVector<EVT, 1> ValueVTs;
    ComputeValueVTs(*TLI, DAG.getDataLayout(),
                    F.getReturnType()->getPointerTo(
                        DAG.getDataLayout().getAllocaAddrSpace()),
                    ValueVTs);

    // NOTE: Assuming that a pointer will never break down to more than one VT
    // or one register.
    ISD::ArgFlagsTy Flags;
    Flags.setSRet();
    MVT RegisterVT = TLI->getRegisterType(*DAG.getContext(), ValueVTs[0]);
    ISD::InputArg RetArg(Flags, RegisterVT, ValueVTs[0], true,
                         ISD::InputArg::NoArgIndex, 0);
    Ins.push_back(RetArg);
  }

  // Look for stores of arguments to static allocas. Mark such arguments with a
  // flag to ask the target to give us the memory location of that argument if
  // available.
  ArgCopyElisionMapTy ArgCopyElisionCandidates;
  findArgumentCopyElisionCandidates(DL, FuncInfo, ArgCopyElisionCandidates);

  // Set up the incoming argument description vector.
  for (const Argument &Arg : F.args()) {
    unsigned ArgNo = Arg.getArgNo();
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
    bool isArgValueUsed = !Arg.use_empty();
    unsigned PartBase = 0;
    Type *FinalType = Arg.getType();
    if (Arg.hasAttribute(Attribute::ByVal))
      FinalType = Arg.getParamByValType();
    bool NeedsRegBlock = TLI->functionArgumentNeedsConsecutiveRegisters(
        FinalType, F.getCallingConv(), F.isVarArg());
    for (unsigned Value = 0, NumValues = ValueVTs.size(); Value != NumValues;
         ++Value) {
      EVT VT = ValueVTs[Value];
      Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
      ISD::ArgFlagsTy Flags;

      // Certain targets (such as MIPS), may have a different ABI alignment
      // for a type depending on the context. Give the target a chance to
      // specify the alignment it wants.
      unsigned OriginalAlignment = TLI->getABIAlignmentForCallingConv(ArgTy, DL);

      if (Arg.getType()->isPointerTy()) {
        Flags.setPointer();
        Flags.setPointerAddrSpace(
            cast<PointerType>(Arg.getType())->getAddressSpace());
      }
      if (Arg.hasAttribute(Attribute::ZExt))
        Flags.setZExt();
      if (Arg.hasAttribute(Attribute::SExt))
        Flags.setSExt();
      if (Arg.hasAttribute(Attribute::InReg)) {
        // If we are using vectorcall calling convention, a structure that is
        // passed InReg - is surely an HVA
        if (F.getCallingConv() == CallingConv::X86_VectorCall &&
            isa<StructType>(Arg.getType())) {
          // The first value of a structure is marked
          if (0 == Value)
            Flags.setHvaStart();
          Flags.setHva();
        }
        // Set InReg Flag
        Flags.setInReg();
      }
      if (Arg.hasAttribute(Attribute::StructRet))
        Flags.setSRet();
      if (Arg.hasAttribute(Attribute::SwiftSelf))
        Flags.setSwiftSelf();
      if (Arg.hasAttribute(Attribute::SwiftError))
        Flags.setSwiftError();
      if (Arg.hasAttribute(Attribute::ByVal))
        Flags.setByVal();
      if (Arg.hasAttribute(Attribute::InAlloca)) {
        Flags.setInAlloca();
        // Set the byval flag for CCAssignFn callbacks that don't know about
        // inalloca. This way we can know how many bytes we should've allocated
        // and how many bytes a callee cleanup function will pop. If we port
        // inalloca to more targets, we'll have to add custom inalloca handling
        // in the various CC lowering callbacks.
        Flags.setByVal();
      }
      if (F.getCallingConv() == CallingConv::X86_INTR) {
        // IA Interrupt passes frame (1st parameter) by value in the stack.
        if (ArgNo == 0)
          Flags.setByVal();
      }
      if (Flags.isByVal() || Flags.isInAlloca()) {
        Type *ElementTy = Arg.getParamByValType();

        // For ByVal, size and alignment should be passed from FE. BE will
        // guess if this info is not there but there are cases it cannot get
        // right.
        unsigned FrameSize = DL.getTypeAllocSize(Arg.getParamByValType());
        Flags.setByValSize(FrameSize);

        unsigned FrameAlign;
        if (Arg.getParamAlignment())
          FrameAlign = Arg.getParamAlignment();
        else
          FrameAlign = TLI->getByValTypeAlignment(ElementTy, DL);
        Flags.setByValAlign(FrameAlign);
      }
      if (Arg.hasAttribute(Attribute::Nest))
        Flags.setNest();
      if (NeedsRegBlock)
        Flags.setInConsecutiveRegs();
      Flags.setOrigAlign(OriginalAlignment);
      if (ArgCopyElisionCandidates.count(&Arg))
        Flags.setCopyElisionCandidate();
      if (Arg.hasAttribute(Attribute::Returned))
        Flags.setReturned();

      MVT RegisterVT = TLI->getRegisterTypeForCallingConv(
          *CurDAG->getContext(), F.getCallingConv(), VT);
      unsigned NumRegs = TLI->getNumRegistersForCallingConv(
          *CurDAG->getContext(), F.getCallingConv(), VT);
      // One InputArg per register part of this value.
      for (unsigned i = 0; i != NumRegs; ++i) {
        ISD::InputArg MyFlags(Flags, RegisterVT, VT, isArgValueUsed, ArgNo,
                              PartBase + i * RegisterVT.getStoreSize());
        if (NumRegs > 1 && i == 0)
          MyFlags.Flags.setSplit();
        // if it isn't first piece, alignment must be 1
        else if (i > 0) {
          MyFlags.Flags.setOrigAlign(1);
          if (i == NumRegs - 1)
            MyFlags.Flags.setSplitEnd();
        }
        Ins.push_back(MyFlags);
      }
      if (NeedsRegBlock && Value == NumValues - 1)
        Ins[Ins.size() - 1].Flags.setInConsecutiveRegsLast();
      PartBase += VT.getStoreSize();
    }
  }

  // Call the target to set up the argument values.
  SmallVector<SDValue, 8> InVals;
  SDValue NewRoot = TLI->LowerFormalArguments(
      DAG.getRoot(), F.getCallingConv(), F.isVarArg(), Ins, dl, DAG, InVals);

  // Verify that the target's LowerFormalArguments behaved as expected.
  assert(NewRoot.getNode() && NewRoot.getValueType() == MVT::Other &&
         "LowerFormalArguments didn't return a valid chain!");
  assert(InVals.size() == Ins.size() &&
         "LowerFormalArguments didn't emit the correct number of values!");
  LLVM_DEBUG({
    for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
      assert(InVals[i].getNode() &&
             "LowerFormalArguments emitted a null value!");
      assert(EVT(Ins[i].VT) == InVals[i].getValueType() &&
             "LowerFormalArguments emitted a value with the wrong type!");
    }
  });

  // Update the DAG with the new chain value resulting from argument lowering.
  DAG.setRoot(NewRoot);

  // Set up the argument values.
  unsigned i = 0;
  if (!FuncInfo->CanLowerReturn) {
    // Create a virtual register for the sret pointer, and put in a copy
    // from the sret argument into it.
    SmallVector<EVT, 1> ValueVTs;
    ComputeValueVTs(*TLI, DAG.getDataLayout(),
                    F.getReturnType()->getPointerTo(
                        DAG.getDataLayout().getAllocaAddrSpace()),
                    ValueVTs);
    MVT VT = ValueVTs[0].getSimpleVT();
    MVT RegVT = TLI->getRegisterType(*CurDAG->getContext(), VT);
    Optional<ISD::NodeType> AssertOp = None;
    SDValue ArgValue = getCopyFromParts(DAG, dl, &InVals[0], 1, RegVT, VT,
                                        nullptr, F.getCallingConv(), AssertOp);

    MachineFunction& MF = SDB->DAG.getMachineFunction();
    MachineRegisterInfo& RegInfo = MF.getRegInfo();
    Register SRetReg =
        RegInfo.createVirtualRegister(TLI->getRegClassFor(RegVT));
    FuncInfo->DemoteRegister = SRetReg;
    NewRoot =
        SDB->DAG.getCopyToReg(NewRoot, SDB->getCurSDLoc(), SRetReg, ArgValue);
    DAG.setRoot(NewRoot);

    // i indexes lowered arguments. Bump it past the hidden sret argument.
    ++i;
  }

  SmallVector<SDValue, 4> Chains;
  DenseMap<int, int> ArgCopyElisionFrameIndexMap;
  for (const Argument &Arg : F.args()) {
    SmallVector<SDValue, 4> ArgValues;
    SmallVector<EVT, 4> ValueVTs;
    ComputeValueVTs(*TLI, DAG.getDataLayout(), Arg.getType(), ValueVTs);
    unsigned NumValues = ValueVTs.size();
    if (NumValues == 0)
      continue;

    bool ArgHasUses = !Arg.use_empty();

    // Elide the copying store if the target loaded this argument from a
    // suitable fixed stack object.
    if (Ins[i].Flags.isCopyElisionCandidate()) {
      tryToElideArgumentCopy(FuncInfo, Chains, ArgCopyElisionFrameIndexMap,
                             ElidedArgCopyInstrs, ArgCopyElisionCandidates, Arg,
                             InVals[i], ArgHasUses);
    }

    // If this argument is unused then remember its value. It is used to
    // generate debugging information.
    bool isSwiftErrorArg =
        TLI->supportSwiftError() && Arg.hasAttribute(Attribute::SwiftError);
    if (!ArgHasUses && !isSwiftErrorArg) {
      SDB->setUnusedArgValue(&Arg, InVals[i]);

      // Also remember any frame index for use in FastISel.
      if (FrameIndexSDNode *FI =
              dyn_cast<FrameIndexSDNode>(InVals[i].getNode()))
        FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
    }

    for (unsigned Val = 0; Val != NumValues; ++Val) {
      EVT VT = ValueVTs[Val];
      MVT PartVT = TLI->getRegisterTypeForCallingConv(*CurDAG->getContext(),
                                                      F.getCallingConv(), VT);
      unsigned NumParts = TLI->getNumRegistersForCallingConv(
          *CurDAG->getContext(), F.getCallingConv(), VT);

      // Even an apparent 'unused' swifterror argument needs to be returned. So
      // we do generate a copy for it that can be used on return from the
      // function.
      if (ArgHasUses || isSwiftErrorArg) {
        Optional<ISD::NodeType> AssertOp;
        if (Arg.hasAttribute(Attribute::SExt))
          AssertOp = ISD::AssertSext;
        else if (Arg.hasAttribute(Attribute::ZExt))
          AssertOp = ISD::AssertZext;

        ArgValues.push_back(getCopyFromParts(DAG, dl, &InVals[i], NumParts,
                                             PartVT, VT, nullptr,
                                             F.getCallingConv(), AssertOp));
      }

      i += NumParts;
    }

    // We don't need to do anything else for unused arguments.
    if (ArgValues.empty())
      continue;

    // Note down frame index.
    if (FrameIndexSDNode *FI =
            dyn_cast<FrameIndexSDNode>(ArgValues[0].getNode()))
      FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());

    SDValue Res = DAG.getMergeValues(makeArrayRef(ArgValues.data(), NumValues),
                                     SDB->getCurSDLoc());

    SDB->setValue(&Arg, Res);
    if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::BUILD_PAIR) {
      // We want to associate the argument with the frame index, among
      // involved operands, that correspond to the lowest address. The
      // getCopyFromParts function, called earlier, is swapping the order of
      // the operands to BUILD_PAIR depending on endianness. The result of
      // that swapping is that the least significant bits of the argument will
      // be in the first operand of the BUILD_PAIR node, and the most
      // significant bits will be in the second operand.
      unsigned LowAddressOp = DAG.getDataLayout().isBigEndian() ? 1 : 0;
      if (LoadSDNode *LNode =
              dyn_cast<LoadSDNode>(Res.getOperand(LowAddressOp).getNode()))
        if (FrameIndexSDNode *FI =
                dyn_cast<FrameIndexSDNode>(LNode->getBasePtr().getNode()))
          FuncInfo->setArgumentFrameIndex(&Arg, FI->getIndex());
    }

    // Analyses past this point are naive and don't expect an assertion.
    if (Res.getOpcode() == ISD::AssertZext)
      Res = Res.getOperand(0);

    // Update the SwiftErrorVRegDefMap.
    if (Res.getOpcode() == ISD::CopyFromReg && isSwiftErrorArg) {
      unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
      if (Register::isVirtualRegister(Reg))
        SwiftError->setCurrentVReg(FuncInfo->MBB, SwiftError->getFunctionArg(),
                                   Reg);
    }

    // If this argument is live outside of the entry block, insert a copy from
    // wherever we got it to the vreg that other BB's will reference it as.
    if (Res.getOpcode() == ISD::CopyFromReg) {
      // If we can, though, try to skip creating an unnecessary vreg.
      // FIXME: This isn't very clean... it would be nice to make this more
      // general.
      unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
      if (Register::isVirtualRegister(Reg)) {
        FuncInfo->ValueMap[&Arg] = Reg;
        continue;
      }
    }
    if (!isOnlyUsedInEntryBlock(&Arg, TM.Options.EnableFastISel)) {
      FuncInfo->InitializeRegForValue(&Arg);
      SDB->CopyToExportRegsIfNeeded(&Arg);
    }
  }

  if (!Chains.empty()) {
    // Token-factor the chains of any elided-copy loads into the root.
    Chains.push_back(NewRoot);
    NewRoot = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
  }

  DAG.setRoot(NewRoot);

  assert(i == InVals.size() && "Argument register count mismatch!");

  // If any argument copy elisions occurred and we have debug info, update the
  // stale frame indices used in the dbg.declare variable info table.
  MachineFunction::VariableDbgInfoMapTy &DbgDeclareInfo =
      MF->getVariableDbgInfo();
  if (!DbgDeclareInfo.empty() && !ArgCopyElisionFrameIndexMap.empty()) {
    for (MachineFunction::VariableDbgInfo &VI : DbgDeclareInfo) {
      auto I = ArgCopyElisionFrameIndexMap.find(VI.Slot);
      if (I != ArgCopyElisionFrameIndexMap.end())
        VI.Slot = I->second;
    }
  }

  // Finally, if the target has anything special to do, allow it to do so.
  EmitFunctionEntryCode();
}

/// Handle PHI nodes in successor blocks. Emit code into the SelectionDAG to
/// ensure constants are generated when needed. Remember the virtual registers
/// that need to be added to the Machine PHI nodes as input. We cannot just
/// directly add them, because expansion might result in multiple MBB's for one
/// BB. As such, the start of the BB might correspond to a different MBB than
/// the end.
void
SelectionDAGBuilder::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const Instruction *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;

  // Check PHI nodes in successors that expect a value to be available from
  // this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (const PHINode &PN : SuccBB->phis()) {
      // Ignore dead phi's.
      if (PN.use_empty())
        continue;

      // Skip empty types
      if (PN.getType()->isEmptyTy())
        continue;

      unsigned Reg;
      const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);

      if (const Constant *C = dyn_cast<Constant>(PHIOp)) {
        // Reuse one vreg per constant; materialize it in this block once.
        unsigned &RegOut = ConstantsOut[C];
        if (RegOut == 0) {
          RegOut = FuncInfo.CreateRegs(C);
          CopyValueToVirtualRegister(C, RegOut);
        }
        Reg = RegOut;
      } else {
        DenseMap<const Value *, unsigned>::iterator I =
            FuncInfo.ValueMap.find(PHIOp);
        if (I != FuncInfo.ValueMap.end())
          Reg = I->second;
        else {
          assert(isa<AllocaInst>(PHIOp) &&
                 FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
                 "Didn't codegen value into a register!??");
          Reg = FuncInfo.CreateRegs(PHIOp);
          CopyValueToVirtualRegister(PHIOp, Reg);
        }
      }

      // Remember that this register needs to be added to the machine PHI node
      // as the input for this MBB.
      SmallVector<EVT, 4> ValueVTs;
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      ComputeValueVTs(TLI, DAG.getDataLayout(), PN.getType(), ValueVTs);
      for (unsigned vti = 0, vte = ValueVTs.size(); vti != vte; ++vti) {
        EVT VT = ValueVTs[vti];
        unsigned NumRegisters = TLI.getNumRegisters(*DAG.getContext(), VT);
        for (unsigned i = 0, e = NumRegisters; i != e; ++i)
          FuncInfo.PHINodesToUpdate.push_back(
              std::make_pair(&*MBBI++, Reg + i));
        Reg += NumRegisters;
      }
    }
  }

  ConstantsOut.clear();
}

/// Add a successor MBB to ParentMBB, creating a new MachineBB for BB if SuccMBB
/// is 0.
MachineBasicBlock *
SelectionDAGBuilder::StackProtectorDescriptor::
AddSuccessorMBB(const BasicBlock *BB,
                MachineBasicBlock *ParentMBB,
                bool IsLikely,
                MachineBasicBlock *SuccMBB) {
  // If SuccBB has not been created yet, create it.
  if (!SuccMBB) {
    MachineFunction *MF = ParentMBB->getParent();
    MachineFunction::iterator BBI(ParentMBB);
    SuccMBB = MF->CreateMachineBasicBlock(BB);
    MF->insert(++BBI, SuccMBB);
  }
  // Add it as a successor of ParentMBB.
  ParentMBB->addSuccessor(
      SuccMBB, BranchProbabilityInfo::getBranchProbStackProtector(IsLikely));
  return SuccMBB;
}

/// Return the machine basic block laid out immediately after MBB, or null if
/// MBB is the last block in the function.
MachineBasicBlock *SelectionDAGBuilder::NextBlock(MachineBasicBlock *MBB) {
  MachineFunction::iterator I(MBB);
  if (++I == FuncInfo.MF->end())
    return nullptr;
  return &*I;
}

/// During lowering new call nodes can be created (such as memset, etc.).
/// Those will become new roots of the current DAG, but complications arise
/// when they are tail calls. In such cases, the call lowering will update
/// the root, but the builder still needs to know that a tail call has been
/// lowered in order to avoid generating an additional return.
void SelectionDAGBuilder::updateDAGForMaybeTailCall(SDValue MaybeTC) {
  // If the node is null, we do have a tail call.
  if (MaybeTC.getNode() != nullptr)
    DAG.setRoot(MaybeTC);
  else
    HasTailCall = true;
}

/// Lower one work item of a switch: emit the comparisons / jump-table header /
/// bit-test header for the clusters [W.FirstCluster, W.LastCluster], chaining
/// each cluster to a fallthrough block.
void SelectionDAGBuilder::lowerWorkItem(SwitchWorkListItem W, Value *Cond,
                                        MachineBasicBlock *SwitchMBB,
                                        MachineBasicBlock *DefaultMBB) {
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  unsigned Size = W.LastCluster - W.FirstCluster + 1;

  BranchProbabilityInfo *BPI = FuncInfo.BPI;

  if (Size == 2 && W.MBB == SwitchMBB) {
    // If any two of the cases has the same destination, and if one value
    // is the same as the other, but has one bit unset that the other has set,
    // use bit manipulation to do two compares at once. For example:
    // "if (X == 6 || X == 4)" -> "if ((X|2) == 6)"
    // TODO: This could be extended to merge any 2 cases in switches with 3
    // cases.
    // TODO: Handle cases where W.CaseBB != SwitchBB.
    CaseCluster &Small = *W.FirstCluster;
    CaseCluster &Big = *W.LastCluster;

    if (Small.Low == Small.High && Big.Low == Big.High &&
        Small.MBB == Big.MBB) {
      const APInt &SmallValue = Small.Low->getValue();
      const APInt &BigValue = Big.Low->getValue();

      // Check that there is only one bit different.
      APInt CommonBit = BigValue ^ SmallValue;
      if (CommonBit.isPowerOf2()) {
        SDValue CondLHS = getValue(Cond);
        EVT VT = CondLHS.getValueType();
        SDLoc DL = getCurSDLoc();

        SDValue Or = DAG.getNode(ISD::OR, DL, VT, CondLHS,
                                 DAG.getConstant(CommonBit, DL, VT));
        SDValue Cond = DAG.getSetCC(
            DL, MVT::i1, Or, DAG.getConstant(BigValue | SmallValue, DL, VT),
            ISD::SETEQ);

        // Update successor info.
        // Both Small and Big will jump to Small.BB, so we sum up the
        // probabilities.
        addSuccessorWithProb(SwitchMBB, Small.MBB, Small.Prob + Big.Prob);
        if (BPI)
          addSuccessorWithProb(
              SwitchMBB, DefaultMBB,
              // The default destination is the first successor in IR.
              BPI->getEdgeProbability(SwitchMBB->getBasicBlock(), (unsigned)0));
        else
          addSuccessorWithProb(SwitchMBB, DefaultMBB);

        // Insert the true branch.
        SDValue BrCond =
            DAG.getNode(ISD::BRCOND, DL, MVT::Other, getControlRoot(), Cond,
                        DAG.getBasicBlock(Small.MBB));
        // Insert the false branch.
        BrCond = DAG.getNode(ISD::BR, DL, MVT::Other, BrCond,
                             DAG.getBasicBlock(DefaultMBB));

        DAG.setRoot(BrCond);
        return;
      }
    }
  }

  if (TM.getOptLevel() != CodeGenOpt::None) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker as clusters are guaranteed to never overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if possible
    // without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
      // Put Cond in a virtual register to make it available from the new
      // blocks.
      ExportFromCurrentBlock(Cond);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
      case CC_JumpTable: {
        // FIXME: Optimize away range check based on pivot comparisons.
        JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
        SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;

        // The jump block hasn't been inserted yet; insert it here.
        MachineBasicBlock *JumpMBB = JT->MBB;
        CurMF->insert(BBI, JumpMBB);

        auto JumpProb = I->Prob;
        auto FallthroughProb = UnhandledProbs;

        // If the default statement is a target of the jump table, we evenly
        // distribute the default probability to successors of CurMBB. Also
        // update the probability on the edge from JumpMBB to Fallthrough.
        for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                              SE = JumpMBB->succ_end();
             SI != SE; ++SI) {
          if (*SI == DefaultMBB) {
            JumpProb += DefaultProb / 2;
            FallthroughProb -= DefaultProb / 2;
            JumpMBB->setSuccProbability(SI, DefaultProb / 2);
            JumpMBB->normalizeSuccProbs();
            break;
          }
        }

        if (FallthroughUnreachable) {
          // Skip the range check if the fallthrough block is unreachable.
          JTH->OmitRangeCheck = true;
        }

        if (!JTH->OmitRangeCheck)
          addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
        addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
        CurMBB->normalizeSuccProbs();

        // The jump table header will be inserted in our current block, do the
        // range check, and fall through to our fallthrough block.
        JTH->HeaderBB = CurMBB;
        JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

        // If we're in the right place, emit the jump table header right now.
        if (CurMBB == SwitchMBB) {
          visitJumpTableHeader(*JT, *JTH, SwitchMBB);
          JTH->Emitted = true;
        }
        break;
      }
      case CC_BitTests: {
        // FIXME: If Fallthrough is unreachable, skip the range check.
        // FIXME: Optimize away range check based on pivot comparisons.
        BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];

        // The bit test blocks haven't been inserted yet; insert them here.
        for (BitTestCase &BTC : BTB->Cases)
          CurMF->insert(BBI, BTC.ThisBB);

        // Fill in fields of the BitTestBlock.
        BTB->Parent = CurMBB;
        BTB->Default = Fallthrough;

        BTB->DefaultProb = UnhandledProbs;
        // If the cases in bit test don't form a contiguous range, we evenly
        // distribute the probability on the edge to Fallthrough to two
        // successors of CurMBB.
        if (!BTB->ContiguousRange) {
          BTB->Prob += DefaultProb / 2;
          BTB->DefaultProb -= DefaultProb / 2;
        }

        // If we're in the right place, emit the bit test header right now.
        if (CurMBB == SwitchMBB) {
          visitBitTestHeader(*BTB, SwitchMBB);
          BTB->Emitted = true;
        }
        break;
      }
      case CC_Range: {
        const Value *RHS, *LHS, *MHS;
        ISD::CondCode CC;
        if (I->Low == I->High) {
          // Check Cond == I->Low.
          CC = ISD::SETEQ;
          LHS = Cond;
          RHS = I->Low;
          MHS = nullptr;
        } else {
          // Check I->Low <= Cond <= I->High.
          CC = ISD::SETLE;
          LHS = I->Low;
          MHS = Cond;
          RHS = I->High;
        }

        // If Fallthrough is unreachable, fold away the comparison.
        if (FallthroughUnreachable)
          CC = ISD::SETTRUE;

        // The false probability is the sum of all unhandled cases.
        CaseBlock CB(CC, LHS, RHS, MHS, I->MBB, Fallthrough, CurMBB,
                     getCurSDLoc(), I->Prob, UnhandledProbs);

        if (CurMBB == SwitchMBB)
          visitSwitchCase(CB, SwitchMBB);
        else
          SL->SwitchCases.push_back(CB);

        break;
      }
    }
    CurMBB = Fallthrough;
  }
}

/// Return how many clusters in [First, Last] rank strictly ahead of CC by
/// (probability, then case value) — i.e. CC's position if that range were
/// sorted.
unsigned SelectionDAGBuilder::caseClusterRank(const CaseCluster &CC,
                                              CaseClusterIt First,
                                              CaseClusterIt Last) {
  return std::count_if(First, Last + 1, [&](const CaseCluster &X) {
    if (X.Prob != CC.Prob)
      return X.Prob > CC.Prob;

    // Ties are broken by comparing the case value.
    return X.Low->getValue().slt(CC.Low->getValue());
  });
}

/// Split a switch work item into a left and right half around a pivot,
/// pushing new work items onto WorkList and emitting the SETLT comparison
/// that selects between the halves.
void SelectionDAGBuilder::splitWorkItem(SwitchWorkList &WorkList,
                                        const SwitchWorkListItem &W,
                                        Value *Cond,
                                        MachineBasicBlock *SwitchMBB) {
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");

  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

  // Balance the tree based on branch probabilities to create a near-optimal (in
  // terms of search time given key frequency) binary search tree. See e.g. Kurt
  // Mehlhorn "Nearly Optimal Binary Search Trees" (1975).
  CaseClusterIt LastLeft = W.FirstCluster;
  CaseClusterIt FirstRight = W.LastCluster;
  auto LeftProb = LastLeft->Prob + W.DefaultProb / 2;
  auto RightProb = FirstRight->Prob + W.DefaultProb / 2;

  // Move LastLeft and FirstRight towards each other from opposite directions to
  // find a partitioning of the clusters which balances the probability on both
  // sides. If LeftProb and RightProb are equal, alternate which side is
  // taken to ensure 0-probability nodes are distributed evenly.
  unsigned I = 0;
  while (LastLeft + 1 < FirstRight) {
    if (LeftProb < RightProb || (LeftProb == RightProb && (I & 1)))
      LeftProb += (++LastLeft)->Prob;
    else
      RightProb += (--FirstRight)->Prob;
    I++;
  }

  while (true) {
    // Our binary search tree differs from a typical BST in that ours can have
    // up to three values in each leaf. The pivot selection above doesn't take
    // that into account, which means the tree might require more nodes and be
    // less efficient. We compensate for this here.

    unsigned NumLeft = LastLeft - W.FirstCluster + 1;
    unsigned NumRight = W.LastCluster - FirstRight + 1;

    if (std::min(NumLeft, NumRight) < 3 && std::max(NumLeft, NumRight) > 3) {
      // If one side has less than 3 clusters, and the other has more than 3,
      // consider taking a cluster from the other side.

      if (NumLeft < NumRight) {
        // Consider moving the first cluster on the right to the left side.
        CaseCluster &CC = *FirstRight;
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        if (LeftSideRank <= RightSideRank) {
          // Moving the cluster to the left does not demote it.
          ++LastLeft;
          ++FirstRight;
          continue;
        }
      } else {
        assert(NumRight < NumLeft);
        // Consider moving the last element on the left to the right side.
        CaseCluster &CC = *LastLeft;
        unsigned LeftSideRank = caseClusterRank(CC, W.FirstCluster, LastLeft);
        unsigned RightSideRank = caseClusterRank(CC, FirstRight, W.LastCluster);
        if (RightSideRank <= LeftSideRank) {
          // Moving the cluster to the right does not demote it.
          --LastLeft;
          --FirstRight;
          continue;
        }
      }
    }
    break;
  }

  assert(LastLeft + 1 == FirstRight);
  assert(LastLeft >= W.FirstCluster);
  assert(FirstRight <= W.LastCluster);

  // Use the first element on the right as pivot since we will make less-than
  // comparisons against it.
  CaseClusterIt PivotCluster = FirstRight;
  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);

  CaseClusterIt FirstLeft = W.FirstCluster;
  CaseClusterIt LastRight = W.LastCluster;

  const ConstantInt *Pivot = PivotCluster->Low;

  // New blocks will be inserted immediately after the current one.
  MachineFunction::iterator BBI(W.MBB);
  ++BBI;

  // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
  // we can branch to its destination directly if it's squeezed exactly in
  // between the known lower bound and Pivot - 1.
  MachineBasicBlock *LeftMBB;
  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;
  } else {
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
    WorkList.push_back(
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
    // Put Cond in a virtual register to make it available from the new blocks.
    ExportFromCurrentBlock(Cond);
  }

  // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
  // single cluster, RHS.Low == Pivot, and we can branch to its destination
  // directly if RHS.High equals the current upper bound.
  MachineBasicBlock *RightMBB;
  if (FirstRight == LastRight && FirstRight->Kind == CC_Range &&
      W.LT && (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;
  } else {
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
    WorkList.push_back(
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
    // Put Cond in a virtual register to make it available from the new blocks.
    ExportFromCurrentBlock(Cond);
  }

  // Create the CaseBlock record that will be used to lower the branch.
  CaseBlock CB(ISD::SETLT, Cond, Pivot, nullptr, LeftMBB, RightMBB, W.MBB,
               getCurSDLoc(), LeftProb, RightProb);

  if (W.MBB == SwitchMBB)
    visitSwitchCase(CB, SwitchMBB);
  else
    SL->SwitchCases.push_back(CB);
}

// Scale CaseProb after peeling a case with the probability of PeeledCaseProb
// from the switch statement.
static BranchProbability scaleCaseProbality(BranchProbability CaseProb,
                                            BranchProbability PeeledCaseProb) {
  if (PeeledCaseProb == BranchProbability::getOne())
    return BranchProbability::getZero();
  BranchProbability SwitchProb = PeeledCaseProb.getCompl();

  uint32_t Numerator = CaseProb.getNumerator();
  uint32_t Denominator = SwitchProb.scale(CaseProb.getDenominator());
  // Clamp so the resulting fraction never exceeds 1.
  return BranchProbability(Numerator, std::max(Numerator, Denominator));
}

// Try to peel the top probability case if it exceeds the threshold.
// Return current MachineBasicBlock for the switch statement if the peeling
// does not occur.
// If the peeling is performed, return the newly created MachineBasicBlock
// for the peeled switch statement. Also update Clusters to remove the peeled
// case. PeeledCaseProb is the BranchProbability for the peeled case.
MachineBasicBlock *SelectionDAGBuilder::peelDominantCaseCluster(
    const SwitchInst &SI, CaseClusterVector &Clusters,
    BranchProbability &PeeledCaseProb) {
  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  // Don't perform if there is only one cluster or optimizing for size.
  // SwitchPeelThreshold > 100 means peeling is disabled entirely (a
  // probability threshold above 100% can never be met).
  if (SwitchPeelThreshold > 100 || !FuncInfo.BPI || Clusters.size() < 2 ||
      TM.getOptLevel() == CodeGenOpt::None ||
      SwitchMBB->getParent()->getFunction().hasMinSize())
    return SwitchMBB;

  // Find the most probable cluster, but only if it meets the threshold.
  BranchProbability TopCaseProb = BranchProbability(SwitchPeelThreshold, 100);
  unsigned PeeledCaseIndex = 0;
  bool SwitchPeeled = false;
  for (unsigned Index = 0; Index < Clusters.size(); ++Index) {
    CaseCluster &CC = Clusters[Index];
    if (CC.Prob < TopCaseProb)
      continue;
    TopCaseProb = CC.Prob;
    PeeledCaseIndex = Index;
    SwitchPeeled = true;
  }
  if (!SwitchPeeled)
    return SwitchMBB;

  LLVM_DEBUG(dbgs() << "Peeled one top case in switch stmt, prob: "
                    << TopCaseProb << "\n");

  // Record the MBB for the peeled switch statement.
  MachineFunction::iterator BBI(SwitchMBB);
  ++BBI;
  MachineBasicBlock *PeeledSwitchMBB =
      FuncInfo.MF->CreateMachineBasicBlock(SwitchMBB->getBasicBlock());
  FuncInfo.MF->insert(BBI, PeeledSwitchMBB);

  // Put the switch condition in a virtual register so the new blocks can
  // reference it, then lower the peeled case as a single work item whose
  // fallthrough target is the remainder switch (PeeledSwitchMBB).
  ExportFromCurrentBlock(SI.getCondition());
  auto PeeledCaseIt = Clusters.begin() + PeeledCaseIndex;
  SwitchWorkListItem W = {SwitchMBB, PeeledCaseIt, PeeledCaseIt,
                          nullptr,   nullptr,      TopCaseProb.getCompl()};
  lowerWorkItem(W, SI.getCondition(), SwitchMBB, PeeledSwitchMBB);

  // Remove the peeled case and renormalize the remaining probabilities.
  Clusters.erase(PeeledCaseIt);
  for (CaseCluster &CC : Clusters) {
    LLVM_DEBUG(
        dbgs() << "Scale the probablity for one cluster, before scaling: "
               << CC.Prob << "\n");
    CC.Prob = scaleCaseProbality(CC.Prob, TopCaseProb);
    LLVM_DEBUG(dbgs() << "After scaling: " << CC.Prob << "\n");
  }
  PeeledCaseProb = TopCaseProb;
  return PeeledSwitchMBB;
}

void SelectionDAGBuilder::visitSwitch(const SwitchInst &SI) {
  // Extract cases from the switch. Each case starts out as its own
  // single-value cluster; when no BPI is available, spread probability evenly
  // over the cases plus the default destination.
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto I : SI.cases()) {
    MachineBasicBlock *Succ = FuncInfo.MBBMap[I.getCaseSuccessor()];
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = FuncInfo.MBBMap[SI.getDefaultDest()];

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  // The branch probability of the peeled case.
  BranchProbability PeeledCaseProb = BranchProbability::getZero();
  MachineBasicBlock *PeeledSwitchMBB =
      peelDominantCaseCluster(SI, Clusters, PeeledCaseProb);

  // If there is only the default destination, jump there directly.
  MachineBasicBlock *SwitchMBB = FuncInfo.MBB;
  if (Clusters.empty()) {
    assert(PeeledSwitchMBB == SwitchMBB);
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != NextBlock(SwitchMBB)) {
      DAG.setRoot(DAG.getNode(ISD::BR, getCurSDLoc(), MVT::Other,
                              getControlRoot(), DAG.getBasicBlock(DefaultMBB)));
    }
    return;
  }

  // Replace runs of clusters with jump tables or bit tests where profitable.
  SL->findJumpTables(Clusters, &SI, DefaultMBB);
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(PeeledSwitchMBB, DefaultMBB);
  // Scale the branchprobability for DefaultMBB if the peel occurs and
  // DefaultMBB is not replaced.
  if (PeeledCaseProb != BranchProbability::getZero() &&
      DefaultMBB == FuncInfo.MBBMap[SI.getDefaultDest()])
    DefaultProb = scaleCaseProbality(DefaultProb, PeeledCaseProb);
  WorkList.push_back(
      {PeeledSwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  // Process work items: large ranges are recursively split into a balanced
  // binary tree of comparisons; small ones are lowered directly.
  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.back();
    WorkList.pop_back();
    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;

    if (NumClusters > 3 && TM.getOptLevel() != CodeGenOpt::None &&
        !DefaultMBB->getParent()->getFunction().hasMinSize()) {
      // For optimized builds, lower large range as a balanced binary tree.
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB);
      continue;
    }

    lowerWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB);
  }
}
#include <tuple> #include "argparse/argparse.hpp" #include "rmi/models.hpp" #include "rmi/rmi.hpp" #include "rmi/util/fn.hpp" using key_type = uint64_t; /** * Computes several metrics on the error interval sizes for a given @p Rmi on dataset @p keys and writes results to * `std::cout`. * @tparam Key key type * @tparam Rmi RMI type * @param keys on which the RMI is built * @param n_models number of models in the second layer of the RMI * @param dataset_name name of the dataset * @param layer1 model type of the first layer * @param layer2 model type of the second layer * @param bound_type used by the RMI */ template<typename Key, typename Rmi> void experiment(const std::vector<key_type> &keys, const std::size_t n_models, const std::string dataset_name, const std::string layer1, const std::string layer2, const std::string bound_type) { using rmi_type = Rmi; // Build RMI. rmi_type rmi(keys, n_models); // Initialize variables. auto n_keys = keys.size(); std::vector<int64_t> interval_sizes; interval_sizes.reserve(n_keys); // Perform predictions. for (auto key : keys) { auto pred = rmi.search(key); // Record interval size. auto interval_size = pred.hi - pred.lo; interval_sizes.push_back(interval_size); } // Report results. // Dataset std::cout << dataset_name << ',' << n_keys << ',' // RMI config << layer1 << ',' << layer2 << ',' << n_models << ',' << bound_type << ',' << rmi.size_in_bytes() << ',' // Interval sizes << mean(interval_sizes) << ',' << median(interval_sizes) << ',' << stdev(interval_sizes) << ',' << min(interval_sizes) << ',' << max(interval_sizes) << std::endl; } /** * @brief experiment function pointer */ typedef void (*exp_fn_ptr)(const std::vector<key_type>&, const std::size_t, const std::string, const std::string, const std::string, const std::string); /** * RMI configuration that holds the string representation of model types of layer 1 and layer 2 and the error bound * type. 
*/ struct Config { std::string layer1; std::string layer2; std::string bound_type; }; /** * Comparator class for @p Config objects. */ struct ConfigCompare { bool operator() (const Config &lhs, const Config &rhs) const { if (lhs.layer1 != rhs.layer1) return lhs.layer1 < rhs.layer1; if (lhs.layer2 != rhs.layer2) return lhs.layer2 < rhs.layer2; return lhs.bound_type < rhs.bound_type; } }; #define ENTRIES(L1, L2, T1, T2) \ { {#L1, #L2, "labs"}, &experiment<key_type, rmi::RmiLAbs<key_type, T1, T2>> }, \ { {#L1, #L2, "lind"}, &experiment<key_type, rmi::RmiLInd<key_type, T1, T2>> }, \ { {#L1, #L2, "gabs"}, &experiment<key_type, rmi::RmiGAbs<key_type, T1, T2>> }, \ { {#L1, #L2, "gind"}, &experiment<key_type, rmi::RmiGInd<key_type, T1, T2>> }, static std::map<Config, exp_fn_ptr, ConfigCompare> exp_map { ENTRIES(linear_regression, linear_regression, rmi::LinearRegression, rmi::LinearRegression) ENTRIES(linear_regression, linear_spline, rmi::LinearRegression, rmi::LinearSpline) ENTRIES(linear_spline, linear_regression, rmi::LinearSpline, rmi::LinearRegression) ENTRIES(linear_spline, linear_spline, rmi::LinearSpline, rmi::LinearSpline) ENTRIES(cubic_spline, linear_regression, rmi::CubicSpline, rmi::LinearRegression) ENTRIES(cubic_spline, linear_spline, rmi::CubicSpline, rmi::LinearSpline) ENTRIES(radix, linear_regression, rmi::Radix<key_type>, rmi::LinearRegression) ENTRIES(radix, linear_spline, rmi::Radix<key_type>, rmi::LinearSpline) }; ///< Map that assigns an experiment function pointer to RMI configurations. #undef ENTRIES /** * Triggers computation of several metrics on the error interval sizes of an RMI configuration provided via command line * arguments. * @param argc arguments counter * @param argv arguments vector */ int main(int argc, char *argv[]) { // Initialize argument parser. argparse::ArgumentParser program(argv[0], "0.1"); // Define arguments. 
program.add_argument("filename") .help("path to binary file containing uin64_t keys"); program.add_argument("layer1") .help("layer1 model type, either linear_regression, linear_spline, cubic_spline, or radix."); program.add_argument("layer2") .help("layer2 model type, either linear_regression, linear_spline, or cubic_spline."); program.add_argument("n_models") .help("number of models on layer2, power of two is recommended.") .action([](const std::string &s) { return std::stoul(s); }); program.add_argument("bound_type") .help("type of error bounds used, either labs, lind, gabs, or gind."); program.add_argument("--header") .help("output csv header") .default_value(false) .implicit_value(true); // Parse arguments. try { program.parse_args(argc, argv); } catch (const std::runtime_error &err) { std::cout << err.what() << '\n' << program; exit(EXIT_FAILURE); } // Read arguments. const auto filename = program.get<std::string>("filename"); const auto dataset_name = split(filename, '/').back(); const auto layer1 = program.get<std::string>("layer1"); const auto layer2 = program.get<std::string>("layer2"); const auto n_models = program.get<std::size_t>("n_models"); const auto bound_type = program.get<std::string>("bound_type"); // Load keys. auto keys = load_data<key_type>(filename); // Lookup experiment. Config config{layer1, layer2, bound_type}; if (exp_map.find(config) == exp_map.end()) { std::cerr << "Error: " << layer1 << ',' << layer2 << ',' << bound_type << " is not a valid RMI configuration." << std::endl; exit(EXIT_FAILURE); } exp_fn_ptr exp_fn = exp_map[config]; // Output header. if (program["--header"] == true) std::cout << "dataset," << "n_keys," << "layer1," << "layer2," << "n_models," << "bounds," << "size_in_bytes," << "mean_interval," << "median_interval," << "stdev_interval," << "min_interval," << "max_interval" << std::endl; // Run experiment. (*exp_fn)(keys, n_models, dataset_name, layer1, layer2, bound_type); exit(EXIT_SUCCESS); }
#include "SoyRef.h"
#include "BufferArray.hpp"

namespace Private
{
	//	max number of unique refs; sizeof(gAlphabetRaw) * SoyRef::MaxStringLength (63 * 8)
	const char gAlphabetRaw[] = "_ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789abcdefghijklmnopqrstuvwxyz";
	//	initialising this seems to mess up a bit in release, size is right, but contents are zero.
	BufferArray<char,100> gAlphabet;
	BufferArray<size_t,256> gAlphabetLookup;
};

//	Union used to (re)interpret the 64-bit ref as its raw bytes.
//	NOTE(review): byte order (and therefore the string form of a ref) is
//	endian-dependent — acceptable as long as refs are not serialised across
//	architectures; confirm if that ever changes.
struct uint64Chars
{
	uint64Chars() :
		m64	( 0 )
	{
	}
	union
	{
		uint64			m64;
		unsigned char	mChars[SoyRef::MaxStringLength];
	};
};

//	accessor for alphabet in case it's used BEFORE the global is constructed. Naughty globals
const BufferArray<char,100>& GetAlphabet()
{
	//	force initialisation on first use
	auto& Alphabet = Private::gAlphabet;
	if ( Alphabet.IsEmpty() )
	{
		Alphabet.PushBackArray( Private::gAlphabetRaw );
		//	remove the null terminator copied in from the string literal
		if ( Alphabet.GetBack() == '\0' )
			Alphabet.Remove('\0');
	}
	return Private::gAlphabet;
}

//	lazily-built reverse map: byte value -> index into the alphabet.
//	non-alphabet bytes map to index 0 (the '_' character).
const BufferArray<size_t,256>& GetAlphabetLookup()
{
	auto& AlphabetLookup = Private::gAlphabetLookup;

	//	generate on first use
	if ( AlphabetLookup.IsEmpty() )
	{
		auto& Alphabet = GetAlphabet();
		AlphabetLookup.SetSize( 256 );
		for ( int i=0;	i<256;	i++ )
		{
			auto AlphabetIndex = Alphabet.FindIndex( static_cast<char>(i) );
			//	non-alphabet characters turn into #0 (default char)
			AlphabetLookup[i] = (AlphabetIndex < 0) ? 0 : AlphabetIndex;
		}
	}

	return AlphabetLookup;
}

//	Snap a character to the nearest valid alphabet character
//	(non-alphabet bytes become alphabet entry #0, i.e. '_').
void CorrectAlphabetChar(char& Char)
{
	auto& AlphabetLookup = GetAlphabetLookup();
	auto& Alphabet = GetAlphabet();
	//	FIX: index with an unsigned byte. A plain char may be signed, so bytes
	//	>= 0x80 would produce a negative index (undefined behaviour).
	Char = Alphabet[ AlphabetLookup[ static_cast<unsigned char>(Char) ] ];
}

//	Decompose a 64-bit ref into one alphabet index per byte.
BufferArray<size_t,SoyRef::MaxStringLength> ToAlphabetIndexes(uint64 Ref64)
{
	uint64Chars Ref64Chars;
	Ref64Chars.m64 = Ref64;

	auto& AlphabetLookup = GetAlphabetLookup();
	BufferArray<size_t,SoyRef::MaxStringLength> Indexes;
	for ( int i=0;	i<SoyRef::MaxStringLength;	i++ )
	{
		auto& Char = Ref64Chars.mChars[i];	//	unsigned char: safe lookup index
		Indexes.PushBack( AlphabetLookup[Char] );
	}
	return Indexes;
}

//	Recompose a 64-bit ref from alphabet indexes.
//	@throws Soy::AssertException if any index maps to the terminator.
uint64 FromAlphabetIndexes(const BufferArray<size_t,SoyRef::MaxStringLength>& Indexes)
{
	//	turn back to chars and cram into a u64
	uint64Chars Ref64Chars;
	assert( Indexes.GetSize() == SoyRef::MaxStringLength );
	auto& Alphabet = GetAlphabet();
	for ( int i=0;	i<Indexes.GetSize();	i++ )
	{
		auto Index = Indexes[i];
		auto& RefChar = Ref64Chars.mChars[i];
		RefChar = Alphabet[Index];
		//	FIX: throw a proper exception type (consistent with Increment/IsValid)
		//	instead of a raw const char*, which escapes catch(std::exception&).
		if ( RefChar == '\0' )
			throw Soy::AssertException("No character should be zero");
	}
	return Ref64Chars.m64;
}

//	Build a ref from a string; short strings are padded with alphabet entry #0
//	('_') and invalid characters are corrected.
uint64 SoyRef::FromString(const std::string& String)
{
	//	make up indexes
	uint64Chars Ref64Chars;
	//	force alphabet/lookup initialisation before use
	GetAlphabetLookup();
	auto& Alphabet = GetAlphabet();
	for ( int i=0;	i<SoyRef::MaxStringLength;	i++ )
	{
		char StringChar = i < String.length() ? String[i] : Alphabet[0];
		CorrectAlphabetChar( StringChar );
		Ref64Chars.mChars[i] = StringChar;
	}
	return Ref64Chars.m64;
}

//	Render the ref as its MaxStringLength-character alphabet string.
std::string SoyRef::ToString() const
{
	//	turn integer into alphabet indexes
	auto AlphabetIndexes = ToAlphabetIndexes( mRef );
	auto& Alphabet = GetAlphabet();

	//	concat alphabet chars
	std::string String;
	for ( int i=0;	i<AlphabetIndexes.GetSize();	i++ )
	{
		auto Index = AlphabetIndexes[i];
		char Char = Alphabet[Index];
		String += Char;
	}
	return String;
}

//	Advance to the next ref, treating the string form as a base-N counter
//	(least significant character last).
void SoyRef::Increment()
{
	auto AlphabetIndexes = ToAlphabetIndexes( mRef );
	auto& Alphabet = GetAlphabet();

	//	start at the back and increment
	auto MaxIndex = Alphabet.GetSize()-1;
	for ( ssize_t i=AlphabetIndexes.GetSize()-1;	i>=0;	i-- )
	{
		//	increment index
		AlphabetIndexes[i]++;
		if ( AlphabetIndexes[i] <= MaxIndex )
			break;

		//	rolled over...
		AlphabetIndexes[i] = 0;
		//	... let i-- and we increment previous char
	}

	//	convert back
	mRef = FromAlphabetIndexes( AlphabetIndexes );

	//	if the "final integer" is reached, we'll end up with 000000000 (ie, invalid).
	//	if this is the case, we need to increment the alphabet, or start packing.
	//	gr: actually, this will never happen. 0000000 is _________ (not invalid!)
	if ( !IsValid() )
		throw Soy::AssertException("Generated invalid SoyRef during increment");
}

//	A ref is valid iff no byte is zero; a partially-zero ref indicates corruption.
bool SoyRef::IsValid() const
{
	//	if any char is 0, its invalid.
	//	but it should be all or nothing
	for ( auto i = 0;	i < std::size(mRefChars);	i++ )
	{
		auto& Char = mRefChars[i];
		if ( Char == '\0' )
		{
			if ( mRef != 0 )
				throw Soy::AssertException("Corrupted soyref found");
			return false;
		}
	}
	return true;
}
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE762_Mismatched_Memory_Management_Routines__delete_array_char_malloc_53b.cpp
Label Definition File: CWE762_Mismatched_Memory_Management_Routines__delete_array.label.xml
Template File: sources-sinks-53b.tmpl.cpp
*/
/*
 * @description
 * CWE: 762 Mismatched Memory Management Routines
 * BadSource: malloc Allocate data using malloc()
 * GoodSource: Allocate data using new []
 * Sinks:
 *    GoodSink: Deallocate data using free()
 *    BadSink : Deallocate data using delete []
 * Flow Variant: 53 Data flow: data passed as an argument from one function through two others to a fourth; all four functions are in different source files
 *
 * */

#include "std_testcase.h"

namespace CWE762_Mismatched_Memory_Management_Routines__delete_array_char_malloc_53
{

#ifndef OMITBAD

/* bad function declaration */
void bad_sink_c(char * data);

/* "b" link in the 4-file call chain: forwards the pointer unchanged to the
 * "c" stage, which eventually reaches the bad sink. */
void bad_sink_b(char * data)
{
    bad_sink_c(data);
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B uses the GoodSource with the BadSink */
void goodG2B_sink_c(char * data);

/* Forwarding link for the goodG2B (good source, bad sink) variant. */
void goodG2B_sink_b(char * data)
{
    goodG2B_sink_c(data);
}

/* goodB2G uses the BadSource with the GoodSink */
void goodB2G_sink_c(char * data);

/* Forwarding link for the goodB2G (bad source, good sink) variant. */
void goodB2G_sink_b(char * data)
{
    goodB2G_sink_c(data);
}

#endif /* OMITGOOD */

} // close namespace
// Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef __STOUT_OS_WINDOWS_CLOSE_HPP__ #define __STOUT_OS_WINDOWS_CLOSE_HPP__ #include <errno.h> #include <stout/nothing.hpp> #include <stout/try.hpp> #include <stout/windows/error.hpp> #include <stout/os/windows/fd.hpp> #include <stout/os/windows/socket.hpp> namespace os { inline Try<Nothing> close(const WindowsFD& fd) { switch (fd.type()) { case WindowsFD::FD_CRT: case WindowsFD::FD_HANDLE: { // We don't need to call `CloseHandle` on `fd.handle`, because calling // `_close` on the corresponding CRT FD implicitly invokes `CloseHandle`. if (::_close(fd.crt()) < 0) { return ErrnoError(); } break; } case WindowsFD::FD_SOCKET: { // NOTE: Since closing an unconnected socket is not an error in POSIX, // we simply ignore it here. if (::shutdown(fd, SD_BOTH) == SOCKET_ERROR && WSAGetLastError() != WSAENOTCONN) { return WindowsSocketError("Failed to shutdown a socket"); } if (::closesocket(fd) == SOCKET_ERROR) { return WindowsSocketError("Failed to close a socket"); } break; } } return Nothing(); } } // namespace os { #endif // __STOUT_OS_WINDOWS_CLOSE_HPP__
/*################################################################################
  ##
  ##   Copyright (C) 2016-2020 Keith O'Hara
  ##
  ##   This file is part of the GCE-Math C++ library.
  ##
  ##   Licensed under the Apache License, Version 2.0 (the "License");
  ##   you may not use this file except in compliance with the License.
  ##   You may obtain a copy of the License at
  ##
  ##       http://www.apache.org/licenses/LICENSE-2.0
  ##
  ##   Unless required by applicable law or agreed to in writing, software
  ##   distributed under the License is distributed on an "AS IS" BASIS,
  ##   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  ##   See the License for the specific language governing permissions and
  ##   limitations under the License.
  ##
  ################################################################################*/

/*
 * compile-time incomplete beta function
 *
 * see eq. 18.5.17a in the Handbook of Continued Fractions for Special Functions
 *
 * NOTE: every helper below is written as a single return expression so it
 * remains a valid C++11-style constexpr function; the continued fraction is
 * evaluated by recursion (modified Lentz method) rather than a loop.
 */

#ifndef GCEM_incomplete_beta_HPP
#define GCEM_incomplete_beta_HPP

namespace internal {

// forward declaration: one step of the continued-fraction recursion
template <typename T>
constexpr auto incomplete_beta_cf(T a, T b, T z, T cJ, T dJ, T fJ, int depth) noexcept -> T;

//
// coefficients; see eq. 18.5.17b

// even-index coefficient a_{2m+2} = -z (a+m)(a+b+m) / ((a+2m)(a+2m+1))
template <typename T>
constexpr auto incomplete_beta_coef_even(const T a, const T b, const T z, const int k) noexcept -> T
{
    return (-z * (a + k) * (a + b + k) / ((a + 2 * k) * (a + 2 * k + T(1))));
}

// odd-index coefficient a_{2m+1} = z m (b-m) / ((a+2m-1)(a+2m))
template <typename T>
constexpr auto incomplete_beta_coef_odd(const T a, const T b, const T z, const int k) noexcept -> T
{
    return (z * k * (b - k) / ((a + 2 * k - T(1)) * (a + 2 * k)));
}

// dispatch on parity of the recursion depth
template <typename T>
constexpr auto incomplete_beta_coef(const T a, const T b, const T z, const int depth) noexcept -> T
{
    return (!is_odd(depth) ? incomplete_beta_coef_even(a, b, z, depth / 2)
                           : incomplete_beta_coef_odd(a, b, z, (depth + 1) / 2));
}

//
// update formulae for the modified Lentz method

// c_j = 1 + a_j / c_{j-1}
template <typename T>
constexpr auto incomplete_beta_c_update(const T a, const T b, const T z, const T cJ, const int depth) noexcept -> T
{
    return (T(1) + incomplete_beta_coef(a, b, z, depth) / cJ);
}

// d_j = 1 / (1 + a_j d_{j-1})
template <typename T>
constexpr auto incomplete_beta_d_update(const T a, const T b, const T z, const T dJ, const int depth) noexcept -> T
{
    return (T(1) / (T(1) + incomplete_beta_coef(a, b, z, depth) * dJ));
}

//
// convergence-type condition: stop when c_j*d_j is close enough to 1,
// or when the maximum recursion depth is exceeded
template <typename T>
constexpr auto incomplete_beta_decision(
    const T a, const T b, const T z, const T cJ, const T dJ, const T fJ, const int depth) noexcept -> T
{
    return ( // tolerance check
        abs(cJ * dJ - T(1)) < GCEM_INCML_BETA_TOL ? fJ * cJ * dJ :
        // max_iter check
        depth < GCEM_INCML_BETA_MAX_ITER ?
        // if
        incomplete_beta_cf(a, b, z, cJ, dJ, fJ * cJ * dJ, depth + 1) :
        // else
        fJ * cJ * dJ);
}

// one Lentz step: compute c_j and d_j, then decide whether to recurse
template <typename T>
constexpr auto incomplete_beta_cf(
    const T a, const T b, const T z, const T cJ, const T dJ, const T fJ, const int depth) noexcept -> T
{
    return incomplete_beta_decision(a, b, z,
                                    incomplete_beta_c_update(a, b, z, cJ, depth),
                                    incomplete_beta_d_update(a, b, z, dJ, depth),
                                    fJ, depth);
}

//
// x^a (1-x)^{b} / (a beta(a,b)) * cf
// The leading factor is computed in log space to avoid overflow/underflow.
template <typename T>
constexpr auto incomplete_beta_begin(const T a, const T b, const T z) noexcept -> T
{
    return ((exp(a * log(z) + b * log(T(1) - z) - lbeta(a, b)) / a) *
            incomplete_beta_cf(a, b, z, T(1),
                               incomplete_beta_d_update(a, b, z, T(1), 0),
                               incomplete_beta_d_update(a, b, z, T(1), 0), 1));
}

// input validation + symmetry: I_z(a,b) = 1 - I_{1-z}(b,a); the reflected
// form is used when z is past the distribution's "center" for faster
// continued-fraction convergence
template <typename T>
constexpr auto incomplete_beta_check(const T a, const T b, const T z) noexcept -> T
{
    return ( // NaN check
        any_nan(a, b, z) ? etl::numeric_limits<T>::quiet_NaN() :
        // indistinguishable from zero
        etl::numeric_limits<T>::epsilon() > z ? T(0) :
        // parameter check for performance
        (a + T(1)) / (a + b + T(2)) > z ? incomplete_beta_begin(a, b, z)
                                        : T(1) - incomplete_beta_begin(b, a, T(1) - z));
}

// promote all three arguments to their common floating type before evaluating
template <typename T1, typename T2, typename T3, typename TC = common_return_t<T1, T2, T3>>
constexpr auto incomplete_beta_type_check(const T1 a, const T2 b, const T3 p) noexcept -> TC
{
    return incomplete_beta_check(static_cast<TC>(a), static_cast<TC>(b), static_cast<TC>(p));
}

} // namespace internal

/**
 * Compile-time regularized incomplete beta function
 *
 * @param a a real-valued, non-negative input.
 * @param b a real-valued, non-negative input.
 * @param z a real-valued, non-negative input.
 *
 * @return computes the regularized incomplete beta function,
 * \f[ \frac{\text{B}(z;\alpha,\beta)}{\text{B}(\alpha,\beta)} = *
 * \frac{1}{\text{B}(\alpha,\beta)}\int_0^z t^{a-1} (1-t)^{\beta-1} dt \f] using
 * a continued fraction representation, found in the Handbook of Continued
 * Fractions for Special Functions, and a modified Lentz method. \f[
 * \frac{\text{B}(z;\alpha,\beta)}{\text{B}(\alpha,\beta)} = \frac{z^{\alpha}
 * (1-t)^{\beta}}{\alpha \text{B}(\alpha,\beta)} \dfrac{a_1}{1 + \dfrac{a_2}{1 +
 * \dfrac{a_3}{1 + \dfrac{a_4}{1 + \ddots}}}} \f] where \f$ a_1 = 1 \f$ and \f[
 * a_{2m+2} = - \frac{(\alpha + m)(\alpha + \beta + m)}{(\alpha + 2m)(\alpha +
 * 2m + 1)}, \ m \geq 0 \f] \f[ a_{2m+1} = \frac{m(\beta - m)}{(\alpha + 2m -
 * 1)(\alpha + 2m)}, \ m \geq 1 \f] The Lentz method works as follows: let \f$
 * f_j \f$ denote the value of the continued fraction up to the first \f$ j \f$
 * terms; \f$ f_j \f$ is updated as follows: \f[ c_j = 1 + a_j / c_{j-1},
 * \ \ d_j = 1 / (1 + a_j d_{j-1}) \f] \f[ f_j = c_j d_j f_{j-1} \f]
 */
template <typename T1, typename T2, typename T3>
constexpr auto incomplete_beta(const T1 a, const T2 b, const T3 z) noexcept -> common_return_t<T1, T2, T3>
{
    return internal::incomplete_beta_type_check(a, b, z);
}

#endif
/** * Copyright (C) 2015 MongoDB Inc. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License, version 3, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * As a special exception, the copyright holders give permission to link the * code of portions of this program with the OpenSSL library under certain * conditions as described in each individual source file and distribute * linked combinations including the program with the OpenSSL library. You * must comply with the GNU Affero General Public License in all respects * for all of the code used other than as permitted herein. If you modify * file(s) with this exception, you may extend this exception to your * version of the file(s), but you are not obligated to do so. If you do not * wish to do so, delete this exception statement from your version. If you * delete this exception statement from all source files in the program, * then also delete it in the license file. 
 */

#define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding

#include "mongo/platform/basic.h"

#include "mongo/db/s/migration_chunk_cloner_source_legacy.h"

#include "mongo/base/status.h"
#include "mongo/client/read_preference.h"
#include "mongo/db/concurrency/locker.h"
#include "mongo/db/db_raii.h"
#include "mongo/db/dbhelpers.h"
#include "mongo/db/exec/plan_stage.h"
#include "mongo/db/exec/working_set_common.h"
#include "mongo/db/index/index_descriptor.h"
#include "mongo/db/query/internal_plans.h"
#include "mongo/db/s/start_chunk_clone_request.h"
#include "mongo/db/service_context.h"
#include "mongo/executor/remote_command_request.h"
#include "mongo/executor/remote_command_response.h"
#include "mongo/executor/task_executor.h"
#include "mongo/executor/task_executor_pool.h"
#include "mongo/rpc/get_status_from_command_result.h"
#include "mongo/s/client/shard_registry.h"
#include "mongo/s/grid.h"
#include "mongo/util/elapsed_tracker.h"
#include "mongo/util/log.h"
#include "mongo/util/mongoutils/str.h"
#include "mongo/util/scopeguard.h"
#include "mongo/util/time_support.h"

namespace mongo {
namespace {

// Command names sent to the recipient shard during the migration protocol.
const char kRecvChunkStatus[] = "_recvChunkStatus";
const char kRecvChunkCommit[] = "_recvChunkCommit";
const char kRecvChunkAbort[] = "_recvChunkAbort";

// Upper bound on documents shipped per chunk batch.
const int kMaxObjectPerChunk{250000};

// Returns true if the document's shard key falls in [min, max) — the
// half-open range of the chunk being migrated.
bool isInRange(const BSONObj& obj,
               const BSONObj& min,
               const BSONObj& max,
               const ShardKeyPattern& shardKeyPattern) {
    BSONObj k = shardKeyPattern.extractShardKeyFromDoc(obj);
    return k.woCompare(min) >= 0 && k.woCompare(max) < 0;
}

// Builds `{ <commandName>: <ns>, sessionId: ... }` — the common shape of the
// recvChunk* commands, tagged with this migration's session id.
BSONObj createRequestWithSessionId(StringData commandName,
                                   const NamespaceString& nss,
                                   const MigrationSessionId& sessionId) {
    BSONObjBuilder builder;
    builder.append(commandName, nss.ns());
    sessionId.append(&builder);
    return builder.obj();
}

}  // namespace

/**
 * Used to receive invalidation notifications from operations, which delete documents.
 */
class DeleteNotificationStage final : public PlanStage {
public:
    DeleteNotificationStage(MigrationChunkClonerSourceLegacy* cloner, OperationContext* opCtx)
        : PlanStage("SHARDING_NOTIFY_DELETE", opCtx), _cloner(cloner) {}

    // Only deletions matter here: drop the deleted record id from the set of
    // locations still pending clone so we never try to fetch a dead record.
    void doInvalidate(OperationContext* opCtx, const RecordId& dl, InvalidationType type) override {
        if (type == INVALIDATION_DELETION) {
            stdx::lock_guard<stdx::mutex> sl(_cloner->_mutex);
            _cloner->_cloneLocs.erase(dl);
        }
    }

    // This stage is never executed as part of a plan; it exists solely to
    // receive invalidations, so all execution entry points are unreachable.
    StageState doWork(WorkingSetID* out) override {
        MONGO_UNREACHABLE;
    }

    bool isEOF() override {
        MONGO_UNREACHABLE;
    }

    std::unique_ptr<PlanStageStats> getStats() override {
        MONGO_UNREACHABLE;
    }

    SpecificStats* getSpecificStats() const override {
        MONGO_UNREACHABLE;
    }

    StageType stageType() const override {
        return STAGE_NOTIFY_DELETE;
    }

private:
    MigrationChunkClonerSourceLegacy* const _cloner;
};

/**
 * Used to commit work for LogOpForSharding. Used to keep track of changes in documents that are
 * part of a chunk being migrated.
 */
class LogOpForShardingHandler final : public RecoveryUnit::Change {
public:
    /**
     * Invariant: idObj should belong to a document that is part of the active chunk being migrated
     */
    LogOpForShardingHandler(MigrationChunkClonerSourceLegacy* cloner,
                            const BSONObj& idObj,
                            const char op)
        : _cloner(cloner), _idObj(idObj.getOwned()), _op(op) {}

    // Runs when the storage transaction commits: queue the document id on the
    // appropriate incremental-transfer list ('d' => _deleted, 'i'/'u' => _reload).
    // The +5 accounts for per-entry BSON overhead in the memory estimate.
    void commit() override {
        switch (_op) {
            case 'd': {
                stdx::lock_guard<stdx::mutex> sl(_cloner->_mutex);
                _cloner->_deleted.push_back(_idObj);
                _cloner->_memoryUsed += _idObj.firstElement().size() + 5;
            } break;

            case 'i':
            case 'u': {
                stdx::lock_guard<stdx::mutex> sl(_cloner->_mutex);
                _cloner->_reload.push_back(_idObj);
                _cloner->_memoryUsed += _idObj.firstElement().size() + 5;
            } break;

            default:
                MONGO_UNREACHABLE;
        }
    }

    // Nothing to undo: state is only mutated on commit.
    void rollback() override {}

private:
    MigrationChunkClonerSourceLegacy* const _cloner;
    const BSONObj _idObj;
    const char _op;
};

MigrationChunkClonerSourceLegacy::MigrationChunkClonerSourceLegacy(MoveChunkRequest request,
                                                                   const BSONObj& shardKeyPattern,
                                                                   ConnectionString donorConnStr,
                                                                   HostAndPort recipientHost)
    : _args(std::move(request)),
      _shardKeyPattern(shardKeyPattern),
      _sessionId(MigrationSessionId::generate(_args.getFromShardId().toString(),
                                              _args.getToShardId().toString())),
      _donorConnStr(std::move(donorConnStr)),
      _recipientHost(std::move(recipientHost)) {}

MigrationChunkClonerSourceLegacy::~MigrationChunkClonerSourceLegacy() {
    // The cloner must have been explicitly finished (commit or cancel) before
    // destruction — _cleanup is responsible for reaching this state.
    invariant(_state == kDone);
    invariant(!_deleteNotifyExec);
}

// Kicks off the migration: snapshots the record ids to clone and instructs the
// recipient shard to begin pulling data. Must be called without locks held.
Status MigrationChunkClonerSourceLegacy::startClone(OperationContext* opCtx) {
    invariant(_state == kNew);
    invariant(!opCtx->lockState()->isLocked());

    // Load the ids of the currently available documents
    auto storeCurrentLocsStatus = _storeCurrentLocs(opCtx);
    if (!storeCurrentLocsStatus.isOK()) {
        return storeCurrentLocsStatus;
    }

    // Tell the recipient shard to start cloning
    BSONObjBuilder cmdBuilder;
    StartChunkCloneRequest::appendAsCommand(&cmdBuilder,
                                            _args.getNss(),
                                            _sessionId,
                                            _donorConnStr,
                                            _args.getFromShardId(),
                                            _args.getToShardId(),
                                            _args.getMinKey(),
                                            _args.getMaxKey(),
                                            _shardKeyPattern.toBSON(),
                                            _args.getSecondaryThrottle());

    auto startChunkCloneResponseStatus = _callRecipient(cmdBuilder.obj());
    if (!startChunkCloneResponseStatus.isOK()) {
        return startChunkCloneResponseStatus.getStatus();
    }

    // TODO (Kal): Setting the state to kCloning below means that if cancelClone was called we will
    // send a cancellation command to the recipient. The reason to limit the cases when we send
    // cancellation is for backwards compatibility with 3.2 nodes, which cannot differentiate
    // between cancellations for different migration sessions. It is thus possible that a second
    // migration from different donor, but the same recipient would certainly abort an already
    // running migration.
    stdx::lock_guard<stdx::mutex> sl(_mutex);
    _state = kCloning;

    return Status::OK();
}

// Polls the recipient's _recvChunkStatus until it reports a steady state (all
// data transferred), fails, or the deadline/memory budget is exceeded.
Status MigrationChunkClonerSourceLegacy::awaitUntilCriticalSectionIsAppropriate(
    OperationContext* opCtx, Milliseconds maxTimeToWait) {
    invariant(_state == kCloning);
    invariant(!opCtx->lockState()->isLocked());

    const auto startTime = Date_t::now();

    int iteration = 0;
    while ((Date_t::now() - startTime) < maxTimeToWait) {
        // Exponential sleep backoff, up to 1024ms. Don't sleep much on the first few iterations,
        // since we want empty chunk migrations to be fast.
        sleepmillis(1LL << std::min(iteration, 10));
        iteration++;

        auto responseStatus = _callRecipient(
            createRequestWithSessionId(kRecvChunkStatus, _args.getNss(), _sessionId));
        if (!responseStatus.isOK()) {
            return {responseStatus.getStatus().code(),
                    str::stream()
                        << "Failed to contact recipient shard to monitor data transfer due to "
                        << responseStatus.getStatus().toString()};
        }

        const BSONObj& res = responseStatus.getValue();

        stdx::lock_guard<stdx::mutex> sl(_mutex);
        const std::size_t cloneLocsRemaining = _cloneLocs.size();

        log() << "moveChunk data transfer progress: " << redact(res)
              << " mem used: " << _memoryUsed
              << " documents remaining to clone: " << cloneLocsRemaining;

        if (res["state"].String() == "steady") {
            // Recipient believes it has everything; cross-check against our
            // own book-keeping before allowing the critical section.
            if (cloneLocsRemaining != 0) {
                return {ErrorCodes::OperationIncomplete,
                        str::stream() << "Unable to enter critical section because the recipient "
                                         "shard thinks all data is cloned while there are still "
                                      << cloneLocsRemaining
                                      << " documents remaining"};
            }

            return Status::OK();
        }

        if (res["state"].String() == "fail") {
            return {ErrorCodes::OperationFailed, "Data transfer error"};
        }

        auto migrationSessionIdStatus = MigrationSessionId::extractFromBSON(res);
        if (!migrationSessionIdStatus.isOK()) {
            return {ErrorCodes::OperationIncomplete,
                    str::stream() << "Unable to retrieve the id of the migration session due to "
                                  << migrationSessionIdStatus.getStatus().toString()};
        }

        if (res["ns"].str() != _args.getNss().ns() ||
            res["from"].str() != _donorConnStr.toString() || !res["min"].isABSONObj() ||
            res["min"].Obj().woCompare(_args.getMinKey()) != 0 || !res["max"].isABSONObj() ||
            res["max"].Obj().woCompare(_args.getMaxKey()) != 0 ||
            !_sessionId.matches(migrationSessionIdStatus.getValue())) {
            // This can happen when the destination aborted the migration and received another
            // recvChunk before this thread sees the transition to the abort state. This is
            // currently possible only if multiple migrations are happening at once. This is an
            // unfortunate consequence of the shards not being able to keep track of multiple
            // incoming and outgoing migrations.
            return {ErrorCodes::OperationIncomplete,
                    "Destination shard aborted migration because a new one is running"};
        }

        if (_memoryUsed > 500 * 1024 * 1024) {
            // This is too much memory for us to use so we're going to abort the migration
            return {ErrorCodes::ExceededMemoryLimit,
                    "Aborting migration because of high memory usage"};
        }

        Status interruptStatus = opCtx->checkForInterruptNoAssert();
        if (!interruptStatus.isOK()) {
            return interruptStatus;
        }
    }

    return {ErrorCodes::ExceededTimeLimit, "Timed out waiting for the cloner to catch up"};
}

// Asks the recipient to commit; on failure the whole clone is cancelled so the
// recipient does not keep partially transferred data.
Status MigrationChunkClonerSourceLegacy::commitClone(OperationContext* opCtx) {
    invariant(_state == kCloning);
    invariant(!opCtx->lockState()->isLocked());

    auto responseStatus =
        _callRecipient(createRequestWithSessionId(kRecvChunkCommit, _args.getNss(), _sessionId));
    if (responseStatus.isOK()) {
        _cleanup(opCtx);
        return Status::OK();
    }

    cancelClone(opCtx);
    return responseStatus.getStatus();
}

// Aborts the migration. Only notifies the recipient when cloning has actually
// started (see the backwards-compatibility note in startClone()).
void MigrationChunkClonerSourceLegacy::cancelClone(OperationContext* opCtx) {
    invariant(!opCtx->lockState()->isLocked());

    switch (_state) {
        case kDone:
            break;
        case kCloning:
            _callRecipient(createRequestWithSessionId(kRecvChunkAbort, _args.getNss(), _sessionId));
        // Intentional fall through
        case kNew:
            _cleanup(opCtx);
            break;
        default:
            MONGO_UNREACHABLE;
    }
}

bool MigrationChunkClonerSourceLegacy::isDocumentInMigratingChunk(OperationContext* opCtx,
                                                                  const BSONObj& doc) {
    return isInRange(doc, _args.getMinKey(), _args.getMaxKey(), _shardKeyPattern);
}

// op-observer hook: record inserts into the migrating range so they are
// re-transferred in the incremental ("mods") phase. Registration through the
// recovery unit means the id is only queued if the insert actually commits.
void MigrationChunkClonerSourceLegacy::onInsertOp(OperationContext* opCtx,
                                                  const BSONObj& insertedDoc) {
    dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IX));

    BSONElement idElement = insertedDoc["_id"];
    if (idElement.eoo()) {
        warning() << "logInsertOp got a document with no _id field, ignoring inserted document: "
                  << redact(insertedDoc);
        return;
    }

    if (!isInRange(insertedDoc, _args.getMinKey(), _args.getMaxKey(), _shardKeyPattern)) {
        return;
    }

    opCtx->recoveryUnit()->registerChange(new LogOpForShardingHandler(this, idElement.wrap(), 'i'));
}

// op-observer hook: same as onInsertOp but for updates.
void MigrationChunkClonerSourceLegacy::onUpdateOp(OperationContext* opCtx,
                                                  const BSONObj& updatedDoc) {
    dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IX));

    BSONElement idElement = updatedDoc["_id"];
    if (idElement.eoo()) {
        warning() << "logUpdateOp got a document with no _id field, ignoring updatedDoc: "
                  << redact(updatedDoc);
        return;
    }

    if (!isInRange(updatedDoc, _args.getMinKey(), _args.getMaxKey(), _shardKeyPattern)) {
        return;
    }

    opCtx->recoveryUnit()->registerChange(new LogOpForShardingHandler(this, idElement.wrap(), 'u'));
}

// op-observer hook for deletes. NOTE: unlike insert/update there is no range
// check here — only the _id is available, so every delete is queued.
void MigrationChunkClonerSourceLegacy::onDeleteOp(OperationContext* opCtx,
                                                  const BSONObj& deletedDocId) {
    dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IX));

    BSONElement idElement = deletedDocId["_id"];
    if (idElement.eoo()) {
        warning() << "logDeleteOp got a document with no _id field, ignoring deleted doc: "
                  << redact(deletedDocId);
        return;
    }

    opCtx->recoveryUnit()->registerChange(new LogOpForShardingHandler(this, idElement.wrap(), 'd'));
}

// Estimated buffer size for the next clone batch: average document size times
// documents remaining, capped at the maximum user BSON size.
uint64_t MigrationChunkClonerSourceLegacy::getCloneBatchBufferAllocationSize() {
    stdx::lock_guard<stdx::mutex> sl(_mutex);

    return std::min(static_cast<uint64_t>(BSONObjMaxUserSize),
                    _averageObjectSizeForCloneLocs * _cloneLocs.size());
}

// Fills arrBuilder with as many pending documents as fit in one response,
// bounded by both the BSON size limit and the yield interval. Consumed record
// ids are erased; an empty batch signals the end of the initial clone phase.
Status MigrationChunkClonerSourceLegacy::nextCloneBatch(OperationContext* opCtx,
                                                        Collection* collection,
                                                        BSONArrayBuilder* arrBuilder) {
    dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IS));

    ElapsedTracker tracker(opCtx->getServiceContext()->getFastClockSource(),
                           internalQueryExecYieldIterations.load(),
                           Milliseconds(internalQueryExecYieldPeriodMS.load()));

    stdx::lock_guard<stdx::mutex> sl(_mutex);

    std::set<RecordId>::iterator it;

    for (it = _cloneLocs.begin(); it != _cloneLocs.end(); ++it) {
        // We must always make progress in this method by at least one document because empty return
        // indicates there is no more initial clone data.
        if (arrBuilder->arrSize() && tracker.intervalHasElapsed()) {
            break;
        }

        Snapshotted<BSONObj> doc;
        if (collection->findDoc(opCtx, *it, &doc)) {
            // Use the builder size instead of accumulating the document sizes directly so that we
            // take into consideration the overhead of BSONArray indices.
            if (arrBuilder->arrSize() &&
                (arrBuilder->len() + doc.value().objsize() + 1024) > BSONObjMaxUserSize) {
                break;
            }

            arrBuilder->append(doc.value());
        }
    }

    _cloneLocs.erase(_cloneLocs.begin(), it);

    // If we have drained all the cloned data, there is no need to keep the delete notify executor
    // around
    if (_cloneLocs.empty()) {
        _deleteNotifyExec.reset();
    }

    return Status::OK();
}

// Packages the queued incremental changes (deletes first, then reloads of
// inserted/updated docs) into the builder for the catch-up phase.
Status MigrationChunkClonerSourceLegacy::nextModsBatch(OperationContext* opCtx,
                                                       Database* db,
                                                       BSONObjBuilder* builder) {
    dassert(opCtx->lockState()->isCollectionLockedForMode(_args.getNss().ns(), MODE_IS));

    stdx::lock_guard<stdx::mutex> sl(_mutex);

    // All clone data must have been drained before starting to fetch the incremental changes
    invariant(_cloneLocs.empty());

    long long docSizeAccumulator = 0;

    _xfer(opCtx, db, &_deleted, builder, "deleted", &docSizeAccumulator, false);
    _xfer(opCtx, db, &_reload, builder, "reload", &docSizeAccumulator, true);

    builder->append("size", docSizeAccumulator);

    return Status::OK();
}

void
MigrationChunkClonerSourceLegacy::_cleanup(OperationContext* opCtx) { { stdx::lock_guard<stdx::mutex> sl(_mutex); _state = kDone; _reload.clear(); _deleted.clear(); } if (_deleteNotifyExec) { AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS); _deleteNotifyExec.reset(); } } StatusWith<BSONObj> MigrationChunkClonerSourceLegacy::_callRecipient(const BSONObj& cmdObj) { executor::RemoteCommandResponse responseStatus( Status{ErrorCodes::InternalError, "Uninitialized value"}); auto executor = grid.getExecutorPool()->getFixedExecutor(); auto scheduleStatus = executor->scheduleRemoteCommand( executor::RemoteCommandRequest(_recipientHost, "admin", cmdObj, nullptr), [&responseStatus](const executor::TaskExecutor::RemoteCommandCallbackArgs& args) { responseStatus = args.response; }); // TODO: Update RemoteCommandTargeter on NotMaster errors. if (!scheduleStatus.isOK()) { return scheduleStatus.getStatus(); } executor->wait(scheduleStatus.getValue()); if (!responseStatus.isOK()) { return responseStatus.status; } Status commandStatus = getStatusFromCommandResult(responseStatus.data); if (!commandStatus.isOK()) { return commandStatus; } return responseStatus.data.getOwned(); } Status MigrationChunkClonerSourceLegacy::_storeCurrentLocs(OperationContext* opCtx) { AutoGetCollection autoColl(opCtx, _args.getNss(), MODE_IS); Collection* const collection = autoColl.getCollection(); if (!collection) { return {ErrorCodes::NamespaceNotFound, str::stream() << "Collection " << _args.getNss().ns() << " does not exist."}; } // Allow multiKey based on the invariant that shard keys must be single-valued. Therefore, any // multi-key index prefixed by shard key cannot be multikey over the shard key fields. 
IndexDescriptor* const idx = collection->getIndexCatalog()->findShardKeyPrefixedIndex(opCtx, _shardKeyPattern.toBSON(), false); // requireSingleKey if (!idx) { return {ErrorCodes::IndexNotFound, str::stream() << "can't find index with prefix " << _shardKeyPattern.toBSON() << " in storeCurrentLocs for " << _args.getNss().ns()}; } // Install the stage, which will listen for notifications on the collection auto statusWithDeleteNotificationPlanExecutor = PlanExecutor::make(opCtx, stdx::make_unique<WorkingSet>(), stdx::make_unique<DeleteNotificationStage>(this, opCtx), collection, PlanExecutor::YIELD_MANUAL); if (!statusWithDeleteNotificationPlanExecutor.isOK()) { return statusWithDeleteNotificationPlanExecutor.getStatus(); } _deleteNotifyExec = std::move(statusWithDeleteNotificationPlanExecutor.getValue()); _deleteNotifyExec->registerExec(collection); // Assume both min and max non-empty, append MinKey's to make them fit chosen index const KeyPattern kp(idx->keyPattern()); BSONObj min = Helpers::toKeyFormat(kp.extendRangeBound(_args.getMinKey(), false)); BSONObj max = Helpers::toKeyFormat(kp.extendRangeBound(_args.getMaxKey(), false)); std::unique_ptr<PlanExecutor> exec( InternalPlanner::indexScan(opCtx, collection, idx, min, max, BoundInclusion::kIncludeStartKeyOnly, PlanExecutor::YIELD_MANUAL)); // We can afford to yield here because any change to the base data that we might miss is already // being queued and will migrate in the 'transferMods' stage. exec->setYieldPolicy(PlanExecutor::YIELD_AUTO, collection); // Use the average object size to estimate how many objects a full chunk would carry do that // while traversing the chunk's range using the sharding index, below there's a fair amount of // slack before we determine a chunk is too large because object sizes will vary. 
unsigned long long maxRecsWhenFull; long long avgRecSize; const long long totalRecs = collection->numRecords(opCtx); if (totalRecs > 0) { avgRecSize = collection->dataSize(opCtx) / totalRecs; maxRecsWhenFull = _args.getMaxChunkSizeBytes() / avgRecSize; maxRecsWhenFull = std::min((unsigned long long)(kMaxObjectPerChunk + 1), 130 * maxRecsWhenFull / 100 /* slack */); } else { avgRecSize = 0; maxRecsWhenFull = kMaxObjectPerChunk + 1; } // Do a full traversal of the chunk and don't stop even if we think it is a large chunk we want // the number of records to better report, in that case. bool isLargeChunk = false; unsigned long long recCount = 0; BSONObj obj; RecordId recordId; PlanExecutor::ExecState state; while (PlanExecutor::ADVANCED == (state = exec->getNext(&obj, &recordId))) { if (!isLargeChunk) { stdx::lock_guard<stdx::mutex> lk(_mutex); _cloneLocs.insert(recordId); } if (++recCount > maxRecsWhenFull) { isLargeChunk = true; // Continue on despite knowing that it will fail, just to get the correct value for // recCount } } if (PlanExecutor::DEAD == state || PlanExecutor::FAILURE == state) { return {ErrorCodes::InternalError, str::stream() << "Executor error while scanning for documents belonging to chunk: " << WorkingSetCommon::toStatusString(obj)}; } const uint64_t collectionAverageObjectSize = collection->averageObjectSize(opCtx); if (isLargeChunk) { return { ErrorCodes::ChunkTooBig, str::stream() << "Cannot move chunk: the maximum number of documents for a chunk is " << maxRecsWhenFull << ", the maximum chunk size is " << _args.getMaxChunkSizeBytes() << ", average document size is " << avgRecSize << ". 
Found " << recCount << " documents in chunk " << " ns: " << _args.getNss().ns() << " " << _args.getMinKey() << " -> " << _args.getMaxKey()}; } stdx::lock_guard<stdx::mutex> lk(_mutex); _averageObjectSizeForCloneLocs = collectionAverageObjectSize + 12; return Status::OK(); } void MigrationChunkClonerSourceLegacy::_xfer(OperationContext* opCtx, Database* db, std::list<BSONObj>* docIdList, BSONObjBuilder* builder, const char* fieldName, long long* sizeAccumulator, bool explode) { const long long maxSize = 1024 * 1024; if (docIdList->size() == 0 || *sizeAccumulator > maxSize) { return; } const std::string& ns = _args.getNss().ns(); BSONArrayBuilder arr(builder->subarrayStart(fieldName)); std::list<BSONObj>::iterator docIdIter = docIdList->begin(); while (docIdIter != docIdList->end() && *sizeAccumulator < maxSize) { BSONObj idDoc = *docIdIter; if (explode) { BSONObj fullDoc; if (Helpers::findById(opCtx, db, ns.c_str(), idDoc, fullDoc)) { arr.append(fullDoc); *sizeAccumulator += fullDoc.objsize(); } } else { arr.append(idDoc); *sizeAccumulator += idDoc.objsize(); } docIdIter = docIdList->erase(docIdIter); } arr.done(); } } // namespace mongo
#include "include/flutter_app_icon_badge/flutter_app_icon_badge_plugin.h"

// This must be included before many other Windows headers.
#include <windows.h>

// For getPlatformVersion; remove unless needed for your plugin implementation.
#include <VersionHelpers.h>

#include <flutter/method_channel.h>
#include <flutter/plugin_registrar_windows.h>
#include <flutter/standard_method_codec.h>

#include <map>
#include <memory>
#include <sstream>

namespace {

// Windows implementation of the flutter_app_icon_badge plugin.
//
// Windows has no app-icon badge comparable to mobile platforms, so this
// implementation reports badges as unsupported and answers badge mutations
// with NotImplemented.
class FlutterAppIconBadgePlugin : public flutter::Plugin {
 public:
  // Registers the plugin's method channel with the given registrar.
  static void RegisterWithRegistrar(flutter::PluginRegistrarWindows *registrar);

  FlutterAppIconBadgePlugin();

  virtual ~FlutterAppIconBadgePlugin();

 private:
  // Called when a method is called on this plugin's channel from Dart.
  // Must send exactly one response through |result|.
  void HandleMethodCall(
      const flutter::MethodCall<flutter::EncodableValue> &method_call,
      std::unique_ptr<flutter::MethodResult<flutter::EncodableValue>> result);
};

// static
void FlutterAppIconBadgePlugin::RegisterWithRegistrar(
    flutter::PluginRegistrarWindows *registrar) {
  auto channel =
      std::make_unique<flutter::MethodChannel<flutter::EncodableValue>>(
          registrar->messenger(), "flutter_app_icon_badge",
          &flutter::StandardMethodCodec::GetInstance());

  auto plugin = std::make_unique<FlutterAppIconBadgePlugin>();

  channel->SetMethodCallHandler(
      [plugin_pointer = plugin.get()](const auto &call, auto result) {
        plugin_pointer->HandleMethodCall(call, std::move(result));
      });

  registrar->AddPlugin(std::move(plugin));
}

FlutterAppIconBadgePlugin::FlutterAppIconBadgePlugin() {}

FlutterAppIconBadgePlugin::~FlutterAppIconBadgePlugin() {}

void FlutterAppIconBadgePlugin::HandleMethodCall(
    const flutter::MethodCall<flutter::EncodableValue> &method_call,
    std::unique_ptr<flutter::MethodResult<flutter::EncodableValue>> result) {
  const auto &method_name = method_call.method_name();
  if (method_name.compare("isAppBadgeSupported") == 0) {
    // App icon badges are not supported on Windows.
    result->Success(flutter::EncodableValue(false));
  } else if (method_name.compare("isAppFocused") == 0) {
    // TODO: report the actual focus state of the top-level window.
    result->Success(flutter::EncodableValue(false));
  } else {
    // Fix: the original code matched "updateBadge" and "removeBadge" but
    // returned without ever replying, dropping the MethodResult and leaving
    // the Dart-side Future pending forever. Every platform-channel call must
    // receive exactly one response, so all unimplemented methods (including
    // updateBadge/removeBadge until they are implemented) now answer
    // NotImplemented().
    result->NotImplemented();
  }
}

}  // namespace

void FlutterAppIconBadgePluginRegisterWithRegistrar(
    FlutterDesktopPluginRegistrarRef registrar) {
  FlutterAppIconBadgePlugin::RegisterWithRegistrar(
      flutter::PluginRegistrarManager::GetInstance()
          ->GetRegistrar<flutter::PluginRegistrarWindows>(registrar));
}
/********************************************************
 *                                                       *
 *   Copyright (C) Microsoft. All rights reserved.       *
 *                                                       *
 ********************************************************/

#include "pch.h"
#include "NonClientIslandWindow.h"
#include "../types/inc/utils.hpp"
#include "TerminalThemeHelpers.h"

using namespace winrt::Windows::UI;
using namespace winrt::Windows::UI::Composition;
using namespace winrt::Windows::UI::Xaml;
using namespace winrt::Windows::UI::Xaml::Hosting;
using namespace winrt::Windows::Foundation::Numerics;
using namespace ::Microsoft::Console;
using namespace ::Microsoft::Console::Types;

// Amount (in pixels) the window is shrunk on an edge that hosts an auto-hide
// taskbar, so the user can still mouse over it to reveal the taskbar.
static constexpr int AutohideTaskbarSize = 2;

NonClientIslandWindow::NonClientIslandWindow(const ElementTheme& requestedTheme) noexcept :
    IslandWindow{},
    _backgroundBrushColor{ RGB(0, 0, 0) },
    _theme{ requestedTheme },
    _isMaximized{ false }
{
}

NonClientIslandWindow::~NonClientIslandWindow()
{
}

// Window class used for the drag bar input sink child window (see MakeWindow).
static constexpr const wchar_t* dragBarClassName{ L"DRAG_BAR_WINDOW_CLASS" };

// Static window procedure for the drag bar window. On WM_NCCREATE it stashes
// the NonClientIslandWindow* (passed via CREATESTRUCT::lpCreateParams) in
// GWLP_USERDATA; for later messages it retrieves that pointer and dispatches
// to the instance's _InputSinkMessageHandler.
[[nodiscard]] LRESULT __stdcall NonClientIslandWindow::_StaticInputSinkWndProc(HWND const window, UINT const message, WPARAM const wparam, LPARAM const lparam) noexcept
{
    WINRT_ASSERT(window);

    if (WM_NCCREATE == message)
    {
        auto cs = reinterpret_cast<CREATESTRUCT*>(lparam);
        auto nonClientIslandWindow{ reinterpret_cast<NonClientIslandWindow*>(cs->lpCreateParams) };
        SetWindowLongPtr(window, GWLP_USERDATA, reinterpret_cast<LONG_PTR>(nonClientIslandWindow));
        // fall through to default window procedure
    }
    else if (auto nonClientIslandWindow{ reinterpret_cast<NonClientIslandWindow*>(GetWindowLongPtr(window, GWLP_USERDATA)) })
    {
        return nonClientIslandWindow->_InputSinkMessageHandler(message, wparam, lparam);
    }

    return DefWindowProc(window, message, wparam, lparam);
}

// Creates the top-level window (via the base class), then registers the drag
// bar window class (once, process-wide) and creates the layered drag bar
// child window that sits on top of the title bar drag area.
void NonClientIslandWindow::MakeWindow() noexcept
{
    IslandWindow::MakeWindow();

    // Registered lazily on first call; the immediately-invoked lambda runs
    // exactly once thanks to static initialization.
    static ATOM dragBarWindowClass{ []() {
        WNDCLASSEX wcEx{};
        wcEx.cbSize = sizeof(wcEx);
        wcEx.style = CS_HREDRAW | CS_VREDRAW | CS_DBLCLKS;
        wcEx.lpszClassName = dragBarClassName;
        wcEx.hbrBackground = reinterpret_cast<HBRUSH>(GetStockObject(BLACK_BRUSH));
        wcEx.hCursor = LoadCursor(nullptr, IDC_ARROW);
        wcEx.lpfnWndProc = &NonClientIslandWindow::_StaticInputSinkWndProc;
        wcEx.hInstance = wil::GetModuleInstanceHandle();
        wcEx.cbWndExtra = sizeof(NonClientIslandWindow*);
        return RegisterClassEx(&wcEx);
    }() };

    // The drag bar window is a child window of the top level window that is put
    // right on top of the drag bar. The XAML island window "steals" our mouse
    // messages which makes it hard to implement a custom drag area. By putting
    // a window on top of it, we prevent it from "stealing" the mouse messages.
    _dragBarWindow.reset(CreateWindowExW(WS_EX_LAYERED | WS_EX_NOREDIRECTIONBITMAP,
                                         dragBarClassName,
                                         L"",
                                         WS_CHILD,
                                         0,
                                         0,
                                         0,
                                         0,
                                         GetWindowHandle(),
                                         nullptr,
                                         wil::GetModuleInstanceHandle(),
                                         this));
    THROW_HR_IF_NULL(E_UNEXPECTED, _dragBarWindow);
}

// Function Description:
// - The window procedure for the drag bar forwards clicks on its client area
//   to its parent as non-client clicks.
// Forwards mouse-button messages received by the drag bar input sink to the
// top-level window as the equivalent non-client (WM_NC*) messages, so clicks
// on the drag bar behave exactly like clicks on a real title bar. Messages
// with no non-client counterpart fall through to DefWindowProc.
LRESULT NonClientIslandWindow::_InputSinkMessageHandler(UINT const message, WPARAM const wparam, LPARAM const lparam) noexcept
{
    // Map each client-area mouse message onto its non-client twin.
    const auto toNonClientMessage = [](UINT msg) -> std::optional<UINT> {
        switch (msg)
        {
        case WM_LBUTTONDOWN:
            return WM_NCLBUTTONDOWN;
        case WM_LBUTTONDBLCLK:
            return WM_NCLBUTTONDBLCLK;
        case WM_LBUTTONUP:
            return WM_NCLBUTTONUP;
        case WM_RBUTTONDOWN:
            return WM_NCRBUTTONDOWN;
        case WM_RBUTTONDBLCLK:
            return WM_NCRBUTTONDBLCLK;
        case WM_RBUTTONUP:
            return WM_NCRBUTTONUP;
        default:
            return std::nullopt;
        }
    };

    if (const auto ncMessage = toNonClientMessage(message))
    {
        // The message carries drag-bar client coordinates; convert them to
        // screen coordinates before asking the parent window about them.
        POINT cursorPos{ GET_X_LPARAM(lparam), GET_Y_LPARAM(lparam) };
        if (ClientToScreen(_dragBarWindow.get(), &cursorPos))
        {
            const auto topLevelWindow = GetWindowHandle();

            // Hit test the parent window at the screen coordinates the user
            // clicked in the drag input sink window, then pass that click
            // through as an NC click in that location.
            const LRESULT hitTest{ SendMessage(topLevelWindow, WM_NCHITTEST, 0, MAKELPARAM(cursorPos.x, cursorPos.y)) };
            SendMessage(topLevelWindow, *ncMessage, hitTest, 0);
            return 0;
        }
    }

    return DefWindowProc(_dragBarWindow.get(), message, wparam, lparam);
}

// Method Description:
// - Resizes and shows/hides the drag bar input sink window.
//   This window is used to capture clicks on the non-client area.
// Positions the drag bar input sink over the current drag area (offset below
// the top border) and shows it fully opaque, or hides it when the title bar
// is not visible or the drag area is empty.
void NonClientIslandWindow::_ResizeDragBarWindow() noexcept
{
    const til::rectangle rect{ _GetDragAreaRect() };
    if (_IsTitlebarVisible() && rect.size().area() > 0)
    {
        SetWindowPos(_dragBarWindow.get(),
                     HWND_TOP,
                     rect.left<int>(),
                     rect.top<int>() + _GetTopBorderHeight(),
                     rect.width<int>(),
                     rect.height<int>(),
                     SWP_NOACTIVATE | SWP_SHOWWINDOW);
        SetLayeredWindowAttributes(_dragBarWindow.get(), 0, 255, LWA_ALPHA);
    }
    else
    {
        SetWindowPos(_dragBarWindow.get(), HWND_BOTTOM, 0, 0, 0, 0, SWP_HIDEWINDOW | SWP_NOMOVE | SWP_NOSIZE);
    }
}

// Method Description:
// - Called when the app's size changes. When that happens, the size of the drag
//   bar may have changed. If it has, we'll need to update the WindowRgn of the
//   interop window.
// Arguments:
// - <unused>
// Return Value:
// - <none>
void NonClientIslandWindow::_OnDragBarSizeChanged(winrt::Windows::Foundation::IInspectable /*sender*/, winrt::Windows::UI::Xaml::SizeChangedEventArgs /*eventArgs*/)
{
    _ResizeDragBarWindow();
}

void NonClientIslandWindow::OnAppInitialized()
{
    IslandWindow::OnAppInitialized();
}

void NonClientIslandWindow::Initialize()
{
    IslandWindow::Initialize();

    _UpdateFrameMargins();

    // Set up our grid of content. We'll use _rootGrid as our root element.
    // There will be two children of this grid - the TitlebarControl, and the
    // "client content"
    _rootGrid.Children().Clear();
    Controls::RowDefinition titlebarRow{};
    Controls::RowDefinition contentRow{};
    titlebarRow.Height(GridLengthHelper::Auto());

    _rootGrid.RowDefinitions().Append(titlebarRow);
    _rootGrid.RowDefinitions().Append(contentRow);

    // Create our titlebar control
    _titlebar = winrt::TerminalApp::TitlebarControl{ reinterpret_cast<uint64_t>(GetHandle()) };
    _dragBar = _titlebar.DragBar();

    // Keep the input sink in sync with both the drag bar and the root grid.
    _dragBar.SizeChanged({ this, &NonClientIslandWindow::_OnDragBarSizeChanged });
    _rootGrid.SizeChanged({ this, &NonClientIslandWindow::_OnDragBarSizeChanged });

    _rootGrid.Children().Append(_titlebar);

    // The titlebar lives in row 0, client content in row 1.
    Controls::Grid::SetRow(_titlebar, 0);
}

// Method Description:
// - Set the content of the "client area" of our window to the given content.
// Arguments:
// - content: the new UI element to use as the client content
// Return Value:
// - <none>
void NonClientIslandWindow::SetContent(winrt::Windows::UI::Xaml::UIElement content)
{
    _clientContent = content;

    _rootGrid.Children().Append(content);

    // SetRow only works on FrameworkElement's, so cast it to a FWE before
    // calling. We know that our content is a Grid, so we don't need to worry
    // about this.
    const auto fwe = content.try_as<winrt::Windows::UI::Xaml::FrameworkElement>();
    if (fwe)
    {
        Controls::Grid::SetRow(fwe, 1);
    }
}

// Method Description:
// - Set the content of the "titlebar area" of our window to the given content.
// Arguments:
// - content: the new UI element to use as the titlebar content
// Return Value:
// - <none>
void NonClientIslandWindow::SetTitlebarContent(winrt::Windows::UI::Xaml::UIElement content)
{
    _titlebar.Content(content);

    // GH#4288 - add a SizeChanged handler to this content. It's possible that
    // this element's size will change after the dragbar's. When that happens,
    // the drag bar won't send another SizeChanged event, because the dragbar's
    // _size_ didn't change, only its position.
    const auto fwe = content.try_as<winrt::Windows::UI::Xaml::FrameworkElement>();
    if (fwe)
    {
        fwe.SizeChanged({ this, &NonClientIslandWindow::_OnDragBarSizeChanged });
    }
}

// Method Description:
// - This method computes the height of the little border above the title bar
//   and returns it. If the border is disabled, then this method will return 0.
// Return Value:
// - the height of the border above the title bar or 0 if it's disabled
int NonClientIslandWindow::_GetTopBorderHeight() const noexcept
{
    if (_isMaximized || _fullscreen)
    {
        // no border when maximized
        return 0;
    }

    return topBorderVisibleHeight;
}

// Computes the drag bar's rectangle in physical (DPI-scaled) client
// coordinates of the top-level window, or an empty RECT when the drag bar is
// absent or collapsed.
RECT NonClientIslandWindow::_GetDragAreaRect() const noexcept
{
    if (_dragBar && _dragBar.Visibility() == Visibility::Visible)
    {
        // The drag bar's position is in XAML logical units relative to
        // _rootGrid; transform to client coordinates and scale by the DPI.
        const auto scale = GetCurrentDpiScale();
        const auto transform = _dragBar.TransformToVisual(_rootGrid);
        const auto logicalDragBarRect = winrt::Windows::Foundation::Rect{
            0.0f,
            0.0f,
            static_cast<float>(_dragBar.ActualWidth()),
            static_cast<float>(_dragBar.ActualHeight())
        };
        const auto clientDragBarRect = transform.TransformBounds(logicalDragBarRect);
        RECT dragBarRect = {
            static_cast<LONG>(clientDragBarRect.X * scale),
            static_cast<LONG>(clientDragBarRect.Y * scale),
            static_cast<LONG>((clientDragBarRect.Width + clientDragBarRect.X) * scale),
            static_cast<LONG>((clientDragBarRect.Height + clientDragBarRect.Y) * scale),
        };
        return dragBarRect;
    }

    return RECT{};
}

// Method Description:
// - Called when the size of the window changes for any reason. Updates the
//   XAML island to match our new sizing and also updates the maximize icon
//   if the window went from maximized to restored or the opposite.
void NonClientIslandWindow::OnSize(const UINT width, const UINT height)
{
    _UpdateMaximizedState();

    if (_interopWindowHandle)
    {
        _UpdateIslandPosition(width, height);
    }
}

// Method Description:
// - Checks if the window has been maximized or restored since the last time.
// If it has been maximized or restored, then it updates the _isMaximized
// flags and notifies of the change by calling
// NonClientIslandWindow::_OnMaximizeChange.
void NonClientIslandWindow::_UpdateMaximizedState()
{
    const auto windowStyle = GetWindowStyle(_window.get());
    const auto newIsMaximized = WI_IsFlagSet(windowStyle, WS_MAXIMIZE);

    if (_isMaximized != newIsMaximized)
    {
        _isMaximized = newIsMaximized;
        _OnMaximizeChange();
    }
}

// Method Description:
// - Called when the the windows goes from restored to maximized or from
//   maximized to restored. Updates the maximize button's icon and the frame
//   margins.
void NonClientIslandWindow::_OnMaximizeChange() noexcept
{
    if (_titlebar)
    {
        const auto windowStyle = GetWindowStyle(_window.get());
        const auto isIconified = WI_IsFlagSet(windowStyle, WS_ICONIC);

        const auto state = _isMaximized ? winrt::TerminalApp::WindowVisualState::WindowVisualStateMaximized :
                           isIconified  ? winrt::TerminalApp::WindowVisualState::WindowVisualStateIconified :
                                          winrt::TerminalApp::WindowVisualState::WindowVisualStateNormal;

        try
        {
            _titlebar.SetWindowVisualState(state);
        }
        CATCH_LOG();
    }

    // no frame margin when maximized
    _UpdateFrameMargins();
}

// Method Description:
// - Called when the size of the window changes for any reason. Updates the
//   sizes of our child XAML Islands to match our new sizing.
void NonClientIslandWindow::_UpdateIslandPosition(const UINT windowWidth, const UINT windowHeight)
{
    const auto topBorderHeight = Utils::ClampToShortMax(_GetTopBorderHeight(), 0);

    const COORD newIslandPos = { 0, topBorderHeight };

    winrt::check_bool(SetWindowPos(_interopWindowHandle,
                                   HWND_BOTTOM,
                                   newIslandPos.X,
                                   newIslandPos.Y,
                                   windowWidth,
                                   windowHeight - topBorderHeight,
                                   SWP_SHOWWINDOW));

    // This happens when we go from maximized to restored or the opposite
    // because topBorderHeight changes.
    if (!_oldIslandPos.has_value() || _oldIslandPos.value() != newIslandPos)
    {
        // The drag bar's position changed compared to the client area because
        // the island moved but we will not be notified about this in the
        // NonClientIslandWindow::OnDragBarSizeChanged method because this
        // method is only called when the position of the drag bar changes
        // **inside** the island which is not the case here.
        _ResizeDragBarWindow();

        _oldIslandPos = { newIslandPos };
    }
}

// Method Description:
// - Returns the height of the little space at the top of the window used to
//   resize the window.
// Return Value:
// - the height of the window's top resize handle
int NonClientIslandWindow::_GetResizeHandleHeight() const noexcept
{
    // there isn't a SM_CYPADDEDBORDER for the Y axis
    return ::GetSystemMetricsForDpi(SM_CXPADDEDBORDER, _currentDpi) +
           ::GetSystemMetricsForDpi(SM_CYSIZEFRAME, _currentDpi);
}

// Method Description:
// - Responds to the WM_NCCALCSIZE message by calculating and creating the new
//   window frame. Keeps the default left/right/bottom frame, but removes the
//   frame from the top edge (restoring the pre-DefWindowProc top), with
//   corrections for the maximized state and auto-hide taskbars.
[[nodiscard]] LRESULT NonClientIslandWindow::_OnNcCalcSize(const WPARAM wParam, const LPARAM lParam) noexcept
{
    if (wParam == false)
    {
        return 0;
    }

    NCCALCSIZE_PARAMS* params = reinterpret_cast<NCCALCSIZE_PARAMS*>(lParam);

    // Store the original top before the default window proc applies the
    // default frame.
    const auto originalTop = params->rgrc[0].top;

    const auto originalSize = params->rgrc[0];

    // apply the default frame
    auto ret = DefWindowProc(_window.get(), WM_NCCALCSIZE, wParam, lParam);
    if (ret != 0)
    {
        return ret;
    }

    auto newSize = params->rgrc[0];
    // Re-apply the original top from before the size of the default frame was applied.
    newSize.top = originalTop;

    // WM_NCCALCSIZE is called before WM_SIZE
    _UpdateMaximizedState();

    // We don't need this correction when we're fullscreen. We will have the
    // WS_POPUP size, so we don't have to worry about borders, and the default
    // frame will be fine.
    if (_isMaximized && !_fullscreen)
    {
        // When a window is maximized, its size is actually a little bit more
        // than the monitor's work area. The window is positioned and sized in
        // such a way that the resize handles are outside of the monitor and
        // then the window is clipped to the monitor so that the resize handle
        // do not appear because you don't need them (because you can't resize
        // a window when it's maximized unless you restore it).
        newSize.top += _GetResizeHandleHeight();
    }

    // GH#1438 - Attempt to detect if there's an autohide taskbar, and if there
    // is, reduce our size a bit on the side with the taskbar, so the user can
    // still mouse-over the taskbar to reveal it.
    // GH#5209 - make sure to use MONITOR_DEFAULTTONEAREST, so that this will
    // still find the right monitor even when we're restoring from minimized.
    HMONITOR hMon = MonitorFromWindow(_window.get(), MONITOR_DEFAULTTONEAREST);
    if (hMon && (_isMaximized || _fullscreen))
    {
        MONITORINFO monInfo{ 0 };
        monInfo.cbSize = sizeof(MONITORINFO);
        GetMonitorInfo(hMon, &monInfo);

        // First, check if we have an auto-hide taskbar at all:
        APPBARDATA autohide{ 0 };
        autohide.cbSize = sizeof(autohide);
        UINT state = (UINT)SHAppBarMessage(ABM_GETSTATE, &autohide);
        if (WI_IsFlagSet(state, ABS_AUTOHIDE))
        {
            // This helper can be used to determine if there's a auto-hide
            // taskbar on the given edge of the monitor we're currently on.
            auto hasAutohideTaskbar = [&monInfo](const UINT edge) -> bool {
                APPBARDATA data{ 0 };
                data.cbSize = sizeof(data);
                data.uEdge = edge;
                data.rc = monInfo.rcMonitor;
                HWND hTaskbar = (HWND)SHAppBarMessage(ABM_GETAUTOHIDEBAREX, &data);
                return hTaskbar != nullptr;
            };

            const bool onTop = hasAutohideTaskbar(ABE_TOP);
            const bool onBottom = hasAutohideTaskbar(ABE_BOTTOM);
            const bool onLeft = hasAutohideTaskbar(ABE_LEFT);
            const bool onRight = hasAutohideTaskbar(ABE_RIGHT);

            // If there's a taskbar on any side of the monitor, reduce our size
            // a little bit on that edge.
            //
            // Note to future code archeologists:
            // This doesn't seem to work for fullscreen on the primary display.
            // However, testing a bunch of other apps with fullscreen modes
            // and an auto-hiding taskbar has shown that _none_ of them
            // reveal the taskbar from fullscreen mode. This includes Edge,
            // Firefox, Chrome, Sublime Text, PowerPoint - none seemed to
            // support this.
            //
            // This does however work fine for maximized.
            if (onTop)
            {
                // Peculiarly, when we're fullscreen,
                newSize.top += AutohideTaskbarSize;
            }
            if (onBottom)
            {
                newSize.bottom -= AutohideTaskbarSize;
            }
            if (onLeft)
            {
                newSize.left += AutohideTaskbarSize;
            }
            if (onRight)
            {
                newSize.right -= AutohideTaskbarSize;
            }
        }
    }

    params->rgrc[0] = newSize;

    return 0;
}

// Method Description:
// - Hit test the frame for resizing and moving.
// Arguments:
// - ptMouse: the mouse point being tested, in absolute (NOT WINDOW) coordinates.
// Return Value:
// - one of the values from
//   https://docs.microsoft.com/en-us/windows/desktop/inputdev/wm-nchittest#return-value
//   corresponding to the area of the window that was hit
[[nodiscard]] LRESULT NonClientIslandWindow::_OnNcHitTest(POINT ptMouse) const noexcept
{
    // This will handle the left, right and bottom parts of the frame because
    // we didn't change them.
    LPARAM lParam = MAKELONG(ptMouse.x, ptMouse.y);
    const auto originalRet = DefWindowProc(_window.get(), WM_NCHITTEST, 0, lParam);
    if (originalRet != HTCLIENT)
    {
        return originalRet;
    }

    // At this point, we know that the cursor is inside the client area so it
    // has to be either the little border at the top of our custom title bar,
    // the drag bar or something else in the XAML island. But the XAML Island
    // handles WM_NCHITTEST on its own so actually it cannot be the XAML
    // Island. Then it must be the drag bar or the little border at the top
    // which the user can use to move or resize the window.
    RECT rcWindow;
    winrt::check_bool(::GetWindowRect(_window.get(), &rcWindow));

    const auto resizeBorderHeight = _GetResizeHandleHeight();
    const auto isOnResizeBorder = ptMouse.y < rcWindow.top + resizeBorderHeight;

    // the top of the drag bar is used to resize the window
    if (!_isMaximized && isOnResizeBorder)
    {
        return HTTOP;
    }

    return HTCAPTION;
}

// Method Description:
// - Sets the cursor to the sizing cursor when we hit-test the top sizing border.
//   We need to do this because we've covered it up with a child window.
[[nodiscard]] LRESULT NonClientIslandWindow::_OnSetCursor(WPARAM wParam, LPARAM lParam) const noexcept
{
    if (LOWORD(lParam) == HTCLIENT)
    {
        // Get the cursor position from the _last message_ and not from
        // `GetCursorPos` (which returns the cursor position _at the
        // moment_) because if we're lagging behind the cursor's position,
        // we still want to get the cursor position that was associated
        // with that message at the time it was sent to handle the message
        // correctly.
        const auto screenPtLparam{ GetMessagePos() };
        const LRESULT hitTest{ SendMessage(GetWindowHandle(), WM_NCHITTEST, 0, screenPtLparam) };
        if (hitTest == HTTOP)
        {
            // We have to set the vertical resize cursor manually on
            // the top resize handle because Windows thinks that the
            // cursor is on the client area because it asked the
            // drag window with `WM_NCHITTEST` and it returned
            // `HTCLIENT`.
            // We don't want to modify the drag window's `WM_NCHITTEST`
            // handling to return `HTTOP` because otherwise, the system
            // would resize the drag window instead of the top level
            // window!
            SetCursor(LoadCursor(nullptr, IDC_SIZENS));
            return TRUE;
        }
        else
        {
            // reset cursor
            SetCursor(LoadCursor(nullptr, IDC_ARROW));
            return TRUE;
        }
    }

    return DefWindowProc(GetWindowHandle(), WM_SETCURSOR, wParam, lParam);
}

// Method Description:
// - Gets the difference between window and client area size.
// Arguments:
// - dpi: dpi of a monitor on which the window is placed
// Return Value
// - The size difference
SIZE NonClientIslandWindow::GetTotalNonClientExclusiveSize(UINT dpi) const noexcept
{
    const auto windowStyle = static_cast<DWORD>(GetWindowLong(_window.get(), GWL_STYLE));
    RECT islandFrame{};

    // If we failed to get the correct window size for whatever reason, log
    // the error and go on. We'll use whatever the control proposed as the
    // size of our window, which will be at least close.
    LOG_IF_WIN32_BOOL_FALSE(AdjustWindowRectExForDpi(&islandFrame, windowStyle, false, 0, dpi));

    // We replaced the default top frame with our own thin top border.
    islandFrame.top = -topBorderVisibleHeight;

    // If we have a titlebar, this is being called after we've initialized, and
    // we can just ask that titlebar how big it wants to be.
    const auto titleBarHeight = _titlebar ? static_cast<LONG>(_titlebar.ActualHeight()) : 0;

    return {
        islandFrame.right - islandFrame.left,
        islandFrame.bottom - islandFrame.top + titleBarHeight
    };
}

// Method Description:
// - Updates the borders of our window frame, using DwmExtendFrameIntoClientArea.
// Arguments:
// - <none>
// Return Value:
// - <none> (the DwmExtendFrameIntoClientArea HRESULT is only logged)
void NonClientIslandWindow::_UpdateFrameMargins() const noexcept
{
    MARGINS margins = {};

    if (_GetTopBorderHeight() != 0)
    {
        RECT frame = {};
        winrt::check_bool(::AdjustWindowRectExForDpi(&frame, GetWindowStyle(_window.get()), FALSE, 0, _currentDpi));

        // We removed the whole top part of the frame (see handling of
        // WM_NCCALCSIZE) so the top border is missing now. We add it back here.
        // Note #1: You might wonder why we don't remove just the title bar instead
        //  of removing the whole top part of the frame and then adding the little
        //  top border back. I tried to do this but it didn't work: DWM drew the
        //  whole title bar anyways on top of the window. It seems that DWM only
        //  wants to draw either nothing or the whole top part of the frame.
        // Note #2: For some reason if you try to set the top margin to just the
        //  top border height (what we want to do), then there is a transparency
        //  bug when the window is inactive, so I've decided to add the whole top
        //  part of the frame instead and then we will hide everything that we
        //  don't need (that is, the whole thing but the little 1 pixel wide border
        //  at the top) in the WM_PAINT handler. This eliminates the transparency
        //  bug and it's what a lot of Win32 apps that customize the title bar do
        //  so it should work fine.
        margins.cyTopHeight = -frame.top;
    }

    // Extend the frame into the client area. microsoft/terminal#2735 - Just log
    // the failure here, don't crash. If DWM crashes for any reason, calling
    // THROW_IF_FAILED() will cause us to take a trip upstate. Just log, and
    // we'll fix ourselves when DWM comes back.
    LOG_IF_FAILED(DwmExtendFrameIntoClientArea(_window.get(), &margins));
}

// Method Description:
// - Handle window messages from the message loop.
// Arguments:
// - message: A window message ID identifying the message.
// - wParam: The contents of this parameter depend on the value of the message parameter.
// - lParam: The contents of this parameter depend on the value of the message parameter.
// Return Value:
// - The return value is the result of the message processing and depends on the
//   message sent.
[[nodiscard]] LRESULT NonClientIslandWindow::MessageHandler(UINT const message, WPARAM const wParam, LPARAM const lParam) noexcept
{
    switch (message)
    {
    case WM_SETCURSOR:
        return _OnSetCursor(wParam, lParam);
    case WM_DISPLAYCHANGE:
        // GH#4166: When the DPI of the monitor changes out from underneath us,
        // resize our drag bar, to reflect its newly scaled size.
        _ResizeDragBarWindow();
        return 0;
    case WM_NCCALCSIZE:
        return _OnNcCalcSize(wParam, lParam);
    case WM_NCHITTEST:
        return _OnNcHitTest({ GET_X_LPARAM(lParam), GET_Y_LPARAM(lParam) });
    case WM_PAINT:
        return _OnPaint();
    }

    // Everything else goes to the base island window.
    return IslandWindow::MessageHandler(message, wParam, lParam);
}

// Method Description:
// - This method is called when the window receives the WM_PAINT message.
// - It paints the client area with the color of the title bar to hide the
//   system's title bar behind the XAML Islands window during a resize.
//   Indeed, the XAML Islands window doesn't resize at the same time than
//   the top level window
//   (see https://github.com/microsoft/microsoft-ui-xaml/issues/759).
// Return Value:
// - The value returned from the window proc.
// NOTE: this function continues past the end of this chunk; the remainder of
// its body is unchanged.
[[nodiscard]] LRESULT NonClientIslandWindow::_OnPaint() noexcept
{
    if (!_titlebar)
    {
        return 0;
    }

    PAINTSTRUCT ps{ 0 };
    const auto hdc = wil::BeginPaint(_window.get(), &ps);
    if (!hdc)
    {
        return 0;
    }

    const auto topBorderHeight = _GetTopBorderHeight();

    if (ps.rcPaint.top < topBorderHeight)
    {
        RECT rcTopBorder = ps.rcPaint;
        rcTopBorder.bottom = topBorderHeight;

        // To show the original top border, we have to paint on top of it with
        // the alpha component set to 0. This page recommends to paint the area
        // in black using the stock BLACK_BRUSH to do this:
        // https://docs.microsoft.com/en-us/windows/win32/dwm/customframe#extending-the-client-frame
        ::FillRect(hdc.get(), &rcTopBorder, GetStockBrush(BLACK_BRUSH));
    }

    if (ps.rcPaint.bottom > topBorderHeight)
    {
        RECT rcRest = ps.rcPaint;
        rcRest.top = topBorderHeight;

        const auto backgroundBrush = _titlebar.Background();
        const auto backgroundSolidBrush = backgroundBrush.as<Media::SolidColorBrush>();
        const auto backgroundColor = backgroundSolidBrush.Color();
        const auto color = RGB(backgroundColor.R, backgroundColor.G, backgroundColor.B);

        if (!_backgroundBrush || color != _backgroundBrushColor)
        {
            // Create brush for titlebar color.
_backgroundBrush = wil::unique_hbrush(CreateSolidBrush(color)); } // To hide the original title bar, we have to paint on top of it with // the alpha component set to 255. This is a hack to do it with GDI. // See NonClientIslandWindow::_UpdateFrameMargins for more information. HDC opaqueDc; BP_PAINTPARAMS params = { sizeof(params), BPPF_NOCLIP | BPPF_ERASE }; HPAINTBUFFER buf = BeginBufferedPaint(hdc.get(), &rcRest, BPBF_TOPDOWNDIB, &params, &opaqueDc); if (!buf || !opaqueDc) { winrt::throw_last_error(); } ::FillRect(opaqueDc, &rcRest, _backgroundBrush.get()); ::BufferedPaintSetAlpha(buf, nullptr, 255); ::EndBufferedPaint(buf, TRUE); } return 0; } // Method Description: // - This method is called when the window receives the WM_NCCREATE message. // Return Value: // - The value returned from the window proc. [[nodiscard]] LRESULT NonClientIslandWindow::_OnNcCreate(WPARAM wParam, LPARAM lParam) noexcept { const auto ret = IslandWindow::_OnNcCreate(wParam, lParam); if (ret == FALSE) { return ret; } // Set the frame's theme before it is rendered (WM_NCPAINT) so that it is // rendered with the correct theme. _UpdateFrameTheme(); return TRUE; } // Method Description: // - Updates the window frame's theme depending on the application theme (light // or dark). This doesn't invalidate the old frame so it will not be // rerendered until the user resizes or focuses/unfocuses the window. // Return Value: // - <none> void NonClientIslandWindow::_UpdateFrameTheme() const { bool isDarkMode; switch (_theme) { case ElementTheme::Light: isDarkMode = false; break; case ElementTheme::Dark: isDarkMode = true; break; default: isDarkMode = Application::Current().RequestedTheme() == ApplicationTheme::Dark; break; } LOG_IF_FAILED(TerminalTrySetDarkTheme(_window.get(), isDarkMode)); } // Method Description: // - Called when the app wants to change its theme. We'll update the frame // theme to match the new theme. 
// Arguments: // - requestedTheme: the ElementTheme to use as the new theme for the UI // Return Value: // - <none> void NonClientIslandWindow::OnApplicationThemeChanged(const ElementTheme& requestedTheme) { IslandWindow::OnApplicationThemeChanged(requestedTheme); _theme = requestedTheme; _UpdateFrameTheme(); } // Method Description: // - Enable or disable fullscreen mode. When entering fullscreen mode, we'll // need to manually hide the entire titlebar. // - See also IslandWindow::_SetIsFullscreen, which does additional work. // Arguments: // - fullscreenEnabled: If true, we're entering fullscreen mode. If false, we're leaving. // Return Value: // - <none> void NonClientIslandWindow::_SetIsFullscreen(const bool fullscreenEnabled) { IslandWindow::_SetIsFullscreen(fullscreenEnabled); _titlebar.Visibility(!fullscreenEnabled ? Visibility::Visible : Visibility::Collapsed); // GH#4224 - When the auto-hide taskbar setting is enabled, then we don't // always get another window message to trigger us to remove the drag bar. // So, make sure to update the size of the drag region here, so that it // _definitely_ goes away. _ResizeDragBarWindow(); } // Method Description: // - Returns true if the titlebar is visible. For things like fullscreen mode, // borderless mode, this will return false. // Arguments: // - <none> // Return Value: // - true iff the titlebar is visible bool NonClientIslandWindow::_IsTitlebarVisible() const { // TODO:GH#2238 - When we add support for titlebar-less mode, this should be // updated to include that mode. return !_fullscreen; }
#ifndef ENTT_ENTITY_STORAGE_HPP #define ENTT_ENTITY_STORAGE_HPP #include <algorithm> #include <iterator> #include <numeric> #include <utility> #include <vector> #include <cstddef> #include <type_traits> #include "../config/config.h" #include "../core/algorithm.hpp" #include "sparse_set.hpp" #include "entity.hpp" namespace entt { /** * @brief Basic storage implementation. * * This class is a refinement of a sparse set that associates an object to an * entity. The main purpose of this class is to extend sparse sets to store * components in a registry. It guarantees fast access both to the elements and * to the entities. * * @note * Entities and objects have the same order. It's guaranteed both in case of raw * access (either to entities or objects) and when using random or input access * iterators. * * @note * Internal data structures arrange elements to maximize performance. Because of * that, there are no guarantees that elements have the expected order when * iterate directly the internal packed array (see `raw` and `size` member * functions for that). Use `begin` and `end` instead. * * @warning * Empty types aren't explicitly instantiated. Temporary objects are returned in * place of the instances of the components and raw access isn't available for * them. * * @sa sparse_set<Entity> * * @tparam Entity A valid entity type (see entt_traits for more details). * @tparam Type Type of objects assigned to the entities. 
*/
template<typename Entity, typename Type, typename = std::void_t<>>
class basic_storage: public sparse_set<Entity> {
    using underlying_type = sparse_set<Entity>;
    using traits_type = entt_traits<std::underlying_type_t<Entity>>;

    // Random access iterator over the packed vector of instances.
    // Implementation note (see the operators below): the iterator stores a
    // one-past-the-element index counted from the *end* of the packed vector.
    // `begin()` hands out `index == size()` and dereferencing reads
    // `instances[index - 1]`, while `operator++` *decrements* the index.
    // Iteration therefore walks the packed array from the last stored element
    // back to the first, which is why all the comparison operators below are
    // inverted with respect to the raw index values.
    template<bool Const>
    class iterator {
        friend class basic_storage<Entity, Type>;

        // Const iterators see the vector as const.
        using instance_type = std::conditional_t<Const, const std::vector<Type>, std::vector<Type>>;
        using index_type = typename traits_type::difference_type;

        iterator(instance_type *ref, const index_type idx) ENTT_NOEXCEPT
            : instances{ref}, index{idx}
        {}

    public:
        using difference_type = index_type;
        using value_type = Type;
        using pointer = std::conditional_t<Const, const value_type *, value_type *>;
        using reference = std::conditional_t<Const, const value_type &, value_type &>;
        using iterator_category = std::random_access_iterator_tag;

        iterator() ENTT_NOEXCEPT = default;

        // Advancing moves towards the front of the vector (index decreases).
        iterator & operator++() ENTT_NOEXCEPT {
            return --index, *this;
        }

        iterator operator++(int) ENTT_NOEXCEPT {
            iterator orig = *this;
            return ++(*this), orig;
        }

        iterator & operator--() ENTT_NOEXCEPT {
            return ++index, *this;
        }

        iterator operator--(int) ENTT_NOEXCEPT {
            iterator orig = *this;
            return --(*this), orig;
        }

        iterator & operator+=(const difference_type value) ENTT_NOEXCEPT {
            index -= value;
            return *this;
        }

        iterator operator+(const difference_type value) const ENTT_NOEXCEPT {
            return iterator{instances, index-value};
        }

        iterator & operator-=(const difference_type value) ENTT_NOEXCEPT {
            return (*this += -value);
        }

        iterator operator-(const difference_type value) const ENTT_NOEXCEPT {
            return (*this + -value);
        }

        // Reversed on purpose: indexes decrease as the iterator advances.
        difference_type operator-(const iterator &other) const ENTT_NOEXCEPT {
            return other.index - index;
        }

        reference operator[](const difference_type value) const ENTT_NOEXCEPT {
            const auto pos = size_type(index-value-1);
            return (*instances)[pos];
        }

        bool operator==(const iterator &other) const ENTT_NOEXCEPT {
            return other.index == index;
        }

        bool operator!=(const iterator &other) const ENTT_NOEXCEPT {
            return !(*this == other);
        }

        // "Earlier" in iteration order means a *larger* raw index.
        bool operator<(const iterator &other) const ENTT_NOEXCEPT {
            return index > other.index;
        }

        bool operator>(const iterator &other) const ENTT_NOEXCEPT {
            return index < other.index;
        }

        bool operator<=(const iterator &other) const ENTT_NOEXCEPT {
            return !(*this > other);
        }

        bool operator>=(const iterator &other) const ENTT_NOEXCEPT {
            return !(*this < other);
        }

        pointer operator->() const ENTT_NOEXCEPT {
            const auto pos = size_type(index-1);
            return &(*instances)[pos];
        }

        reference operator*() const ENTT_NOEXCEPT {
            return *operator->();
        }

    private:
        instance_type *instances;
        index_type index;
    };

public:
    /*! @brief Type of the objects associated with the entities. */
    using object_type = Type;
    /*! @brief Underlying entity identifier. */
    using entity_type = Entity;
    /*! @brief Unsigned integer type. */
    using size_type = std::size_t;
    /*! @brief Random access iterator type. */
    using iterator_type = iterator<false>;
    /*! @brief Constant random access iterator type. */
    using const_iterator_type = iterator<true>;

    /**
     * @brief Increases the capacity of a storage.
     *
     * If the new capacity is greater than the current capacity, new storage is
     * allocated, otherwise the method does nothing.
     *
     * @param cap Desired capacity.
     */
    void reserve(const size_type cap) {
        underlying_type::reserve(cap);
        instances.reserve(cap);
    }

    /*! @brief Requests the removal of unused capacity. */
    void shrink_to_fit() {
        underlying_type::shrink_to_fit();
        instances.shrink_to_fit();
    }

    /**
     * @brief Direct access to the array of objects.
     *
     * The returned pointer is such that range `[raw(), raw() + size()]` is
     * always a valid range, even if the container is empty.
     *
     * @note
     * There are no guarantees on the order, even though either `sort` or
     * `respect` has been previously invoked. Internal data structures arrange
     * elements to maximize performance. Accessing them directly gives a
     * performance boost but less guarantees. Use `begin` and `end` if you want
     * to iterate the storage in the expected order.
     *
     * @return A pointer to the array of objects.
     */
    const object_type * raw() const ENTT_NOEXCEPT {
        return instances.data();
    }

    /*! @copydoc raw */
    object_type * raw() ENTT_NOEXCEPT {
        return const_cast<object_type *>(std::as_const(*this).raw());
    }

    /**
     * @brief Returns an iterator to the beginning.
     *
     * The returned iterator points to the first instance of the given type. If
     * the storage is empty, the returned iterator will be equal to `end()`.
     *
     * @note
     * Random access iterators stay true to the order imposed by a call to
     * either `sort` or `respect`.
     *
     * @return An iterator to the first instance of the given type.
     */
    const_iterator_type cbegin() const ENTT_NOEXCEPT {
        // One-past index counted from the end; see the iterator class above.
        const typename traits_type::difference_type pos = underlying_type::size();
        return const_iterator_type{&instances, pos};
    }

    /*! @copydoc cbegin */
    const_iterator_type begin() const ENTT_NOEXCEPT {
        return cbegin();
    }

    /*! @copydoc begin */
    iterator_type begin() ENTT_NOEXCEPT {
        const typename traits_type::difference_type pos = underlying_type::size();
        return iterator_type{&instances, pos};
    }

    /**
     * @brief Returns an iterator to the end.
     *
     * The returned iterator points to the element following the last instance
     * of the given type. Attempting to dereference the returned iterator
     * results in undefined behavior.
     *
     * @note
     * Random access iterators stay true to the order imposed by a call to
     * either `sort` or `respect`.
     *
     * @return An iterator to the element following the last instance of the
     * given type.
     */
    const_iterator_type cend() const ENTT_NOEXCEPT {
        return const_iterator_type{&instances, {}};
    }

    /*! @copydoc cend */
    const_iterator_type end() const ENTT_NOEXCEPT {
        return cend();
    }

    /*! @copydoc end */
    iterator_type end() ENTT_NOEXCEPT {
        return iterator_type{&instances, {}};
    }

    /**
     * @brief Returns the object associated with an entity.
     *
     * @warning
     * Attempting to use an entity that doesn't belong to the storage results in
     * undefined behavior.<br/>
     * An assertion will abort the execution at runtime in debug mode if the
     * storage doesn't contain the given entity.
     *
     * @param entt A valid entity identifier.
     * @return The object associated with the entity.
     */
    const object_type & get(const entity_type entt) const ENTT_NOEXCEPT {
        return instances[underlying_type::index(entt)];
    }

    /*! @copydoc get */
    object_type & get(const entity_type entt) ENTT_NOEXCEPT {
        return const_cast<object_type &>(std::as_const(*this).get(entt));
    }

    /**
     * @brief Returns a pointer to the object associated with an entity, if any.
     * @param entt A valid entity identifier.
     * @return The object associated with the entity, if any.
     */
    const object_type * try_get(const entity_type entt) const ENTT_NOEXCEPT {
        return underlying_type::has(entt) ? (instances.data() + underlying_type::index(entt)) : nullptr;
    }

    /*! @copydoc try_get */
    object_type * try_get(const entity_type entt) ENTT_NOEXCEPT {
        return const_cast<object_type *>(std::as_const(*this).try_get(entt));
    }

    /**
     * @brief Assigns an entity to a storage and constructs its object.
     *
     * This version accept both types that can be constructed in place directly
     * and types like aggregates that do not work well with a placement new as
     * performed usually under the hood during an _emplace back_.
     *
     * @warning
     * Attempting to use an entity that already belongs to the storage results
     * in undefined behavior.<br/>
     * An assertion will abort the execution at runtime in debug mode if the
     * storage already contains the given entity.
     *
     * @tparam Args Types of arguments to use to construct the object.
     * @param entt A valid entity identifier.
     * @param args Parameters to use to construct an object for the entity.
     * @return The object associated with the entity.
     */
    template<typename... Args>
    object_type & construct(const entity_type entt, Args &&... args) {
        // Aggregates can't be emplaced from a parameter pack directly.
        if constexpr(std::is_aggregate_v<object_type>) {
            instances.emplace_back(Type{std::forward<Args>(args)...});
        } else {
            instances.emplace_back(std::forward<Args>(args)...);
        }

        // entity goes after component in case constructor throws
        underlying_type::construct(entt);
        return instances.back();
    }

    /**
     * @brief Assigns one or more entities to a storage and default constructs
     * or copy constructs their objects.
     *
     * The object type must be at least move and default insertable if no
     * arguments are provided, move and copy insertable otherwise.
     *
     * @warning
     * Attempting to assign an entity that already belongs to the storage
     * results in undefined behavior.<br/>
     * An assertion will abort the execution at runtime in debug mode if the
     * storage already contains the given entity.
     *
     * @tparam It Type of forward iterator.
     * @tparam Args Types of arguments to use to construct the object.
     * @param first An iterator to the first element of the range of entities.
     * @param last An iterator past the last element of the range of entities.
     * @param args Parameters to use to construct an object for the entities.
     * @return An iterator to the list of instances just created and sorted the
     * same of the entities.
     */
    template<typename It, typename... Args>
    iterator_type batch(It first, It last, Args &&... args) {
        if constexpr(sizeof...(Args) == 0) {
            instances.resize(instances.size() + std::distance(first, last));
        } else {
            instances.resize(instances.size() + std::distance(first, last), Type{std::forward<Args>(args)...});
        }

        // entity goes after component in case constructor throws
        underlying_type::batch(first, last);
        return begin();
    }

    /**
     * @brief Removes an entity from a storage and destroys its object.
     *
     * @warning
     * Attempting to use an entity that doesn't belong to the storage results in
     * undefined behavior.<br/>
     * An assertion will abort the execution at runtime in debug mode if the
     * storage doesn't contain the given entity.
     *
     * @param entt A valid entity identifier.
     */
    void destroy(const entity_type entt) {
        // Swap-and-pop: move the last instance into the destroyed slot so the
        // packed array stays dense. The temporary keeps this well defined even
        // when the destroyed entity occupies the last slot itself.
        auto other = std::move(instances.back());
        instances[underlying_type::index(entt)] = std::move(other);
        instances.pop_back();
        underlying_type::destroy(entt);
    }

    /**
     * @brief Swaps entities and objects in the internal packed arrays.
     *
     * @warning
     * Attempting to swap entities that don't belong to the sparse set results
     * in undefined behavior.<br/>
     * An assertion will abort the execution at runtime in debug mode if the
     * sparse set doesn't contain the given entities.
     *
     * @param lhs A valid entity identifier.
     * @param rhs A valid entity identifier.
     */
    void swap(const entity_type lhs, const entity_type rhs) ENTT_NOEXCEPT override {
        std::swap(instances[underlying_type::index(lhs)], instances[underlying_type::index(rhs)]);
        underlying_type::swap(lhs, rhs);
    }

    /**
     * @brief Sort elements according to the given comparison function.
     *
     * Sort the elements so that iterating the range with a couple of iterators
     * returns them in the expected order. See `begin` and `end` for more
     * details.
     *
     * The comparison function object must return `true` if the first element
     * is _less_ than the second one, `false` otherwise. The signature of the
     * comparison function should be equivalent to one of the following:
     *
     * @code{.cpp}
     * bool(const Entity, const Entity);
     * bool(const Type &, const Type &);
     * @endcode
     *
     * Moreover, the comparison function object shall induce a
     * _strict weak ordering_ on the values.
     *
     * The sort function object must offer a member function template
     * `operator()` that accepts three arguments:
     *
     * * An iterator to the first element of the range to sort.
     * * An iterator past the last element of the range to sort.
     * * A comparison function to use to compare the elements.
     *
     * @note
     * Attempting to iterate elements using a raw pointer returned by a call to
     * either `data` or `raw` gives no guarantees on the order, even though
     * `sort` has been invoked.
     *
     * @tparam Compare Type of comparison function object.
     * @tparam Sort Type of sort function object.
     * @tparam Args Types of arguments to forward to the sort function object.
     * @param first An iterator to the first element of the range to sort.
     * @param last An iterator past the last element of the range to sort.
     * @param compare A valid comparison function object.
     * @param algo A valid sort function object.
     * @param args Arguments to forward to the sort function object, if any.
     */
    template<typename Compare, typename Sort = std_sort, typename... Args>
    void sort(iterator_type first, iterator_type last, Compare compare, Sort algo = Sort{}, Args &&... args) {
        ENTT_ASSERT(!(last < first));
        ENTT_ASSERT(!(last > end()));

        // Translate the storage iterators into sparse-set iterators, then let
        // the sparse set drive the permutation via `arrange`; the `apply`
        // callback mirrors every entity swap onto the instances vector.
        const auto from = underlying_type::begin() + std::distance(begin(), first);
        const auto to = from + std::distance(first, last);

        const auto apply = [this](const auto lhs, const auto rhs) {
            std::swap(instances[underlying_type::index(lhs)], instances[underlying_type::index(rhs)]);
        };

        if constexpr(std::is_invocable_v<Compare, const object_type &, const object_type &>) {
            // Comparator works on components: wrap it so it can be fed entity
            // identifiers by the sparse set.
            static_assert(!std::is_empty_v<object_type>);

            underlying_type::arrange(from, to, std::move(apply), [this, compare = std::move(compare)](const auto lhs, const auto rhs) {
                return compare(std::as_const(instances[underlying_type::index(lhs)]), std::as_const(instances[underlying_type::index(rhs)]));
            }, std::move(algo), std::forward<Args>(args)...);
        } else {
            underlying_type::arrange(from, to, std::move(apply), std::move(compare), std::move(algo), std::forward<Args>(args)...);
        }
    }

    /*! @brief Resets a storage. */
    void reset() {
        underlying_type::reset();
        instances.clear();
    }

private:
    std::vector<object_type> instances;
};


/*!
@copydoc basic_storage */
template<typename Entity, typename Type>
class basic_storage<Entity, Type, std::enable_if_t<std::is_empty_v<Type>>>: public sparse_set<Entity> {
    using traits_type = entt_traits<std::underlying_type_t<Entity>>;
    using underlying_type = sparse_set<Entity>;

    // Iterator for the empty-type specialization. It follows the same
    // reversed-index scheme as the primary template but owns no storage:
    // empty types are never instantiated, so dereferencing yields a
    // default-constructed temporary and operator-> yields nullptr.
    class iterator {
        friend class basic_storage<Entity, Type>;

        using index_type = typename traits_type::difference_type;

        iterator(const index_type idx) ENTT_NOEXCEPT
            : index{idx}
        {}

    public:
        using difference_type = index_type;
        using value_type = Type;
        using pointer = const value_type *;
        // Dereferencing returns a value, not a reference: there is nothing to
        // refer to for an empty type.
        using reference = value_type;
        using iterator_category = std::input_iterator_tag;

        iterator() ENTT_NOEXCEPT = default;

        iterator & operator++() ENTT_NOEXCEPT {
            return --index, *this;
        }

        iterator operator++(int) ENTT_NOEXCEPT {
            iterator orig = *this;
            return ++(*this), orig;
        }

        iterator & operator--() ENTT_NOEXCEPT {
            return ++index, *this;
        }

        iterator operator--(int) ENTT_NOEXCEPT {
            iterator orig = *this;
            return --(*this), orig;
        }

        iterator & operator+=(const difference_type value) ENTT_NOEXCEPT {
            index -= value;
            return *this;
        }

        iterator operator+(const difference_type value) const ENTT_NOEXCEPT {
            return iterator{index-value};
        }

        iterator & operator-=(const difference_type value) ENTT_NOEXCEPT {
            return (*this += -value);
        }

        iterator operator-(const difference_type value) const ENTT_NOEXCEPT {
            return (*this + -value);
        }

        difference_type operator-(const iterator &other) const ENTT_NOEXCEPT {
            return other.index - index;
        }

        reference operator[](const difference_type) const ENTT_NOEXCEPT {
            return {};
        }

        bool operator==(const iterator &other) const ENTT_NOEXCEPT {
            return other.index == index;
        }

        bool operator!=(const iterator &other) const ENTT_NOEXCEPT {
            return !(*this == other);
        }

        bool operator<(const iterator &other) const ENTT_NOEXCEPT {
            return index > other.index;
        }

        bool operator>(const iterator &other) const ENTT_NOEXCEPT {
            return index < other.index;
        }

        bool operator<=(const iterator &other) const ENTT_NOEXCEPT {
            return !(*this > other);
        }

        bool operator>=(const iterator &other) const ENTT_NOEXCEPT {
            return !(*this < other);
        }

        pointer operator->() const ENTT_NOEXCEPT {
            return nullptr;
        }

        reference operator*() const ENTT_NOEXCEPT {
            return {};
        }

    private:
        index_type index;
    };

public:
    /*! @brief Type of the objects associated with the entities. */
    using object_type = Type;
    /*! @brief Underlying entity identifier. */
    using entity_type = Entity;
    /*! @brief Unsigned integer type. */
    using size_type = std::size_t;
    /*! @brief Iterator type (an input iterator, see `iterator_category` above). */
    using iterator_type = iterator;

    /**
     * @brief Returns an iterator to the beginning.
     *
     * The returned iterator points to the first instance of the given type. If
     * the storage is empty, the returned iterator will be equal to `end()`.
     *
     * @note
     * Input iterators stay true to the order imposed by a call to either `sort`
     * or `respect`.
     *
     * @return An iterator to the first instance of the given type.
     */
    iterator_type cbegin() const ENTT_NOEXCEPT {
        const typename traits_type::difference_type pos = underlying_type::size();
        return iterator_type{pos};
    }

    /*! @copydoc cbegin */
    iterator_type begin() const ENTT_NOEXCEPT {
        return cbegin();
    }

    /**
     * @brief Returns an iterator to the end.
     *
     * The returned iterator points to the element following the last instance
     * of the given type. Attempting to dereference the returned iterator
     * results in undefined behavior.
     *
     * @note
     * Input iterators stay true to the order imposed by a call to either `sort`
     * or `respect`.
     *
     * @return An iterator to the element following the last instance of the
     * given type.
     */
    iterator_type cend() const ENTT_NOEXCEPT {
        return iterator_type{};
    }

    /*! @copydoc cend */
    iterator_type end() const ENTT_NOEXCEPT {
        return cend();
    }

    /**
     * @brief Returns the object associated with an entity.
     *
     * @note
     * Empty types aren't explicitly instantiated. Therefore, this function
     * always returns a temporary object.
     *
     * @warning
     * Attempting to use an entity that doesn't belong to the storage results in
     * undefined behavior.<br/>
     * An assertion will abort the execution at runtime in debug mode if the
     * storage doesn't contain the given entity.
     *
     * @param entt A valid entity identifier.
     * @return The object associated with the entity.
     */
    object_type get([[maybe_unused]] const entity_type entt) const ENTT_NOEXCEPT {
        ENTT_ASSERT(underlying_type::has(entt));
        return {};
    }

    /**
     * @brief Assigns one or more entities to a storage.
     *
     * The object type must be at least default constructible.
     *
     * @warning
     * Attempting to assign an entity that already belongs to the storage
     * results in undefined behavior.<br/>
     * An assertion will abort the execution at runtime in debug mode if the
     * storage already contains the given entity.
     *
     * @tparam It Type of forward iterator.
     * @param first An iterator to the first element of the range of entities.
     * @param last An iterator past the last element of the range of entities.
     * @return An iterator to the list of instances just created and sorted the
     * same of the entities.
     */
    template<typename It>
    iterator_type batch(It first, It last, const object_type & = {}) {
        // Only the entities are recorded: there are no objects to create.
        underlying_type::batch(first, last);
        return begin();
    }
};

/*! @copydoc basic_storage */
template<typename Entity, typename Type>
struct storage: basic_storage<Entity, Type> {};


}


#endif // ENTT_ENTITY_STORAGE_HPP
//
// Copyright Aliaksei Levin (levlam@telegram.org), Arseny Smirnov (arseny30@gmail.com) 2014-2022
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//

// FileUploader drives the upload of a single local file to Telegram servers,
// splitting it into parts and (for secret chats) encrypting each part with
// AES-IGE before it is sent. It cooperates with the generic FileLoader part
// scheduler via the init/start_part/process_part/on_progress callbacks.

#include "td/telegram/files/FileUploader.h"

#include "td/telegram/files/FileLoaderUtils.h"
#include "td/telegram/Global.h"
#include "td/telegram/net/DcId.h"
#include "td/telegram/net/NetQueryDispatcher.h"
#include "td/telegram/SecureStorage.h"
#include "td/telegram/telegram_api.h"

#include "td/utils/buffer.h"
#include "td/utils/common.h"
#include "td/utils/crypto.h"
#include "td/utils/format.h"
#include "td/utils/logging.h"
#include "td/utils/misc.h"
#include "td/utils/port/path.h"
#include "td/utils/Random.h"
#include "td/utils/ScopeGuard.h"

namespace td {

FileUploader::FileUploader(const LocalFileLocation &local, const RemoteFileLocation &remote, int64 expected_size,
                           const FileEncryptionKey &encryption_key, std::vector<int> bad_parts,
                           unique_ptr<Callback> callback)
    : local_(local)
    , remote_(remote)
    , expected_size_(expected_size)
    , encryption_key_(encryption_key)
    , bad_parts_(std::move(bad_parts))
    , callback_(std::move(callback)) {
  if (encryption_key_.is_secret()) {
    // Seed the running IV for sequential encryption and remember the initial
    // IV string so generate_iv_map() can re-derive per-part IVs later.
    iv_ = encryption_key_.mutable_iv();
    generate_iv_ = encryption_key_.iv_slice().str();
  }
  // A secure-storage upload that was interrupted mid-way cannot be resumed;
  // drop the stale partial remote location and start from scratch.
  if (remote_.type() == RemoteFileLocation::Type::Partial && encryption_key_.is_secure() &&
      remote_.partial().part_count_ != remote_.partial().ready_part_count_) {
    remote_ = RemoteFileLocation{};
  }
}

// Computes the initial upload state: which parts are already on the server
// (resuming a partial upload) and the current size/readiness of the local file.
Result<FileLoader::FileInfo> FileUploader::init() {
  if (remote_.type() == RemoteFileLocation::Type::Full) {
    return Status::Error("File is already uploaded");
  }
  // file_size is needed only for partial local locations, but for uploaded partial files
  // size is yet unknown or local location is full, so we can always pass 0 here
  TRY_RESULT(prefix_info, on_update_local_location(local_, 0));
  (void)prefix_info;

  int offset = 0;
  int part_size = 0;
  if (remote_.type() == RemoteFileLocation::Type::Partial) {
    // Resume: reuse the server-side file identifier and part size.
    const auto &partial = remote_.partial();
    file_id_ = partial.file_id_;
    part_size = partial.part_size_;
    big_flag_ = partial.is_big_ != 0;
    offset = partial.ready_part_count_;
  } else {
    // Fresh upload: pick a random file identifier.
    file_id_ = Random::secure_int64();
    big_flag_ = is_file_big(file_type_, expected_size_);
  }

  // Mark the first `offset` parts as done, except those reported bad.
  std::vector<bool> ok(offset, true);
  for (auto bad_id : bad_parts_) {
    if (bad_id >= 0 && bad_id < offset) {
      ok[bad_id] = false;
    }
  }
  std::vector<int> parts;
  for (int i = 0; i < offset; i++) {
    if (ok[i]) {
      parts.push_back(i);
    }
  }
  LOG(DEBUG) << "Init file uploader for " << remote_ << " with offset = " << offset
             << " and part size = " << part_size;
  // If the very first part is bad, restart the whole upload from zero.
  if (!ok.empty() && !ok[0]) {
    parts.clear();
  }

  FileInfo res;
  res.size = local_size_;
  res.is_size_final = local_is_ready_;
  res.part_size = part_size;
  res.ready_parts = std::move(parts);
  res.is_upload = true;
  return res;
}

// Reacts to a change of the local file location (e.g. a download producing more
// of a partial file). Opens the file descriptor, validates sizes, and for
// secure-storage files encrypts the whole file into a temporary copy first.
// Returns the currently uploadable prefix of the file.
Result<FileLoader::PrefixInfo> FileUploader::on_update_local_location(const LocalFileLocation &location,
                                                                      int64 file_size) {
  // Release the fd on every exit path unless keep_fd_ is set.
  SCOPE_EXIT {
    try_release_fd();
  };

  if (encryption_key_.is_secure() && !fd_path_.empty()) {
    return Status::Error("Can't change local location for Secure file");
  }

  string path;
  int64 local_size = -1;
  bool local_is_ready{false};
  FileType file_type{FileType::Temp};
  if (location.type() == LocalFileLocation::Type::Empty ||
      (location.type() == LocalFileLocation::Type::Partial && encryption_key_.is_secure())) {
    // Nothing usable locally yet (secure files can only be uploaded whole).
    path = "";
    local_size = 0;
    local_is_ready = false;
    file_type = FileType::Temp;
  } else if (location.type() == LocalFileLocation::Type::Partial) {
    // Only the contiguous ready prefix of a partial file can be uploaded.
    path = location.partial().path_;
    local_size = Bitmask(Bitmask::Decode{}, location.partial().ready_bitmask_)
                     .get_ready_prefix_size(0, location.partial().part_size_, file_size);
    local_is_ready = false;
    file_type = location.partial().file_type_;
  } else {
    path = location.full().path_;
    if (path.empty()) {
      return Status::Error("FullLocalFileLocation with empty path");
    }
    local_is_ready = true;
    file_type = location.full().file_type_;
  }
  LOG(INFO) << "In FileUploader::on_update_local_location with " << location << ". Have path = \"" << path
            << "\", local_size = " << local_size << ", local_is_ready = " << local_is_ready
            << " and file type = " << file_type;

  file_type_ = file_type;
  bool is_temp = false;
  if (encryption_key_.is_secure() && local_is_ready && remote_.type() == RemoteFileLocation::Type::Empty) {
    // Secure storage: encrypt the complete file into a temp copy and upload
    // that copy instead; report its hash to the callback.
    TRY_RESULT(file_fd_path, open_temp_file(FileType::Temp));
    file_fd_path.first.close();
    auto new_path = std::move(file_fd_path.second);
    TRY_RESULT(hash, secure_storage::encrypt_file(encryption_key_.secret(), path, new_path));
    LOG(INFO) << "ENCRYPT " << path << " " << new_path;
    callback_->on_hash(hash.as_slice().str());
    path = new_path;
    is_temp = true;
  }

  if (!path.empty() && (path != fd_path_ || fd_.empty())) {
    auto res_fd = FileFd::open(path, FileFd::Read);

    // Race: partial location could be already deleted. Just ignore such locations
    if (res_fd.is_error()) {
      if (location.type() == LocalFileLocation::Type::Partial) {
        LOG(INFO) << "Ignore partial local location: " << res_fd.error();
        PrefixInfo info;
        info.size = local_size_;
        info.is_ready = local_is_ready_;
        return info;
      }
      return res_fd.move_as_error();
    }
    fd_.close();
    fd_ = res_fd.move_as_ok();
    fd_path_ = path;
    is_temp_ = is_temp;
  }

  if (local_is_ready) {
    CHECK(!fd_.empty());
    // For a full local file trust the on-disk size, not the caller's value.
    TRY_RESULT_ASSIGN(local_size, fd_.get_size());
    LOG(INFO) << "Set file local_size to " << local_size;
    if (local_size == 0) {
      return Status::Error("Can't upload empty file");
    }
  } else if (!fd_.empty()) {
    TRY_RESULT(real_local_size, fd_.get_size());
    if (real_local_size < local_size) {
      // The file on disk is shorter than the advertised ready prefix; keep the
      // previous state and wait for the file to grow.
      LOG(ERROR) << tag("real_local_size", real_local_size) << " < " << tag("local_size", local_size);
      PrefixInfo info;
      info.size = local_size_;
      info.is_ready = local_is_ready_;
      return info;
    }
  }

  local_size_ = local_size;
  // NOTE(review): the (10 << 20)/(30 << 20) values look like a special-case
  // heuristic for a default expected size — confirm intent before changing.
  if (expected_size_ < local_size_ && (expected_size_ != (10 << 20) || local_size_ >= (30 << 20))) {
    expected_size_ = local_size_;
  }
  local_is_ready_ = local_is_ready;

  PrefixInfo info;
  info.size = local_size_;
  info.is_ready = local_is_ready_;
  return info;
}

// Called when the whole upload succeeded: close the fd and remove the
// temporary encrypted copy, if one was created.
Status FileUploader::on_ok(int64 size) {
  fd_.close();
  if (is_temp_) {
    LOG(INFO) << "UNLINK " << fd_path_;
    unlink(fd_path_).ignore();
  }
  return Status::OK();
}

// Called when the upload failed: same cleanup as on_ok, then propagate the
// error to the owner via the callback.
void FileUploader::on_error(Status status) {
  fd_.close();
  if (is_temp_) {
    LOG(INFO) << "UNLINK " << fd_path_;
    unlink(fd_path_).ignore();
  }
  callback_->on_error(std::move(status));
}

// Extends iv_map_ so that every part up to the current end of file has a known
// AES-IGE IV. IGE chains IVs part-to-part, so out-of-order part encryption
// requires replaying the encryption of all preceding parts.
Status FileUploader::generate_iv_map() {
  LOG(INFO) << "Generate iv_map " << generate_offset_ << " " << local_size_;
  auto part_size = get_part_size();
  auto encryption_key = FileEncryptionKey(encryption_key_.key_slice(), generate_iv_);
  BufferSlice bytes(part_size);
  if (iv_map_.empty()) {
    iv_map_.push_back(encryption_key.mutable_iv());
  }
  CHECK(!fd_.empty());
  for (; generate_offset_ + static_cast<int64>(part_size) < local_size_;
       generate_offset_ += static_cast<int64>(part_size)) {
    TRY_RESULT(read_size, fd_.pread(bytes.as_slice(), generate_offset_));
    if (read_size != part_size) {
      return Status::Error("Failed to read file part (for iv_map)");
    }
    // Encrypt in place purely to advance the chained IV; ciphertext discarded.
    aes_ige_encrypt(as_slice(encryption_key.key()), as_slice(encryption_key.mutable_iv()), bytes.as_slice(),
                    bytes.as_slice());
    iv_map_.push_back(encryption_key.mutable_iv());
  }
  generate_iv_ = encryption_key.iv_slice().str();
  return Status::OK();
}

// Makes sure the fd is open before a batch of parts is started. Failing to
// open is fatal only once the local file is complete.
Status FileUploader::before_start_parts() {
  auto status = acquire_fd();
  if (status.is_error() && !local_is_ready_) {
    return Status::Error(1, "Can't open temporary file");
  }
  return status;
}

void FileUploader::after_start_parts() {
  try_release_fd();
}

// Reads one part from disk, encrypts it for secret chats, and wraps it in the
// appropriate upload.saveFilePart/upload.saveBigFilePart network query.
Result<std::pair<NetQueryPtr, bool>> FileUploader::start_part(Part part, int32 part_count, int64 streaming_offset) {
  auto padded_size = part.size;
  if (encryption_key_.is_secret()) {
    // AES block alignment: round the buffer up to a multiple of 16 bytes.
    padded_size = (padded_size + 15) & ~15;
  }
  BufferSlice bytes(padded_size);
  TRY_RESULT(size, fd_.pread(bytes.as_slice().truncate(part.size), part.offset));
  if (encryption_key_.is_secret()) {
    // Fill the padding with random bytes before encrypting.
    Random::secure_bytes(bytes.as_slice().substr(part.size));
    if (next_offset_ == part.offset) {
      // Sequential part: continue the running IV chain.
      aes_ige_encrypt(as_slice(encryption_key_.key()), as_slice(iv_), bytes.as_slice(), bytes.as_slice());
      next_offset_ += static_cast<int64>(bytes.size());
    } else {
      // Out-of-order part: look the IV up in (or extend) the precomputed map.
      if (part.id >= static_cast<int32>(iv_map_.size())) {
        TRY_STATUS(generate_iv_map());
      }
      CHECK(part.id < static_cast<int32>(iv_map_.size()) && part.id >= 0);
      auto iv = iv_map_[part.id];
      aes_ige_encrypt(as_slice(encryption_key_.key()), as_slice(iv), bytes.as_slice(), bytes.as_slice());
    }
  }
  if (size != part.size) {
    return Status::Error("Failed to read file part");
  }

  NetQueryPtr net_query;
  if (big_flag_) {
    // Total part count is reported only once the local size is final.
    auto query =
        telegram_api::upload_saveBigFilePart(file_id_, part.id, local_is_ready_ ? part_count : -1, std::move(bytes));
    net_query = G()->net_query_creator().create(query, {}, DcId::main(), NetQuery::Type::Upload);
  } else {
    auto query = telegram_api::upload_saveFilePart(file_id_, part.id, std::move(bytes));
    net_query = G()->net_query_creator().create(query, {}, DcId::main(), NetQuery::Type::Upload);
  }
  net_query->file_type_ = narrow_cast<int32>(file_type_);
  return std::make_pair(std::move(net_query), false);
}

// Interprets the server answer for one part; returns the number of bytes
// acknowledged (the part size) on success.
Result<size_t> FileUploader::process_part(Part part, NetQueryPtr net_query) {
  if (net_query->is_error()) {
    return std::move(net_query->error());
  }
  Result<bool> result = [&] {
    if (big_flag_) {
      return fetch_result<telegram_api::upload_saveBigFilePart>(net_query->ok());
    } else {
      return fetch_result<telegram_api::upload_saveFilePart>(net_query->ok());
    }
  }();
  if (result.is_error()) {
    return result.move_as_error();
  }
  if (!result.ok()) {
    // TODO: it is possible
    return Status::Error(500, "Internal Server Error during file upload");
  }
  return part.size;
}

// Forwards progress to the owner; once every part is confirmed, reports the
// final partial remote location via on_ok.
void FileUploader::on_progress(Progress progress) {
  callback_->on_partial_upload(PartialRemoteFileLocation{file_id_, progress.part_count, progress.part_size,
                                                         progress.ready_part_count, big_flag_},
                               progress.ready_size);
  if (progress.is_ready) {
    callback_->on_ok(file_type_,
                     PartialRemoteFileLocation{file_id_, progress.part_count, progress.part_size,
                                               progress.ready_part_count, big_flag_},
                     local_size_);
  }
}

FileLoader::Callback *FileUploader::get_callback() {
  return static_cast<FileLoader::Callback *>(callback_.get());
}

// Toggles whether the fd must stay open between parts; releasing it when not
// needed keeps the number of simultaneously open descriptors low.
void FileUploader::keep_fd_flag(bool keep_fd) {
  keep_fd_ = keep_fd;
  try_release_fd();
}

void FileUploader::try_release_fd() {
  if (!keep_fd_ && !fd_.empty()) {
    fd_.close();
  }
}

// Reopens the file lazily from the remembered path.
Status FileUploader::acquire_fd() {
  if (fd_.empty()) {
    TRY_RESULT_ASSIGN(fd_, FileFd::open(fd_path_, FileFd::Read));
  }
  return Status::OK();
}

}  // namespace td
/**************************************************************************
 * Copyright(c) 1998-1999, ALICE Experiment at CERN, All rights reserved. *
 *                                                                        *
 * Author: The ALICE Off-line Project.                                    *
 * Contributors are mentioned in the code where appropriate.              *
 *                                                                        *
 * Permission to use, copy, modify and distribute this software and its   *
 * documentation strictly for non-commercial purposes is hereby granted   *
 * without fee, provided that the above copyright notice appears in all   *
 * copies and that both the copyright notice and this permission notice   *
 * appear in the supporting documentation. The authors make no claims     *
 * about the suitability of this software for any purpose. It is          *
 * provided "as is" without express or implied warranty.                  *
 **************************************************************************/

// AliAnalysisTaskK0SPFemto: femtoscopy analysis task correlating K0S
// candidates (V0s) with protons, with event mixing in (z-vertex, centrality)
// bins. Output: QA histograms (list) plus signal and background TTrees.

#include "TChain.h"
#include "TH1F.h"
#include "TH2F.h"
#include "TList.h"
#include "TTree.h"
#include "TMath.h"
#include "Riostream.h"
#include "AliAnalysisTask.h"
#include "AliAnalysisManager.h"
#include "AliAODEvent.h"
#include "AliMultSelection.h"
#include "AliPIDResponse.h"
#include "AliAODMCParticle.h"
#include "AliAODInputHandler.h"
#include "AliMCEventHandler.h"
#include "AliStack.h"
#include "AliVTrack.h"
#include "AliAODv0.h"
#include "TDatabasePDG.h"
#include "AliExternalTrackParam.h"
#include "AliAnalysisTaskK0SPFemto.h"
#include "AliAnalysisK0SPEventCollection.h"

class AliAnalysisTaskK0SPFemto; // your analysis class
//class AliAnalysisK0SPEventCollection;
class AliPIDResponse;
class AliMultSelection;

using namespace std; // std namespace: so you can do things like 'cout'

ClassImp(AliAnalysisTaskK0SPFemto) // classimp: necessary for root

// I/O ("dummy") constructor, used by ROOT streaming. Every data member is
// explicitly zero-initialized; heap members (fEventColl, farrGT, histograms)
// stay null until UserCreateOutputObjects() allocates them.
AliAnalysisTaskK0SPFemto::AliAnalysisTaskK0SPFemto()
:AliAnalysisTaskSE(),
Neventi(0),
fEventCuts(0),
fAOD(NULL),
fIsMC(kFALSE),
fPIDResponse(0),
farrGT(0),
fTrackBufferSize(20200), // was 18k
fEventColl(0x0),
fEvt(0x0),
fMaxFirstMult(20),//(3000), // 1000 for protons
fMaxSecondMult(1000),//(20), // was 100
fzVertexBins(10),
fnCentBins(20),
fnEventsToMix(7),
fFilterBit(4),
fPDGMassFirst(0.),
fPDGcodeFirst(310),
fPDGMassSecond(0.),
fPDGcodeSecond(2212),
fHMtrigger(kFALSE),
fHistSparseSignal(0),
fHistSparseBkg(0),
fOutputContainer(NULL),
tCentrality(0),
tSphericity(0),
tSpherocity(0),
tKtpair(0),
tkStar(0),
tIsCommonParton(0),
tPtV0(0),
tPTotV0(0),
tThetaV0(0),
tPhiV0(0),
tDcaPosV0(0),
tDcaNegV0(0),
tInvMassK0s(0),
tInvMassLambda(0),
tInvMassAntiLambda(0),
tCosPointingAngleV0(0),
tPtP(0),
tPTotP(0),
tThetaP(0),
tPhiP(0),
tSignP(0),
tDCAxyP(0),
tDCAzP(0),
tMassTOFP(0),
tMCtruepair(0),
tMCSameMother(0),
tMCMotherV0(0),
tMCMotherP(0),
tMCptcTypeV0(0),
tMCptcTypeP(0),
tMCSameGM(0),
tMotherPDG(0),
tpdgcodeV0(0),
tpdgcodeP(0),
tKstarGen(0),
fHistEventMultiplicity(0),
fHistCentrality(0),
fHistVertexDistribution(0),
fHistSphericity(0),
fHistSpherocity(0),
fHistMassK0S(0),
fHistFirstNPionTPCdEdx(0),
fHistFirstPPionTPCdEdx(0),
fHistSecondTPCdEdx(0),
fHistSecondMassTOFvsPt3sTPC(0)
{
  cout<<"I'm taking this dummy constructor!!!!"<<endl;
  // NOTE(review): these PDG lookups do run in the I/O constructor, despite
  // the "remain empty" guidance below — confirm this is intended.
  fPDGMassSecond = TDatabasePDG::Instance()->GetParticle(fPDGcodeSecond)->Mass();
  fPDGMassFirst = TDatabasePDG::Instance()->GetParticle(fPDGcodeFirst)->Mass();
  // default constructor, don't allocate memory here!
  // this is used by root for IO purposes, it needs to remain empty
}
//_____________________________________________________________________________
// Named constructor used by the AddTask macro: same member initialization as
// the dummy constructor, plus declaration of the input chain and the three
// output slots (QA list, signal tree, background tree).
AliAnalysisTaskK0SPFemto::AliAnalysisTaskK0SPFemto(const char* name)
: AliAnalysisTaskSE(name),
Neventi(0),
fEventCuts(0),
fAOD(NULL),
fIsMC(kFALSE),
fPIDResponse(0),
farrGT(0),
fTrackBufferSize(20200), // was 18k
fEventColl(0x0),
fEvt(0x0),
fMaxFirstMult(20),//(3000), // 1000 for protons
fMaxSecondMult(1000),//(20), // was 100
fzVertexBins(10),
fnCentBins(20),
fnEventsToMix(7),
fFilterBit(4),
fPDGMassFirst(0.),
fPDGcodeFirst(310),
fPDGMassSecond(0.),
fPDGcodeSecond(2212),
fHMtrigger(kFALSE),
fHistSparseSignal(0),
fHistSparseBkg(0),
fOutputContainer(NULL),
tCentrality(0),
tSphericity(0),
tSpherocity(0),
tKtpair(0),
tkStar(0),
tIsCommonParton(0),
tPtV0(0),
tPTotV0(0),
tThetaV0(0),
tPhiV0(0),
tDcaPosV0(0),
tDcaNegV0(0),
tInvMassK0s(0),
tInvMassLambda(0),
tInvMassAntiLambda(0),
tCosPointingAngleV0(0),
tPtP(0),
tPTotP(0),
tThetaP(0),
tPhiP(0),
tSignP(0),
tDCAxyP(0),
tDCAzP(0),
tMassTOFP(0),
tMCtruepair(0),
tMCSameMother(0),
tMCMotherV0(0),
tMCMotherP(0),
tMCptcTypeV0(0),
tMCptcTypeP(0),
tMCSameGM(0),
tMotherPDG(0),
tpdgcodeV0(0),
tpdgcodeP(0),
tKstarGen(0),
fHistEventMultiplicity(0),
fHistCentrality(0),
fHistVertexDistribution(0),
fHistSphericity(0),
fHistSpherocity(0),
fHistMassK0S(0),
fHistFirstNPionTPCdEdx(0),
fHistFirstPPionTPCdEdx(0),
fHistSecondTPCdEdx(0),
fHistSecondMassTOFvsPt3sTPC(0)
{
  cout<<"real constructor"<<endl;
  fPDGMassSecond = TDatabasePDG::Instance()->GetParticle(fPDGcodeSecond)->Mass();
  fPDGMassFirst = TDatabasePDG::Instance()->GetParticle(fPDGcodeFirst)->Mass();
  // constructor
  DefineInput(0, TChain::Class()); // define the input of the analysis: in this case we take a 'chain' of events
  // this chain is created by the analysis manager, so no need to worry about it,
  // it does its work automatically
  DefineOutput(1, TList::Class()); // define the output of the analysis: in this case it's a list of histograms
// you can add more output objects by calling DefineOutput(2, classname::Class()) // if you add more output objects, make sure to call PostData for all of them, and to // make changes to your AddTask macro! DefineOutput(2, TTree::Class()); DefineOutput(3, TTree::Class()); } //_____________________________________________________________________________ AliAnalysisTaskK0SPFemto::~AliAnalysisTaskK0SPFemto() { // destructor if (fOutputContainer){ delete fOutputContainer; fOutputContainer = 0x0; } if(fHistSparseSignal){ delete fHistSparseSignal; fHistSparseSignal = 0x0; } if(fHistSparseBkg) { delete fHistSparseBkg; fHistSparseBkg = 0x0; } if (farrGT) delete[] farrGT; farrGT=0; for (unsigned short i=0; i<fzVertexBins; i++) { for (unsigned short j=0; j<fnCentBins; j++) { delete fEventColl[i][j]; } delete[] fEventColl[i]; } delete[] fEventColl; } //_____________________________________________________________________________ void AliAnalysisTaskK0SPFemto::UserCreateOutputObjects() { // create output objects // // this function is called ONCE at the start of your analysis (RUNTIME) // here you ceate the histograms that you want to use // // the histograms are in this case added to a tlist, this list is in the end saved // to an output event // //file collection OpenFile(1); fEventColl = new AliAnalysisK0SPEventCollection **[fzVertexBins]; for (unsigned short i=0; i<fzVertexBins; i++) { fEventColl[i] = new AliAnalysisK0SPEventCollection *[fnCentBins]; for (unsigned short j=0; j<fnCentBins; j++) { fEventColl[i][j] = new AliAnalysisK0SPEventCollection(fnEventsToMix+1, fMaxFirstMult, fMaxSecondMult); } } // Store pointer to global tracks farrGT = new Int_t[fTrackBufferSize]; //Define and fill the OutputContainer fOutputContainer = new TList(); fOutputContainer->SetOwner(kTRUE); // Create histograms fHistEventMultiplicity = new TH1F( "fHistEventMultiplicity" , "Nb of Events" , 13 , 0.5,13.5); fHistEventMultiplicity->GetXaxis()->SetBinLabel(1,"All Events"); 
fHistEventMultiplicity->GetXaxis()->SetBinLabel(2,"Events w/PV and PID response");
fHistEventMultiplicity->GetXaxis()->SetBinLabel(3,"Events w/|Vz|<10cm");
fHistEventMultiplicity->GetXaxis()->SetBinLabel(4,"Centrality acc");
fHistEventMultiplicity->GetXaxis()->SetBinLabel(5,"w/o PileUp");
fHistEventMultiplicity->GetXaxis()->SetBinLabel(6,"Any Events");
fHistEventMultiplicity->GetXaxis()->SetBinLabel(7,"Central Events");
fHistEventMultiplicity->GetXaxis()->SetBinLabel(8,"Semi-Central Events");
fHistEventMultiplicity->GetXaxis()->SetBinLabel(9,"MB Events");
fHistEventMultiplicity->GetXaxis()->SetBinLabel(10,"kInt7 Events");
fHistEventMultiplicity->GetXaxis()->SetBinLabel(11,"HM Events");
fHistEventMultiplicity->GetXaxis()->SetBinLabel(12,"Is Selected Events");
// NOTE(review): the histogram has 13 bins but only 12 labels — bin 13 is
// unlabeled; confirm whether a label is missing or the bin count is wrong.
fOutputContainer->Add(fHistEventMultiplicity);

// hmult = new TH1I("hmult","Multiplicity distribution (after cuts on event)",30000,-0.5,2999.5);
// hmult->GetXaxis()->SetTitle("Number of tracklets");
// fOutputContainer->Add(hmult);

// --- Event-level QA histograms ---
// NOTE(review): 10001 bins over [-0.5, 100.5] gives non-integer-aligned bin
// edges for a percentile axis — presumably intended as ~0.01% bins; verify.
fHistCentrality = new TH1F("fHistCentrality", "Number of events", 10001, -0.5, 100.5);
fHistCentrality ->GetXaxis()->SetTitle("Centrality");
fHistCentrality ->GetYaxis()->SetTitle("Entries");
fOutputContainer->Add(fHistCentrality);

fHistVertexDistribution = new TH1F("fHistVertexDistribution", "Primary vertex distribution", 40, -20., 20.);
fHistVertexDistribution ->GetXaxis()->SetTitle("z_{v} (cm)");
fHistVertexDistribution ->GetYaxis()->SetTitle("Entries");
fOutputContainer->Add(fHistVertexDistribution);

fHistSphericity = new TH1F("fHistSphericity", "Sphericity Distribution", 40, 0., 1.);
fHistSphericity ->GetXaxis()->SetTitle("Sphericity");
fHistSphericity ->GetYaxis()->SetTitle("Entries");
fOutputContainer->Add(fHistSphericity);

fHistSpherocity = new TH1F("fHistSpherocity", "Spherocity Distribution", 40, 0., 1.);
fHistSpherocity ->GetXaxis()->SetTitle("Spherocity");
fHistSpherocity ->GetYaxis()->SetTitle("Entries");
fOutputContainer->Add(fHistSpherocity);

fHistMassK0S = new TH1F("fHistMassK0S", "MassK0S Distribution", 800, 0.3, 0.7);
fHistMassK0S ->GetXaxis()->SetTitle("MassK0S");
fHistMassK0S ->GetYaxis()->SetTitle("Entries");
fOutputContainer->Add(fHistMassK0S);

// --- PID QA: TPC dE/dx for the V0 daughters and the proton, TOF mass ---
fHistFirstNPionTPCdEdx = new TH2F("fHistFirstNPionTPCdEdx", "fHistFirstNPionTPCdEdx", 400, -6.0, 6.0, 500, 0.0, 2000);
fOutputContainer->Add(fHistFirstNPionTPCdEdx);
fHistFirstPPionTPCdEdx = new TH2F("fHistFirstPPionTPCdEdx", "fHistFirstPPionTPCdEdx", 400, -6.0, 6.0, 500, 0.0, 2000);
fOutputContainer->Add(fHistFirstPPionTPCdEdx);
fHistSecondTPCdEdx = new TH2F("fHistSecondTPCdEdx", "fHistSecondTPCdEdx", 400, -6.0, 6.0, 500, 0.0, 2000);
fOutputContainer->Add(fHistSecondTPCdEdx);
fHistSecondMassTOFvsPt3sTPC = new TH2F("fHistSecondMassTOFvsPt3sTPC", "fHistSecondMassTOFvsPt3sTPC", 200, -2.5, 2.5, 200, -10.0, 10);
fOutputContainer->Add(fHistSecondMassTOFvsPt3sTPC);

fEventCuts.AddQAplotsToList(fOutputContainer);
PostData(1, fOutputContainer);

// --- Signal (same-event pair) tree ---
OpenFile(2);
fHistSparseSignal = new TTree("fHistSparseSignal","fHistSparseSignal");
/*1 */ fHistSparseSignal->Branch("tSignP",&tSignP,"tSignP/I");
/*2 */ fHistSparseSignal->Branch("tCentrality",&tCentrality,"tCentrality/F");
/*3 */ fHistSparseSignal->Branch("tDcaPosV0",&tDcaPosV0,"tDcaPosV0/F");
/*4 */ fHistSparseSignal->Branch("tDcaNegV0",&tDcaNegV0,"tDcaNegV0/F");
/*5 */ fHistSparseSignal->Branch("tDCAxyP",&tDCAxyP,"tDCAxyP/F");
/*6 */ fHistSparseSignal->Branch("tDCAzP",&tDCAzP,"tDCAzP/F");
/*7 */ fHistSparseSignal->Branch("tKtpair",&tKtpair,"tKtpair/F");
/*8 */ fHistSparseSignal->Branch("tkStar",&tkStar,"tkStar/F");
/*9 */ fHistSparseSignal->Branch("tPtV0",&tPtV0,"tPtV0/F");
/*10*/ fHistSparseSignal->Branch("tPtP",&tPtP,"tPtP/F");
/*11*/ fHistSparseSignal->Branch("tSphericity",&tSphericity,"tSphericity/F");
/*12*/ fHistSparseSignal->Branch("tSpherocity",&tSpherocity,"tSpherocity/F");
/*13*/ fHistSparseSignal->Branch("tInvMassK0s",&tInvMassK0s,"tInvMassK0s/F");
/*14*/ fHistSparseSignal->Branch("tInvMassLambda",&tInvMassLambda,"tInvMassLambda/F");
/*15*/ fHistSparseSignal->Branch("tInvMassAntiLambda",&tInvMassAntiLambda,"tInvMassAntiLambda/F");
/*16*/ fHistSparseSignal->Branch("tCosPointingAngleV0",&tCosPointingAngleV0,"tCosPointingAngleV0/F");
/*17*/ fHistSparseSignal->Branch("tThetaV0",&tThetaV0,"tThetaV0/F");
/*18*/ fHistSparseSignal->Branch("tThetaP",&tThetaP,"tThetaP/F");
/*19*/ fHistSparseSignal->Branch("tPhiV0",&tPhiV0,"tPhiV0/F");
/*20*/ fHistSparseSignal->Branch("tPhiP",&tPhiP,"tPhiP/F");
/*21*/ fHistSparseSignal->Branch("tMassTOFP",&tMassTOFP,"tMassTOFP/F");
// MC-truth branches are written only when running on Monte Carlo input.
if (fIsMC) {
  /*22*/ fHistSparseSignal->Branch("tMCtruepair",&tMCtruepair,"tMCtruepair/I");
  /*23*/ fHistSparseSignal->Branch("tMCSameMother",&tMCSameMother,"tMCSameMother/I");
  /*24*/ fHistSparseSignal->Branch("tMCMotherV0",&tMCMotherV0,"tMCMotherV0/I");
  /*25*/ fHistSparseSignal->Branch("tMCMotherP",&tMCMotherP,"tMCMotherP/I");
  /*26*/ fHistSparseSignal->Branch("tMCptcTypeV0",&tMCptcTypeV0,"tMCptcTypeV0/I");
  /*27*/ fHistSparseSignal->Branch("tMCptcTypeP",&tMCptcTypeP,"tMCptcTypeP/I");
  /*28*/ fHistSparseSignal->Branch("tIsCommonParton",&tIsCommonParton,"tIsCommonParton/O");
  /*29*/ fHistSparseSignal->Branch("tMCSameGM",&tMCSameGM,"tMCSameGM/I");
  /*30*/ fHistSparseSignal->Branch("tMotherPDG",&tMotherPDG,"tMotherPDG/I");
  /*31*/ fHistSparseSignal->Branch("tpdgcodeV0",&tpdgcodeV0,"tpdgcodeV0/I");
  /*32*/ fHistSparseSignal->Branch("tpdgcodeP",&tpdgcodeP,"tpdgcodeP/I");
  /*33*/ fHistSparseSignal->Branch("tKstarGen",&tKstarGen,"tKstarGen/F");
}
fHistSparseSignal->SetAutoSave(100000000);
PostData(2,fHistSparseSignal);

// --- Background (mixed-event pair) tree: same layout as the signal tree ---
OpenFile(3);
fHistSparseBkg = new TTree("fHistSparseBkg","fHistSparseBkg");
/*1 */ fHistSparseBkg->Branch("tSignP",&tSignP,"tSignP/I");
/*2 */ fHistSparseBkg->Branch("tCentrality",&tCentrality,"tCentrality/F");
/*3 */ fHistSparseBkg->Branch("tDcaPosV0",&tDcaPosV0,"tDcaPosV0/F");
/*4 */ fHistSparseBkg->Branch("tDcaNegV0",&tDcaNegV0,"tDcaNegV0/F");
/*5 */ fHistSparseBkg->Branch("tDCAxyP",&tDCAxyP,"tDCAxyP/F");
/*6 */ fHistSparseBkg->Branch("tDCAzP",&tDCAzP,"tDCAzP/F");
/*7 */ fHistSparseBkg->Branch("tKtpair",&tKtpair,"tKtpair/F");
/*8 */ fHistSparseBkg->Branch("tkStar",&tkStar,"tkStar/F");
/*9 */ fHistSparseBkg->Branch("tPtV0",&tPtV0,"tPtV0/F");
/*10*/ fHistSparseBkg->Branch("tPtP",&tPtP,"tPtP/F");
/*11*/ fHistSparseBkg->Branch("tSphericity",&tSphericity,"tSphericity/F");
/*12*/ fHistSparseBkg->Branch("tSpherocity",&tSpherocity,"tSpherocity/F");
/*13*/ fHistSparseBkg->Branch("tInvMassK0s",&tInvMassK0s,"tInvMassK0s/F");
/*14*/ fHistSparseBkg->Branch("tInvMassLambda",&tInvMassLambda,"tInvMassLambda/F");
/*15*/ fHistSparseBkg->Branch("tInvMassAntiLambda",&tInvMassAntiLambda,"tInvMassAntiLambda/F");
/*16*/ fHistSparseBkg->Branch("tCosPointingAngleV0",&tCosPointingAngleV0,"tCosPointingAngleV0/F");
/*17*/ fHistSparseBkg->Branch("tThetaV0",&tThetaV0,"tThetaV0/F");
/*18*/ fHistSparseBkg->Branch("tThetaP",&tThetaP,"tThetaP/F");
/*19*/ fHistSparseBkg->Branch("tPhiV0",&tPhiV0,"tPhiV0/F");
/*20*/ fHistSparseBkg->Branch("tPhiP",&tPhiP,"tPhiP/F");
/*21*/ fHistSparseBkg->Branch("tMassTOFP",&tMassTOFP,"tMassTOFP/F");
if (fIsMC) {
  /*22*/ fHistSparseBkg->Branch("tMCtruepair",&tMCtruepair,"tMCtruepair/I");
  /*23*/ fHistSparseBkg->Branch("tMCSameMother",&tMCSameMother,"tMCSameMother/I");
  /*24*/ fHistSparseBkg->Branch("tMCMotherV0",&tMCMotherV0,"tMCMotherV0/I");
  /*25*/ fHistSparseBkg->Branch("tMCMotherP",&tMCMotherP,"tMCMotherP/I");
  /*26*/ fHistSparseBkg->Branch("tMCptcTypeV0",&tMCptcTypeV0,"tMCptcTypeV0/I");
  /*27*/ fHistSparseBkg->Branch("tMCptcTypeP",&tMCptcTypeP,"tMCptcTypeP/I");
  /*28*/ fHistSparseBkg->Branch("tIsCommonParton",&tIsCommonParton,"tIsCommonParton/O");
  /*29*/ fHistSparseBkg->Branch("tMCSameGM",&tMCSameGM,"tMCSameGM/I");
  /*30*/ fHistSparseBkg->Branch("tMotherPDG",&tMotherPDG,"tMotherPDG/I");
  /*31*/ fHistSparseBkg->Branch("tpdgcodeV0",&tpdgcodeV0,"tpdgcodeV0/I");
  /*32*/
fHistSparseBkg->Branch("tpdgcodeP",&tpdgcodeP,"tpdgcodeP/I"); /*33*/ fHistSparseBkg->Branch("tKstarGen",&tKstarGen,"tKstarGen/F"); } fHistSparseBkg->SetAutoSave(100000000); PostData(3, fHistSparseBkg ); } //____________________________________________________________________________ void AliAnalysisTaskK0SPFemto::UserExec(Option_t *) { // Main loop // Called for each event // if(Neventi>=1000) // { // PostData(1, fOutputContainer); // PostData(2, fHistSparseSignal ); // PostData(3, fHistSparseBkg ); // return; // } // Neventi+=1; // cout<<"Evento numero:"<<Neventi<<endl; AliVVertex *vertexmain =0x0; //RA// AliCentrality* centrality = 0x0; AliMultSelection* centrality = 0x0; Double_t lBestPrimaryVtxPos[3] = {-100.0, -100.0, -100.0}; fHistEventMultiplicity->Fill(1); AliMCEvent *lMCevent = 0x0; AliStack *lMCstack = 0x0; TClonesArray *arrayMC = 0x0; Int_t ntracks = 0; fAOD = dynamic_cast<AliAODEvent*>( InputEvent() ); //RA// cout<<"fAOD: "<<fAOD <<endl; if (!fAOD) { AliWarning("ERROR: AODevent not available \n"); PostData(1, fOutputContainer); PostData(2, fHistSparseSignal ); PostData(3, fHistSparseBkg ); return; } if(fHMtrigger == kTRUE){ //modify trigger in event selection fEventCuts.OverrideAutomaticTriggerSelection(AliVEvent::kHighMultV0); //fEventCuts.OverrideAutomaticTriggerSelection(AliVEvent::kAnyINT); } /// Use the event cut class to apply the required selections if (!fEventCuts.AcceptEvent(fAOD)) { PostData(1, fOutputContainer); PostData(2, fHistSparseSignal ); PostData(3, fHistSparseBkg ); return; } ntracks = fAOD->GetNumberOfTracks(); const AliAODVertex *lPrimaryBestAODVtx = fAOD->GetPrimaryVertex(); if (!lPrimaryBestAODVtx){ AliWarning("No prim. vertex in AOD... return!"); PostData(1, fOutputContainer); PostData(2, fHistSparseSignal ); PostData(3, fHistSparseBkg ); return; } vertexmain = (AliVVertex*) lPrimaryBestAODVtx; lPrimaryBestAODVtx->GetXYZ( lBestPrimaryVtxPos ); if (fIsMC) { //RA// Printf("Reading MC truth!!! 
\n"); arrayMC = (TClonesArray*) fAOD->GetList()->FindObject(AliAODMCParticle::StdBranchName()); if (!arrayMC) AliFatal("Error: MC particles branch not found!\n"); } // PID object AliAnalysisManager *man=AliAnalysisManager::GetAnalysisManager(); AliInputEventHandler* inputHandler = (AliInputEventHandler*) (man->GetInputEventHandler()); UInt_t mask = inputHandler->IsEventSelected(); /*RA// to see how many events are rejected if (!(mask & 0xffffffff)) { PostData(1,fOutputContainer ); return; } */ fPIDResponse = inputHandler->GetPIDResponse(); if(!fPIDResponse) { PostData(1,fOutputContainer ); PostData(2, fHistSparseSignal ); PostData(3, fHistSparseBkg ); AliError("Cannot get pid response"); return; } fHistEventMultiplicity->Fill(2); if((TMath::Abs(lBestPrimaryVtxPos[2])) > 10.) { PostData(1, fOutputContainer); PostData(2, fHistSparseSignal ); PostData(3, fHistSparseBkg ); return; } fHistEventMultiplicity->Fill(3); //event must not be tagged as pileup Bool_t isPileUpSpd=kFALSE; isPileUpSpd=fAOD->IsPileupFromSPD(); if(isPileUpSpd){ PostData(1,fOutputContainer ); PostData(2, fHistSparseSignal ); PostData(3, fHistSparseBkg ); return; } fHistEventMultiplicity->Fill(4); Float_t lcentrality = -99.; // AliMultSelection* centrality = 0x0; centrality = (AliMultSelection *) fAOD->FindListObject("MultSelection"); // cout<<"centrality: "<<centrality<<endl; lcentrality = centrality->GetMultiplicityPercentile("V0M"); //FIXME : Also for pp? 
Test on kd // cout<<"Centrality: "<<lcentrality<<endl; // hmult->Fill(lcentrality); // cout<<"centrality: "<<lcentrality<<endl; if ( lcentrality > 199 ){ //Event didn't pass Event Selections PostData(1,fOutputContainer ); PostData(2, fHistSparseSignal ); PostData(3, fHistSparseBkg ); return; } // if (lcentrality<fCentrLowLim||lcentrality>=fCentrUpLim){ //**AGGIUNGERE fCentrLowLim, fCentrUpLim // PostData(1,fOutputContainer ); // PostData(2, fHistSparseSignal ); // PostData(3, fHistSparseBkg ); // return; // } fHistEventMultiplicity->Fill(5); // Bool_t isSelectedCentral = kFALSE; // Bool_t isSelectedSemiCentral = kFALSE; // Bool_t isSelectedMB = kFALSE; Bool_t isSelectedInt7 = kFALSE; Bool_t isSelectedHM = kFALSE; // Bool_t isSelectedAny = kFALSE; Bool_t isSelected = kFALSE; // isSelectedCentral = (mask & AliVEvent::kCentral); // isSelectedSemiCentral = (mask & AliVEvent::kSemiCentral); // isSelectedMB = (mask & AliVEvent::kMB); isSelectedInt7 = (mask & AliVEvent::kINT7); isSelectedHM = (mask & AliVEvent::kHighMultV0); // isSelectedAny = (mask & AliVEvent::kAnyINT); // if(fYear == 2010 && isSelectedMB ) // isSelected = kTRUE; if( fHMtrigger == kFALSE && isSelectedInt7) isSelected = kTRUE; else if( fHMtrigger == kTRUE && isSelectedHM) isSelected = kTRUE; else isSelected = kFALSE; //cout<<isSelectedAny<<" "<<isSelectedInt7<<" "<<isSelectedHM<<endl; // if(isSelectedAny) // fHistEventMultiplicity->Fill(6); // if(isSelectedCentral) // fHistEventMultiplicity->Fill(7); // if(isSelectedSemiCentral) // fHistEventMultiplicity->Fill(8); // if(isSelectedMB) // fHistEventMultiplicity->Fill(9); if(isSelectedInt7) fHistEventMultiplicity->Fill(10); if(isSelectedHM) fHistEventMultiplicity->Fill(11); //RA// cout<<"Trigger mask: "<<fAOD->GetTriggerMask()<<" "<<AliVEvent::kHighMultV0<<endl; //RA// cout<<"Trigger mask: "<<mask<<" "<<AliVEvent::kHighMultV0<<endl; //RA// cout<<"Event type : "<<fAOD->GetEventType()<<endl; //RA// FIXME : event selection to be added.. 
DONE if(!isSelected){ PostData(1, fOutputContainer); PostData(2, fHistSparseSignal ); PostData(3, fHistSparseBkg ); return; } fHistEventMultiplicity->Fill(12); // is event selected for the analysis //hmult->Fill(((AliAODHeader * )fAOD->GetHeader())->GetRefMultiplicityComb08()); // cout<<"nTracks: "<<ntracks<<" centrality "<<lcentrality<<endl; Double_t fSphericityvalue = CalculateSphericityofEvent(fAOD); Double_t fSpherocityvalue = CalculateSpherocityEvent(fAOD); fHistSphericity->Fill(fSphericityvalue); fHistSpherocity->Fill(fSpherocityvalue); fHistCentrality->Fill(lcentrality); fHistVertexDistribution->Fill(lBestPrimaryVtxPos[2]); const Float_t bfield = (InputEvent())->GetMagneticField(); int fieldsign; if (bfield >=0.) fieldsign = 1; else fieldsign = -1; // Store the event in the buffer to do mixing // ... find vertex... int zBin=0; double zStep=2*10/double(fzVertexBins), zStart=-10.; for (int i=0; i<fzVertexBins; i++) { if ((lBestPrimaryVtxPos[2] > zStart+i*zStep) && (lBestPrimaryVtxPos[2] < zStart+(i+1)*zStep)) { zBin=i; break; } } //CENTRALITY!! // ... and centrality //FIXME : find out how centrality wokrs in AOD in pp and pPb int centralityBin=0; // this should be valid for centrality... if(lcentrality < 0.01) centralityBin=19; else if(lcentrality < 0.1) centralityBin=18; else if(lcentrality < 0.5) centralityBin=17; else if(lcentrality < 1.0) centralityBin=16; else if(lcentrality < 5.0) centralityBin=15; else if(lcentrality < 10.) centralityBin=14; else if(lcentrality < 20.) centralityBin=13; else if(lcentrality < 30.) centralityBin=12; else if(lcentrality < 40.) centralityBin=11; else if(lcentrality < 50.) centralityBin=10; else if(lcentrality < 70.) centralityBin=9; else if(lcentrality <= 100.) 
centralityBin=8; fEventColl[zBin][centralityBin]->FifoShift(); fEvt = fEventColl[zBin][centralityBin]->fEvt; //RA// printf("buffer size: %d\n",fTrackBufferSize); //RA// printf("ntracks: %d\n",ntracks); for (Int_t igt = 0; igt < fTrackBufferSize; igt++) farrGT[igt] = -1; AliAODTrack *globaltrack = 0x0; // Read and store global tracks to retrieve PID information for TPC only tracks for (Int_t igt = 0; igt < ntracks; igt++) { globaltrack = (AliAODTrack*) fAOD->GetTrack(igt); if (!globaltrack) continue; if (globaltrack->GetID()<0 ) continue; if (!globaltrack->IsOn(AliAODTrack::kTPCrefit)) continue; // there are such tracks with no TPC clusters // Check id is not too big for buffer if (globaltrack->GetID()>=fTrackBufferSize) { printf("Warning: track ID too big for buffer: ID: %d, buffer %d\n",globaltrack->GetID(),fTrackBufferSize); //fHistTrackBufferOverflow->Fill(1); continue; } if ( globaltrack->GetTPCNcls()<=0 ) { // such tracks do not have the TPC refit either, filter map is 2 --> ITS constrained // cout<<" No TPC cl for this global track!! "<<igt<<endl; // if (!globaltrack->IsOn(AliAODTrack::kTPCrefit)) cout<<" ... 
and also no tpc refit "<<globaltrack->GetFilterMap()<<endl; continue; } //cout<<" The array will contain "<<igt<<" , it contains "<<farrGT[globaltrack->GetID()]<<endl; // Warn if we overwrite a track if (farrGT[globaltrack->GetID()]>=0) { // Two tracks same id --> checked that it never happens // cout<<" Array already filled "<<farrGT[globaltrack->GetID()]<<endl; } else { farrGT[globaltrack->GetID()] = igt; // solution adopted in the femto framework // cout<<" Set array, now it contains "<<farrGT[globaltrack->GetID()]<<endl; } } globaltrack = 0x0; //**DICHIARAZIONE VARIABILI AliAODTrack *track = 0x0; AliVTrack *vtrackg = 0x0; AliVTrack *vtrack = 0x0; Float_t TPCNCrossedRows=0.; Float_t TPCNclsF=0.; Bool_t isTOFPIDok = kFALSE; Float_t nsigmaTOFs = 10.; Float_t nsigmaTPCs = 10.; Float_t probMis = 0.; Double32_t tTOF = 0.; Float_t beta = 0.; Float_t gamma = 0.; Float_t mass = 0.; Float_t length = 0; Float_t ptot = 0; Int_t label = 0; Int_t PDGcode=0; AliPIDResponse::EDetPidStatus statusTOF; Double_t expectedTimes[AliPID::kSPECIES]; Float_t dz[2] = {-99.,-99.}; Double_t dzg[2]= {-999.,-999.}; Double_t covarg[3]={-999.,-999.,-999.}; AliExternalTrackParam etp1; Float_t rapiditySecond = 0.; Short_t charge = -2; int sCount = 0; AliReconstructedSecondK0SP::MCSecondOrigin_t mcSecondOrigin = AliReconstructedSecondK0SP::kUnassigned; Bool_t isP = kFALSE; // particle Bool_t isaP = kFALSE; // anti-particle for (Int_t ip = 0; ip < ntracks; ip++) { vtrack = fAOD->GetTrack(ip); if (!vtrack) continue; track = dynamic_cast<AliAODTrack*>(vtrack); if(!track) AliFatal("Not a standard AOD"); if(!track->TestFilterBit(fFilterBit)) continue; TPCNCrossedRows = track->GetTPCNCrossedRows(); TPCNclsF = (Float_t)track->GetTPCNclsF(); if(TPCNCrossedRows<70) continue; if(TPCNclsF==0) continue; if(TPCNCrossedRows/TPCNclsF<0.8) continue; // Get the corresponding global track to use PID --> stored only for global tracks // // Check that the array fGTI isn't too small // for the track id if 
(-track->GetID()-1 >= fTrackBufferSize) { printf ("Exceeding buffer size!!"); continue; } if(fFilterBit == 128) vtrackg = fAOD->GetTrack(farrGT[-track->GetID()-1]); else vtrackg = track; if (!vtrackg) { printf ("No global info! iTrack %d, ID %d\n",ip,track->GetID()); continue; } if (farrGT[-track->GetID()-1]>=ntracks || farrGT[-track->GetID()-1]<0) { /*cout<<"This index is out of range!!"<<farrGT[-track->GetID()-1]<<endl;*/ continue;} globaltrack = dynamic_cast<AliAODTrack*>(vtrackg); if(!globaltrack) AliFatal("Not a standard AOD"); // cout<<" Filter map for the global track "<<globaltrack->GetFilterMap()<<" "<<globaltrack<<endl; // IP to PV of tracks dz[0] = -99.; dz[1] = -99.; track->GetImpactParameters(&dz[0], &dz[1]); //dz[0] = globaltrack->DCA(); // the TPC one should be applied the other biases the CF --> from Maciejs note --> FIXME to be checked //dz[1] = globaltrack->ZAtDCA(); // for those two lines check AliAODTrack.h // FIXME these two lines produce shifted distributions, known problem, asked Mac and Marian. // Btw dont propagate TPC constrained! // Double_t p[3]; track->GetXYZ(p); // first two elements of p give the same as above, ignore the third, those are original DCA of TPC only tracks (with or w/o constraint?) 
// cout<<"d xy "<<dz[0]<<"while value is for other "<<p[0]<<endl; // cout<<"d z "<<dz[1]<<"while value is for other "<<p[1]<<endl; etp1.CopyFromVTrack(vtrackg); etp1.PropagateToDCA(vertexmain,(InputEvent())->GetMagneticField(),100.,dzg,covarg); isP = kFALSE; isaP = kFALSE; charge = globaltrack->Charge(); if (charge>0) { isP = kTRUE; isaP = kFALSE; } else{ if(charge<0){ isP = kFALSE; isaP = kTRUE; } } nsigmaTPCs = fPIDResponse->NumberOfSigmasTPC(globaltrack, (AliPID::EParticleType)AliPID::kProton); if(std::abs(nsigmaTPCs) > 3 ) continue; if (TMath::Abs(globaltrack->Eta())> 0.8) continue; nsigmaTOFs = 10.; // be careful with those initialization probMis = 10.; rapiditySecond = 0.5*TMath::Log( (track->E(fPDGMassSecond) + track->Pz()) / (track->E(fPDGMassSecond) - track->Pz() +1.e-13)); //TOF statusTOF = fPIDResponse->CheckPIDStatus(AliPIDResponse::kTOF,globaltrack); // this checks kTOFout and kTIMEi https://twiki.cern.ch/twiki/bin/viewauth/ALICE/TOF tTOF = 0.0; isTOFPIDok = kFALSE; if ((statusTOF == AliPIDResponse::kDetPidOk)) { nsigmaTOFs = fPIDResponse->NumberOfSigmasTOF(globaltrack, (AliPID::EParticleType)AliPID::kProton); isTOFPIDok = kTRUE; probMis = fPIDResponse->GetTOFMismatchProbability(globaltrack); tTOF = globaltrack->GetTOFsignal()-fPIDResponse->GetTOFResponse().GetStartTime(globaltrack->P()); globaltrack->GetIntegratedTimes(expectedTimes); } else { probMis = 1.; nsigmaTOFs = 10.; //cout<<"The corresponding global track has no tof pid!"<<endl; } // // HERE IS THE PID! 
// // Bool_t isMCsecond = kFALSE; Int_t MCptcCodeP2 = -999; Int_t MCmumIDP2 = -999; Int_t MCmumPDGP2 = -999; Int_t MCgrammaPDGP2 = -999; Int_t MCgrammaIDP2 = -999; AliAODMCParticle *tparticle = 0x0; AliAODMCParticle *AncParticle[50]={0}; Int_t AncPdg[50]={0}; Int_t AncParticleLabel[50]={0}; Int_t *AncParticleLabelnew; if(fIsMC == kTRUE) { label = track->GetLabel(); tparticle = (AliAODMCParticle*)arrayMC->At(TMath::Abs(label)); PDGcode = tparticle->GetPdgCode(); if(TMath::Abs(PDGcode)==fPDGcodeSecond) { isMCsecond= kTRUE; //cout<<"Label: "<<label<<" "<<PDGcode<<endl; Int_t mcMotherLabel = tparticle->GetMother(); Int_t mcMotherPdg = 0; AliAODMCParticle *mcMother = (AliAODMCParticle*)arrayMC->At(mcMotherLabel); Int_t mcGrandMotherLabel = mcMother->GetMother(); Int_t mcGrandMotherPdg = 0; AliAODMCParticle *mcGrandMother = (AliAODMCParticle*)arrayMC->At(mcGrandMotherLabel); if(mcMotherLabel < 0) {mcMotherPdg = 0;} else {mcMotherPdg = mcMother->GetPdgCode();} if(mcGrandMotherLabel < 0){mcGrandMotherPdg=0;}else{mcGrandMotherPdg = mcGrandMother->GetPdgCode();} //cout<<"mcMotherlabel: "<<mcMotherLabel<<endl; // Mum id MCmumIDP2 = mcMotherLabel; MCmumPDGP2 = mcMotherPdg; MCgrammaIDP2 = mcGrandMotherLabel; MCgrammaPDGP2 = mcGrandMotherPdg; //cout<<"1:------------------------------MCmumIDP2: "<<MCmumIDP2<<" ----------------------------------MCmumPDGP2: "<<MCmumPDGP2<<endl; if (tparticle->IsPhysicalPrimary()) MCptcCodeP2 = 1; else if (tparticle->IsSecondaryFromMaterial()) MCptcCodeP2 = 2; else if (tparticle->IsSecondaryFromWeakDecay()) MCptcCodeP2 = 3; else MCptcCodeP2 = 4; /* try to get info about origin of particles (beginning)*/ AncParticle[0]=tparticle; //assegna solo la particella 0 che è quella di partenza AncPdg[0]= AncParticle[0]->GetPdgCode(); AncParticleLabel[0]= AncParticle[0]->GetLabel(); for (Int_t i=0; i<49; i++) { AncParticleLabel[i+1]=AncParticle[i]->GetMother();//va salvato per ogni k0s selezionata AncParticle[i+1] = static_cast<AliAODMCParticle*>(arrayMC-> 
At(TMath::Abs(AncParticleLabel[i+1]))); //per i=0 la particella vparticle[1] diventa la madre della vparticle[0] e avanti così //così si riempie un array di antenate fino alla prima particella generatrice AncPdg[i+1] = AncParticle[i+1]->GetPdgCode(); //salvato per ogni k0s selezionata (quindi aggiungi a feventcoll) if ((AncParticleLabel[i] ==1 || AncParticleLabel[i] ==-1) && (AncPdg[i]==2212) && (AncParticleLabel[i] == AncParticleLabel[i+1] )) //**boh break; } /* try to get info about origin of particles (end)*/ } }//end of MC loop if(isTOFPIDok && tTOF > 0.) { length = globaltrack->GetIntegratedLength(); // // FIXME length is zero!! from a mail february 2014: this info is not available for AODs 115, use AODs 145 ptot = globaltrack->GetTPCmomentum(); //if (probMis > 0.01) continue; //if (TMath::Sqrt(nsigmaTOFs*nsigmaTOFs+nsigmaTPCs*nsigmaTPCs)> fnSigmaTPCTOFPIDsecondParticle) continue; // this cleans the TOF corrected time plot vs p //FIXME : Original line from mariella beta = length/(tTOF*2.99792457999999984e-02); //cout<<" rack length "<<length<<" beta "<<beta<<endl; gamma = 1/TMath::Sqrt(1 - beta*beta); mass = ptot/TMath::Sqrt(gamma*gamma - 1); // using inner TPC mom. as approx. 
//cout<<"ptc1: "<<length<<" "<<ptot<<" "<<mass<<endl; } else { mass=-99.; } if(mass-fPDGMassSecond<-0.2 || mass-fPDGMassSecond>0.2) continue; //taglio su massa tof if(fIsMC == kTRUE) fHistSecondTPCdEdx->Fill(globaltrack->Pt()*charge, globaltrack->GetTPCsignal()); else if(fIsMC == kFALSE) fHistSecondTPCdEdx->Fill(globaltrack->GetTPCmomentum()*charge, globaltrack->GetTPCsignal()); fHistSecondMassTOFvsPt3sTPC->Fill(mass-fPDGMassSecond,charge*track->Pt()); //fHistSecondMassTOFvsPt3sTPC3sTOF->Fill(mass-fPDGMassSecond,charge*track->Pt()); //------------------ Save second particle information fEvt->fReconstructedSecond[sCount].sCharge = charge; if(fIsMC == kTRUE){ fEvt->fReconstructedSecond[sCount].sMomentumTruth[0] = tparticle->Px(); fEvt->fReconstructedSecond[sCount].sMomentumTruth[1] = tparticle->Py(); fEvt->fReconstructedSecond[sCount].sMomentumTruth[2] = tparticle->Pz(); fEvt->fReconstructedSecond[sCount].sAncestorParticleLabel = AncParticleLabel; fEvt->fReconstructedSecond[sCount].sAncestorPdg = AncPdg; }else{ fEvt->fReconstructedSecond[sCount].sMomentumTruth[0] = 0.; fEvt->fReconstructedSecond[sCount].sMomentumTruth[1] = 0.; fEvt->fReconstructedSecond[sCount].sMomentumTruth[2] = 0.; } fEvt->fReconstructedSecond[sCount].sMomentum[0] = track->Px(); fEvt->fReconstructedSecond[sCount].sMomentum[1] = track->Py(); fEvt->fReconstructedSecond[sCount].sMomentum[2] = track->Pz(); fEvt->fReconstructedSecond[sCount].sPt = track->Pt(); fEvt->fReconstructedSecond[sCount].sEta = track->Eta(); fEvt->fReconstructedSecond[sCount].sPhi = track->Phi(); fEvt->fReconstructedSecond[sCount].sTheta = track->Theta(); fEvt->fReconstructedSecond[sCount].sRap = rapiditySecond; fEvt->fReconstructedSecond[sCount].mcSecondOriginType = mcSecondOrigin; fEvt->fReconstructedSecond[sCount].isMCptc = isMCsecond; fEvt->fReconstructedSecond[sCount].sMCcode = MCptcCodeP2; fEvt->fReconstructedSecond[sCount].sPDGcode = PDGcode; // cout<<"2b:------------------------------MCmumIDP2: "<<MCmumIDP2<<" 
----------------------------------MCmumPDGP2: "<<MCmumPDGP2<<endl; fEvt->fReconstructedSecond[sCount].sDCAxy = dzg[0]; fEvt->fReconstructedSecond[sCount].sDCAz = dzg[1]; fEvt->fReconstructedSecond[sCount].sMassTOF = mass; fEvt->fReconstructedSecond[sCount].sMCmumIdx = MCmumIDP2; fEvt->fReconstructedSecond[sCount].sMCmumPDG = MCmumPDGP2; fEvt->fReconstructedSecond[sCount].sMCgrandmumIdx = MCgrammaIDP2; fEvt->fReconstructedSecond[sCount].sMCgrandmumPDG = MCgrammaPDGP2; if (isP){ fEvt->fReconstructedSecond[sCount].isP = kTRUE; fEvt->fReconstructedSecond[sCount].isaP = kFALSE; } else if (isaP){ fEvt->fReconstructedSecond[sCount].isP = kFALSE; fEvt->fReconstructedSecond[sCount].isaP = kTRUE; } fEvt->fReconstructedSecond[sCount].index = TMath::Abs(globaltrack->GetID()); // Int_t *AncParticleLabelP=0x0; // Int_t *AncPdgP=0x0; // if(fIsMC && isMCsecond) // { // AncParticleLabelP = fEvt->fReconstructedSecond[sCount].sAncestorParticleLabel; // AncPdgP = fEvt->fReconstructedSecond[sCount].sAncestorPdg; // for(int j=0;j<50;j++) // cout<<AncParticleLabelP[j]<<" "<<AncPdgP[j]<<endl; // } sCount++; if (fMaxSecondMult <= sCount){ cerr<<"Proton counts exceeded "<<fMaxSecondMult<<"!"<<endl; break; } }//end track loop fEvt->fNumberCandidateSecond = sCount; Float_t ptrackTPCNCrossedRows; Float_t ntrackTPCNCrossedRows; Float_t ptrackTPCNclsF; Float_t ntrackTPCNclsF; Float_t rapidityFirst = 0.; int fCount = 0; AliReconstructedFirstK0SP::MCFirstOrigin_t mcFirstOrigin =AliReconstructedFirstK0SP::kUnassigned; Int_t fchargeN; Int_t fchargeP; Int_t iV0s = fAOD->GetNumberOfV0s(); for(Int_t i=0; i< iV0s; i++) { AliAODv0* V0 = static_cast<AliAODv0*>(fAOD->GetV0(i)); if(V0->GetOnFlyStatus())continue; AliAODTrack *pTrack=(AliAODTrack *)V0->GetDaughter(0); //0->Positive Daughter AliAODTrack *nTrack=(AliAODTrack *)V0->GetDaughter(1); //1->Negative Daughter fchargeP = pTrack->Charge(); fchargeN = nTrack->Charge(); if (std::abs(fPIDResponse->NumberOfSigmasTPC(pTrack, AliPID::kPion)) > 3 ) continue; 
if (std::abs(fPIDResponse->NumberOfSigmasTPC(nTrack, AliPID::kPion)) > 3 ) continue; if (std::abs(pTrack->Eta())>0.8) continue; if (std::abs(nTrack->Eta())>0.8) continue; ptrackTPCNCrossedRows = pTrack->GetTPCNCrossedRows(); ntrackTPCNCrossedRows = nTrack->GetTPCNCrossedRows(); ptrackTPCNclsF = (Double_t)pTrack->GetTPCNclsF(); ntrackTPCNclsF = (Double_t)nTrack->GetTPCNclsF(); if(ptrackTPCNCrossedRows<70 || ntrackTPCNCrossedRows<70) continue; if(ptrackTPCNclsF==0 || ntrackTPCNclsF==0) continue; if(ptrackTPCNCrossedRows/ptrackTPCNclsF<0.8 || ntrackTPCNCrossedRows/ntrackTPCNclsF<0.8) continue; if(V0->DcaV0Daughters()>0.8) continue; rapidityFirst=V0->RapK0Short(); if(TMath::Abs(rapidityFirst)>0.5) continue; if(0.4976*(V0->DecayLengthV0(lBestPrimaryVtxPos))/TMath::Sqrt(V0->Ptot2V0())>7*2.6844) continue; if(V0->DcaNegToPrimVertex()<0.1) continue; if(V0->DcaPosToPrimVertex()<0.1) continue; if(V0->CosPointingAngle(lBestPrimaryVtxPos)<0.95) continue; if(TMath::Abs(V0->MassLambda()-1.115683)<0.00125) continue; if(TMath::Abs(V0->MassAntiLambda()-1.115683)<0.00125) continue; fHistMassK0S->Fill(V0->MassK0Short()); Bool_t isMCfirst = kFALSE; Int_t MCptcCodeP1 = -999; //1 : primary 2: from weak decay 3: from material Int_t MCmumIDP1 = -999; Int_t MCmumPDGP1 = -999; Int_t MCgrammaPDGP1 = -999; Int_t MCgrammaIDP1 = -999; AliAODMCParticle *tparticle = 0x0; AliAODMCParticle *AncParticle[50]={0}; Int_t AncPdg[50]={0}; Int_t AncParticleLabel[50]={0}; if (fIsMC) { label = pTrack->GetLabel(); tparticle = (AliAODMCParticle*)arrayMC->At(TMath::Abs(label)); Int_t mcMotherLabel1 = tparticle->GetMother(); label = nTrack->GetLabel(); tparticle = (AliAODMCParticle*)arrayMC->At(TMath::Abs(label)); Int_t mcMotherLabel2 = tparticle->GetMother(); tparticle = (AliAODMCParticle*)arrayMC->At(TMath::Abs(mcMotherLabel2)); if(mcMotherLabel1!=mcMotherLabel2) PDGcode=0; else { PDGcode = tparticle->GetPdgCode(); } if(TMath::Abs(PDGcode)==fPDGcodeFirst) { isMCfirst= kTRUE; //cout<<"Label: "<<label<<" 
"<<PDGcode<<endl; Int_t mcMotherLabel = tparticle->GetMother(); Int_t mcMotherPdg = 0; AliAODMCParticle *mcMother = (AliAODMCParticle*)arrayMC->At(mcMotherLabel); Int_t mcGrandMotherLabel = mcMother->GetMother(); Int_t mcGrandMotherPdg = 0; AliAODMCParticle *mcGrandMother = (AliAODMCParticle*)arrayMC->At(mcGrandMotherLabel); if(mcMotherLabel < 0) {mcMotherPdg = 0;} else {mcMotherPdg = mcMother->GetPdgCode();} if(mcGrandMotherLabel < 0){mcGrandMotherPdg=0;}else{mcGrandMotherPdg = mcGrandMother->GetPdgCode();} //cout<<"mcMotherlabel: "<<mcMotherLabel<<endl; // Mum id MCmumIDP1 = mcMotherLabel; MCmumPDGP1 = mcMotherPdg; MCgrammaIDP1 = mcGrandMotherLabel; MCgrammaPDGP1 = mcGrandMotherPdg; //cout<<"1:------------------------------MCmumIDP1: "<<MCmumIDP1<<" ----------------------------------MCmumPDGP1: "<<MCmumPDGP1<<endl; if (tparticle->IsPhysicalPrimary()){ MCptcCodeP1 = 1; } else if (tparticle->IsSecondaryFromMaterial()){ MCptcCodeP1 = 2; } else if (tparticle->IsSecondaryFromWeakDecay()){ MCptcCodeP1 = 3; } else { MCptcCodeP1 = 4; //cout<<"-------------------Inside 4 loop !!!!!"<<endl; } /* try to get info about origin of particles (beginning)*/ AncParticle[0]=tparticle; //assegna solo la particella 0 che è quella di partenza AncPdg[0]= AncParticle[0]->GetPdgCode(); AncParticleLabel[0]= AncParticle[0]->GetLabel(); for (Int_t i=0; i<49; i++) { AncParticleLabel[i+1]=AncParticle[i]->GetMother();//va salvato per ogni k0s selezionata AncParticle[i+1] = static_cast<AliAODMCParticle*>(arrayMC-> At(TMath::Abs(AncParticleLabel[i+1]))); //per i=0 la particella particle[1] diventa la madre della particle[0] e avanti così //così si riempie un array di antenate fino alla prima particella generatrice AncPdg[i+1] = AncParticle[i+1]->GetPdgCode(); //salvato per ogni k0s selezionata (quindi aggiungi a feventcoll) if ((AncParticleLabel[i] ==1 || AncParticleLabel[i] ==-1) && (AncPdg[i]==2212) && (AncParticleLabel[i] == AncParticleLabel[i+1] )) //**boh break; } /* try to get info about 
origin of particles (end)*/ } }//end MC loop if(fIsMC == kTRUE) { fHistFirstNPionTPCdEdx->Fill(fchargeN*pTrack->Pt(), pTrack->GetTPCsignal()); fHistFirstPPionTPCdEdx->Fill(fchargeP*nTrack->Pt(), nTrack->GetTPCsignal()); } else if(fIsMC == kFALSE) { fHistFirstNPionTPCdEdx->Fill(fchargeN*pTrack->GetTPCmomentum(), pTrack->GetTPCsignal()); fHistFirstPPionTPCdEdx->Fill(fchargeP*nTrack->GetTPCmomentum(), nTrack->GetTPCsignal()); } if(fIsMC == kTRUE){ fEvt->fReconstructedFirst[fCount].fMomentumTruth[0] = tparticle->Px(); fEvt->fReconstructedFirst[fCount].fMomentumTruth[1] = tparticle->Py(); fEvt->fReconstructedFirst[fCount].fMomentumTruth[2] = tparticle->Pz(); fEvt->fReconstructedFirst[fCount].fAncestorParticleLabel = AncParticleLabel; fEvt->fReconstructedFirst[fCount].fAncestorPdg = AncPdg; }else{ fEvt->fReconstructedFirst[fCount].fMomentumTruth[0] = 0.; fEvt->fReconstructedFirst[fCount].fMomentumTruth[1] = 0.; fEvt->fReconstructedFirst[fCount].fMomentumTruth[2] = 0.; } fEvt->fReconstructedFirst[fCount].fMomentum[0] = V0->Px(); fEvt->fReconstructedFirst[fCount].fMomentum[1] = V0->Py(); fEvt->fReconstructedFirst[fCount].fMomentum[2] = V0->Pz(); fEvt->fReconstructedFirst[fCount].fPt = V0->Pt(); fEvt->fReconstructedFirst[fCount].fEta = V0->Eta(); fEvt->fReconstructedFirst[fCount].fPhi = V0->Phi(); fEvt->fReconstructedFirst[fCount].fTheta = V0->Theta(); fEvt->fReconstructedFirst[fCount].fRap = rapidityFirst; fEvt->fReconstructedFirst[fCount].mcFirstOriginType = mcFirstOrigin; fEvt->fReconstructedFirst[fCount].isMCptc = isMCfirst; fEvt->fReconstructedFirst[fCount].fMCcode = MCptcCodeP1; fEvt->fReconstructedFirst[fCount].fPDGcode = PDGcode; // cout<<"2b:------------------------------MCmumIDP1: "<<MCmumIDP1<<" ----------------------------------MCmumPDGP1: "<<MCmumPDGP1<<endl; fEvt->fReconstructedFirst[fCount].fDcaPosV0 = V0->DcaPosToPrimVertex(); fEvt->fReconstructedFirst[fCount].fDcaNegV0 = V0->DcaNegToPrimVertex(); fEvt->fReconstructedFirst[fCount].fInvMassK0s = 
V0->MassK0Short(); fEvt->fReconstructedFirst[fCount].fInvMassLambda = V0->MassLambda(); fEvt->fReconstructedFirst[fCount].fInvMassAntiLambda = V0->MassAntiLambda(); fEvt->fReconstructedFirst[fCount].fCosPointingAngle = V0->CosPointingAngle(lBestPrimaryVtxPos); fEvt->fReconstructedFirst[fCount].fMCmumIdx = MCmumIDP1; fEvt->fReconstructedFirst[fCount].fMCmumPDG = MCmumPDGP1; fEvt->fReconstructedFirst[fCount].fMCgrandmumIdx = MCgrammaIDP1; fEvt->fReconstructedFirst[fCount].fMCgrandmumPDG = MCgrammaPDGP1; fEvt->fReconstructedFirst[fCount].index = TMath::Abs(V0->GetID()); fEvt->fReconstructedFirst[fCount].indexPosdaughter = TMath::Abs(pTrack->GetID()); fEvt->fReconstructedFirst[fCount].indexNegdaughter = TMath::Abs(nTrack->GetID()); fCount++; if (fMaxFirstMult <= fCount){ cerr<<"K0S counts exceeded "<<fMaxFirstMult<<"!"<<endl; break; } }//end V0 loop fEvt->fNumberCandidateFirst = fCount; for (int i=0; i < fEvt->fNumberCandidateFirst; i++) { for (int j=0; j<fEvt->fNumberCandidateSecond; j++) { if (fEvt->fReconstructedFirst[i].indexPosdaughter == fEvt->fReconstructedSecond[j].index || fEvt->fReconstructedFirst[i].indexNegdaughter == fEvt->fReconstructedSecond[j].index) { //cout<<"the track can be both tracks!"<<endl; fEvt->fReconstructedFirst[i].doSkipOver = kTRUE; fEvt->fReconstructedSecond[j].doSkipOver = kTRUE; } } } //-------------------------------------------------------------- DoPairsh1h2(lcentrality, fieldsign,fSphericityvalue,fSpherocityvalue); // Post output data PostData(1, fOutputContainer); PostData(2, fHistSparseSignal ); PostData(3, fHistSparseBkg ); } //---------------------------------------------------------------------------------------------------- void AliAnalysisTaskK0SPFemto::DoPairsh1h2 ( const Float_t lcentrality, int fieldsign, const Double_t fSphericityvalue, Double_t fSpherocityvalue ) { //----------- double DcaPosV0 = -999. ; double DcaNegV0 = -999. ; double DCAxyP = -999. ; double DCAzP = -999. 
; double ptV0 = -999.; double ptP = -999.; // Short_t chargeV0 = -999.; // Short_t chargeP = -999.; bool isV0 = kFALSE; bool isaV0 = kFALSE; bool isP = kFALSE; bool isaP = kFALSE; Int_t SignV0 = -999; Int_t SignP = -999; double phiP = -999.; double phiV0 = -999.; double thetaV0 = -999.; double thetaP = -999.; double MassTOFP = -999.; double InvMassK0s = -999.; double InvMassLambda = -999.; double InvMassAntiLambda = -999.; double CosPointingAngleV0 = -999.; bool isMC1 = kFALSE; bool isMC2 = kFALSE; bool isMCvector = kFALSE; bool sameMother = kFALSE; bool sameGrandMother = kFALSE; Int_t mcMotherLabelV0 = -999; Int_t mcMotherLabelP = -999; Int_t mcGrandMotherLabelV0 = -999; Int_t mcGrandMotherLabelP = -999; Int_t typeV0 = -999; Int_t typeP = -999; Int_t mcPDGMotherV0 = 0; Int_t mcPDGMotherP = 0; // Int_t mcMotherBin = 0; Int_t mcPDGGrandMother = 0; Int_t mcGrandMotherBin = 0; Int_t mcPDGcodeV0 = 0; Int_t mcPDGcodeP = 0; // Int_t mcPDG1Bin = 0; // Int_t mcPDG2Bin = 0; int evmultmixed = 0; bool multmixedcounted = kFALSE; double pairKstar = 0.; double pairKstarMC = 0.; double pairMass = 0.; double pairMassE = 0.; double pairKt = 0.; for (int i=0; i<fEvt->fNumberCandidateFirst; i++) { if (fEvt->fReconstructedFirst[i].doSkipOver) continue; DcaPosV0 = fEvt->fReconstructedFirst[i].fDcaPosV0; DcaNegV0 = fEvt->fReconstructedFirst[i].fDcaNegV0; ptV0 = fEvt->fReconstructedFirst[i].fPt; // chargeV0 = fEvt->fReconstructedFirst[i].fCharge; isMC1 = fEvt->fReconstructedFirst[i].isMCptc; mcMotherLabelV0 = fEvt->fReconstructedFirst[i].fMCmumIdx; typeV0 = fEvt->fReconstructedFirst[i].fMCcode; mcGrandMotherLabelV0 = fEvt->fReconstructedFirst[i].fMCgrandmumIdx; mcPDGcodeV0 = fEvt->fReconstructedFirst[i].fPDGcode; mcPDGMotherV0 = fEvt->fReconstructedFirst[i].fMCmumPDG; mcPDGGrandMother = fEvt->fReconstructedFirst[i].fMCgrandmumPDG; Int_t *AncParticleLabelV0=0x0; Int_t *AncPdgV0=0x0; if(fIsMC && isMC1) { AncParticleLabelV0 = fEvt->fReconstructedFirst[i].fAncestorParticleLabel; AncPdgV0 = 
fEvt->fReconstructedFirst[i].fAncestorPdg; // for(int mm=0;mm<50;mm++) // cout<<AncParticleLabelV0[mm]<<" "<<AncPdgV0[mm]<<endl; } InvMassK0s = fEvt->fReconstructedFirst[i].fInvMassK0s; InvMassLambda = fEvt->fReconstructedFirst[i].fInvMassLambda; InvMassAntiLambda = fEvt->fReconstructedFirst[i].fInvMassAntiLambda; CosPointingAngleV0 = fEvt->fReconstructedFirst[i].fCosPointingAngle; thetaV0 = fEvt->fReconstructedFirst[i].fTheta; phiV0 = fEvt->fReconstructedFirst[i].fPhi; for (int eventNumber=0; eventNumber<fnEventsToMix+1; eventNumber++) { if (!multmixedcounted && eventNumber!=0 && ((fEvt+eventNumber)->fNumberCandidateSecond)!=0) evmultmixed++; for (int j=0; j<(fEvt+eventNumber)->fNumberCandidateSecond; j++) { if ((fEvt+eventNumber)->fReconstructedSecond[j].doSkipOver) continue; DCAxyP = (fEvt+eventNumber)->fReconstructedSecond[j].sDCAxy; DCAzP = (fEvt+eventNumber)->fReconstructedSecond[j].sDCAz; ptP = (fEvt+eventNumber)->fReconstructedSecond[j].sPt; thetaP = (fEvt+eventNumber)->fReconstructedSecond[j].sTheta; phiP = (fEvt+eventNumber)->fReconstructedSecond[j].sPhi; MassTOFP = (fEvt+eventNumber)->fReconstructedSecond[j].sMassTOF; //chargeP = (fEvt+eventNumber)->fReconstructedSecond[j].sCharge; isMC2 = (fEvt+eventNumber)->fReconstructedSecond[j].isMCptc; mcMotherLabelP = (fEvt+eventNumber)->fReconstructedSecond[j].sMCmumIdx; typeP = (fEvt+eventNumber)->fReconstructedSecond[j].sMCcode; mcGrandMotherLabelP = (fEvt+eventNumber)->fReconstructedSecond[j].sMCgrandmumIdx; mcPDGcodeP = (fEvt+eventNumber)->fReconstructedSecond[j].sPDGcode; isP = (fEvt+eventNumber)->fReconstructedSecond[j].isP; isaP = (fEvt+eventNumber)->fReconstructedSecond[j].isaP; mcPDGMotherP = (fEvt+eventNumber)->fReconstructedSecond[j].sMCmumPDG; Int_t *AncParticleLabelP=0x0; Int_t *AncPdgP=0x0; if(fIsMC==kTRUE && isMC2==kTRUE) { AncParticleLabelP = (fEvt+eventNumber)->fReconstructedSecond[j].sAncestorParticleLabel; AncPdgP = (fEvt+eventNumber)->fReconstructedSecond[j].sAncestorPdg; // for(int 
mm=0;mm<50;mm++) // cout<<AncParticleLabelP[mm]<<" "<<AncPdgP[mm]<<endl; } if(isP) SignP = 1; else if (isaP) SignP = -1; if(isMC1 && isMC2) isMCvector = kTRUE; else isMCvector = kFALSE; if(mcMotherLabelV0 == mcMotherLabelP && mcMotherLabelV0!=-999)sameMother = kTRUE; else sameMother = kFALSE; if(mcGrandMotherLabelV0 == mcGrandMotherLabelP){sameGrandMother = kTRUE;} // cout<<"GM-------------------------------- chargeV0: "<<chargeV0<<" ----------- chargeP: "<<chargeP<<" ---------------------> "<<mcPDGGrandMother<<endl; if(TMath::Abs(mcPDGGrandMother)>= 1 && TMath::Abs(mcPDGGrandMother)<= 6) mcGrandMotherBin = 1; //quark else if(TMath::Abs(mcPDGGrandMother)==2212) mcGrandMotherBin = 2; //p else if(TMath::Abs(mcPDGGrandMother)==21) mcGrandMotherBin = 3; //g else if(TMath::Abs(mcPDGGrandMother)> 400 && TMath::Abs(mcPDGGrandMother)< 500 ) mcGrandMotherBin = 5; //D meson else if(mcPDGGrandMother!=0) { // cout<<"--------------------------------------------------------------------------------> "<<mcPDGMother<<endl; mcGrandMotherBin = 6; }// // else if(!sameMother) // mcMotherBin = 15; //cout<<"----------------- outside: "<<mcGrandMotherBin<<endl; //**QUI Bool_t IsCommonParton=kFALSE; if(fIsMC && isMCvector) { for (Int_t ii=1; ii<50; ii++) { //I start from one since last element cannot be a parton but is a hadron if (IsCommonParton==kTRUE) break; for (Int_t jj=1; jj<50; jj++) {//boh if ((AncParticleLabelV0[ii] == AncParticleLabelP[jj] ) && AncParticleLabelP[jj]!=0 && ( TMath::Abs(AncPdgV0[ii]) <=8 || TMath::Abs(AncPdgV0[ii]) ==21)) { //both Xi and Trigger particle have a common ancestor which has to be a quark or a gluon-> therefore te cascade comes form the jet defined by the trigger particle IsCommonParton =kTRUE; break; } } } } //cout<<IsCommonParton<<endl; //Calculate k* for the pair pairKstar = CalculateKstar(fEvt->fReconstructedFirst[i].fMomentum, (fEvt+eventNumber)->fReconstructedSecond[j].sMomentum,fPDGMassFirst, fPDGMassSecond); //mc kstar pairKstarMC = 
CalculateKstar(fEvt->fReconstructedFirst[i].fMomentumTruth, (fEvt+eventNumber)->fReconstructedSecond[j].sMomentumTruth,fPDGMassFirst, fPDGMassSecond); // //Invariant Mass of the pair //pairMass = CalculateMass(fEvt->fReconstructedFirst[i].fMomentum, (fEvt+eventNumber)->fReconstructedSecond[j].sMomentum,fPDGMassFirst, fPDGMassSecond); //pairMassE = CalculateMass(fEvt->fReconstructedFirst[i].fMomentum, (fEvt+eventNumber)->fReconstructedSecond[j].sMomentum,5.11e-4, 5.11e-4); // //Kt pairKt = pow(fEvt->fReconstructedFirst[i].fMomentum[0] + (fEvt+eventNumber)->fReconstructedSecond[j].sMomentum[0],2.); pairKt+= pow(fEvt->fReconstructedFirst[i].fMomentum[1] + (fEvt+eventNumber)->fReconstructedSecond[j].sMomentum[1],2.); pairKt = sqrt(pairKt)/2.; if (eventNumber==0) {//Same event pair histogramming tSignP = SignP; tCentrality = lcentrality; tDcaPosV0 = DcaPosV0; tDcaNegV0 = DcaNegV0; tDCAxyP = DCAxyP; tDCAzP = DCAzP; tKtpair = pairKt; tkStar = pairKstar; tPtV0 = ptV0; tPtP = ptP; tInvMassK0s = InvMassK0s; tInvMassLambda = InvMassLambda; tInvMassAntiLambda = InvMassAntiLambda; tCosPointingAngleV0 = CosPointingAngleV0; tThetaV0 = thetaV0; tThetaP = thetaP; tPhiV0 = phiV0; tPhiP = phiP; tMassTOFP = MassTOFP; // tDEta = deta; //**SERVONO? 
// tDPhiStar = dphis; // tDPhi = dphi; //tMassPair = pairMass; tSphericity = fSphericityvalue; tSpherocity = fSpherocityvalue; // tGammaCoversionMass= pairMassE; // tDTheta = dtheta; if(fIsMC == kTRUE){ tMCtruepair = isMCvector; tMCSameMother = sameMother; tMCMotherV0 = mcPDGMotherV0;//mcMotherBin; tMCMotherP = mcPDGMotherP;//mcMotherBin; tMCptcTypeV0 = typeV0 ; tMCptcTypeP = typeP ; tIsCommonParton = IsCommonParton; tMCSameGM = sameGrandMother; tMotherPDG = mcGrandMotherBin; tpdgcodeV0 = mcPDGcodeV0;//mcPDG1Bin; tpdgcodeP = mcPDGcodeP;//mcPDG2Bin; tKstarGen = pairKstarMC; } fHistSparseSignal->Fill(); } else {//Mixed-event pair histogramming tSignP = SignP; tCentrality = lcentrality; tDcaPosV0 = DcaPosV0; tDcaNegV0 = DcaNegV0; tDCAxyP = DCAxyP; tDCAzP = DCAzP; tKtpair = pairKt; tkStar = pairKstar; tPtV0 = ptV0; tPtP = ptP; tInvMassK0s = InvMassK0s; tInvMassLambda = InvMassLambda; tInvMassAntiLambda = InvMassAntiLambda; tCosPointingAngleV0 = CosPointingAngleV0; tThetaV0 = thetaV0; tThetaP = thetaP; tPhiV0 = phiV0; tPhiP = phiP; tMassTOFP = MassTOFP; // tDEta = deta; // tDPhiStar = dphis; // tDPhi = dphi; //tMassPair = pairMass; tSphericity = fSphericityvalue; tSpherocity = fSpherocityvalue; //tGammaCoversionMass = pairMassE; //tDTheta = dtheta; if(fIsMC == kTRUE){ tMCtruepair = isMCvector; tMCSameMother = sameMother; tMCMotherV0 = mcPDGMotherV0;//mcMotherBin; tMCMotherP = mcPDGMotherP;//mcMotherBin; tMCptcTypeV0 = typeV0 ; tMCptcTypeP = typeP ; tIsCommonParton = IsCommonParton; tMCSameGM = sameGrandMother; tMotherPDG = mcGrandMotherBin; tpdgcodeV0 = mcPDGcodeV0;//mcPDG1Bin; tpdgcodeP = mcPDGcodeP;//mcPDG2Bin; tKstarGen = pairKstarMC; } fHistSparseBkg->Fill(); } //mixed } // second part }//end event loop if (evmultmixed!=0) multmixedcounted = kTRUE; } // first part //if(multmixedcounted) fHistMultiplicityOfMixedEvent->Fill(evmultmixed); } // //---------------------------------------------------------------------------------------------- // //void 
AliAnalysisTaskK0SPFemto::DoPairshh (const Float_t lcentrality, int fieldsign) { // void AliAnalysisTaskK0SPFemto::DoPairshh (const Int_t lcentrality, int fieldsign, const Double_t fSphericityvalue) { // return; // } // //----------------------------------------------------------------------------------------------- double AliAnalysisTaskK0SPFemto::CalculateKstar(double momentum1[3], double momentum2[3], double mass1, double mass2) { // Jai S // Calculate k* for any pair of particles, regardless of whether the // particles have the same mass. double kstar = 0.; double e1 = 0.; double e2 = 0.; for(int i = 0; i < 3; i++){ kstar -= pow(momentum1[i]-momentum2[i],2); e1 += pow(momentum1[i],2); e2 += pow(momentum2[i],2); } e1 += pow(mass1,2); e1 = sqrt(e1); e2 += pow(mass2,2); e2 = sqrt(e2); kstar += pow(e1-e2,2); double totalMomentumSquared = 0; for(int i = 0; i < 3; i++){ totalMomentumSquared -= pow(momentum1[i]+momentum2[i],2); } totalMomentumSquared += pow(e1+e2,2); kstar -= pow((pow(mass1,2)-pow(mass2,2)),2)/totalMomentumSquared; kstar *= -1.; kstar = sqrt(kstar); //At this point, we've actually calculated Qinv kstar *= 0.5; // kstar is 0.5*Qinv return kstar; } //----------------------------------------------------------------------------------------------- // double AliAnalysisTaskK0SPFemto::CalculateMass(double momentum1[3], double momentum2[3], double mass1, double mass2) { // Jai S // // Calculate Invariant Mass // TLorentzVector vP1,vP2,vSum; // vP1.SetXYZM(momentum1[0],momentum1[1],momentum1[2],mass1); // vP2.SetXYZM(momentum2[0],momentum2[1],momentum2[2],mass2); // vSum=vP1+vP2; // double mass = vSum.M(); // return mass; // } // //----------------------------------------------------------------------------------------------- // double AliAnalysisTaskK0SPFemto::CalculateDphiSatR12m(Short_t chg1, Short_t chg2, Int_t magSign, Double_t ptv1, Double_t ptv2, Double_t phi1, Double_t phi2) { // AliFemto framework AliFemtoUser/AliFemtoPairCutRadialDistance.cxx + 
Dhevan not consistent? // /* // double rad = 1.2; // double afsi0b = 0.07510020733*chg1*magSign*rad/ptv1; // 0.075 = 0.3=e in H-L units*0.5=B/2 calculation on notebook // double afsi1b = 0.07510020733*chg2*magSign*rad/ptv2; // if (fabs(afsi0b) >=1.) return 9999.; // angle is pi/2 or not defined --> dont cut // if (fabs(afsi1b) >=1.) return 9999.; // MN modified these two lines returning 9999 and not kTRUE // // Double_t dps = phi2 - phi1 -TMath::ASin(afsi1b) + TMath::ASin(afsi0b); // // dps = TVector2::Phi_mpi_pi(dps); // double phi1bis =0.; // double phi2bis =0.; // phi1bis = phi1-TMath::ASin(afsi0b); // if(phi1bis > 2*PI) phi1bis -= 2*PI; // if(phi1bis < 0) phi1bis += 2*PI; // phi2bis = phi2 - TMath::ASin(afsi1b); // if(phi2bis > 2*PI) phi2bis -= 2*PI; // if(phi2bis < 0) phi2bis += 2*PI; // double deltaphi = phi2bis - phi1bis; // if(deltaphi > PI) deltaphi -= PI; // if(deltaphi < -PI) deltaphi += PI; // return deltaphi;//dps; // */ // // cout<<" Dphi "<<dps<<" Dhevan "<<deltaphi<<endl; // //from mariella // //analitical funcion // double rad = 1.2; // double afsi1b = 0.075*chg1*magSign*rad/ptv1; // 0.07510020733 = - 0.3 (= e in Heaviside-Lorentz units) *0.5 (= B in T) /2 (see later for the -), pT in GeV/c // double afsi2b = 0.075*chg2*magSign*rad/ptv2; // if (fabs(afsi1b) >=1.) return 9999.; // angle is pi/2 or not defined --> dont cut // if (fabs(afsi2b) >=1.) 
return 9999.; // MN modified these two lines returning 9999 and not kTRUE // double dps = phi2 - phi1 + TMath::ASin(afsi1b) -TMath::ASin(afsi2b); // - sign of e is outside Mariella // dps = TVector2::Phi_mpi_pi(dps); // return dps; // } // //----------------------------------------------------------------------------------------------- // double AliAnalysisTaskK0SPFemto::CalculateDPhiStar(Short_t chg1, Short_t chg2, Int_t magSign, Double_t ptv1, Double_t ptv2, Double_t phi1, Double_t phi2,Double_t rad) { //AliFemtoUser/AliFemtoPairCutDetaDphi.h // const Double_t unit_factor = 0.299792458 / 2.0; // const Double_t b_field = 0.5006670488586 * magSign; // Double_t shift1 = TMath::ASin(unit_factor * chg1 * b_field * rad / ptv1); // Double_t shift2 = TMath::ASin(unit_factor * chg2 * b_field * rad / ptv2); // double dps = (phi1 + shift1) - (phi2 + shift2); // // dps = TVector2::Phi_mpi_pi(dps); //to be checked // return dps; //deltaphi; // } // //_______________________________________________________________ // Double_t AliAnalysisTaskK0SPFemto::CalculateDeltaEta( Double_t eta1, Double_t eta2 ) { //AliFemtoUser/AliFemtoPairCutDetaDphi.h // const double deta = eta2 - eta1; // return deta; // } // //_______________________________________________________________ // Double_t AliAnalysisTaskK0SPFemto::CalculateDeltaTheta( Double_t theta1, Double_t theta2 ) { // const double dtheta = theta2 - theta1; // return dtheta; // } // //----------------------------------------------------------------------------------------------- // double AliAnalysisTaskK0SPFemto::CalculateDphiSatR12m(Double_t pos1SftR125[3], Double_t pos2SftR125[3]) { // Hans B // // Returns delta phi star at R = 1.2 m // const Float_t distSft = TMath::Sqrt(TMath::Power(pos1SftR125[0] - pos2SftR125[0],2) // + TMath::Power(pos1SftR125[1] - pos2SftR125[1],2)); // return 2.0 * TMath::ATan(distSft/2./(125.)); // } // //----------------------------------------------------------------------------------------------- // 
void AliAnalysisTaskK0SPFemto::SetSftPosR125(AliVTrack *track, const Float_t bfield, Double_t priVtx[3], Double_t posSftR125[3] ) { // Hans B // // Sets the spatial position of the track at the radius R=1.25m in the shifted coordinate system // // Initialize the array to something indicating there was no propagation // posSftR125[0]=-9999.; // posSftR125[1]=-9999.; // posSftR125[2]=-9999.; // // Make a copy of the track to not change parameters of the track // AliExternalTrackParam etp; // etp.CopyFromVTrack(track); // // The global position of the track // Double_t xyz[3]={-9999.,-9999.,-9999.}; // // The radius we want to propagate to, squared, for faster code // const Float_t rSquared = 125.*125.; // // Propagation is done in local x of the track // for (Float_t x = 58.;x<247.;x+=1.){ // // Starts at 83 / Sqrt(2) and goes outwards. 85/Sqrt(2) is the smallest local x // // for global radius 85 cm. x = 245 is the outer radial limit of the TPC when // // the track is straight, i.e. has inifinite pt and doesn't get bent. // // If the track's momentum is smaller than infinite, it will develop a y-component, // // which adds to the global radius // // We don't change the propagation steps to not mess up things! // // Stop if the propagation was not succesful. This can happen for low pt tracks // // that don't reach outer radii // if (!etp.PropagateTo(x,bfield)) { //cout<<"propagation failed!! and etss is "<<EtaS(posSftR125)<<endl; // break; // } // etp.GetXYZ(xyz); // GetXYZ returns global coordinates // // Calculate the shifted radius we are at, squared. // // Compare squared radii for faster code // Float_t shiftedRadiusSquared = (xyz[0]-priVtx[0])*(xyz[0]-priVtx[0]) // + (xyz[1]-priVtx[1])*(xyz[1]-priVtx[1]); // // Roughly reached the radius we want // if(shiftedRadiusSquared > rSquared){ // // Bigger loop has bad precision, we're nearly one centimeter too far, // // go back in small steps. 
// while (shiftedRadiusSquared>rSquared) { // // Propagate a mm inwards // x-=.1; // if (!etp.PropagateTo(x,bfield)){ // // Propagation failed but we're already with a // // cm precision at R=1.25m so we only break the // // inner loop // //cout<<"propagation failed!! and etss is "<<EtaS(posSftR125)<<endl; // break; // } // // Get the global position // etp.GetXYZ(xyz); // // Calculate shifted radius, squared // shiftedRadiusSquared = (xyz[0]-priVtx[0])*(xyz[0]-priVtx[0]) // + (xyz[1]-priVtx[1])*(xyz[1]-priVtx[1]); // } // // We reached R=1.25m with a precission of a cm to a mm, // // set the spatial position // posSftR125[0]=xyz[0]-priVtx[0]; // posSftR125[1]=xyz[1]-priVtx[1]; // posSftR125[2]=xyz[2]-priVtx[2]; // //cout<<" Pos 125 cm in function end "<<posSftR125[0]<<" "<<posSftR125[1]<<" "<<posSftR125[2]<<endl; // // Done // return; // } // End of if roughly reached radius // } // End of coarse propagation loop // } // //---------------------------------------------------------------------------------------------- // Double_t AliAnalysisTaskK0SPFemto::ThetaS( Double_t posSftR125[3] ) const { // Hans B // // Returns the longitudinal angle of the particles propagated // // position at R=1.25m. See // // https://edms.cern.ch/file/406391/2/ALICE-INT-2003-038.pdf // // for the ALICE coordinate system. Theta is zero at positive z, // // pi/2 at z = 0 aka the xy plane and pi at negative z // // R^ ^ // // | / // // |?'/ // // | / ? // // |/____>z // // // // Let's compute ?' and ? = pi/2 - ?' // // where ?' can even be and should // // sometimes be negative // // tan(?') = z/R // // ?' = arctan(z/R) // // ? = pi/2 - ?' // // = pi/2 - arctan(z/R) // // Note that in the doc above theta // // is calculated as arccos(z/sqrt(x^2+y^2+z^2)) // // Array of positions is 85,105,125,..cm, // // we take the z position at R=1.25m // // return TMath::Pi()/2. - TMath::ATan(fXshifted[2][2]/125.); // return TMath::Pi()/2. 
- TMath::ATan(posSftR125[2]/125.); // ok here R is really there --> transverse plane // } // //_______________________________________________________________ // Double_t AliAnalysisTaskK0SPFemto::EtaS( Double_t posSftR125[3] ) const { // Hans B // // Returns the corresponding eta of a pri. part. // // with this particles pos at R=1.25m // // http://en.wikipedia.org/wiki/Pseudorapidity // // ? = -ln[ tan(?/2)] // // printf("z: %+04.0f, thetaS %+03.2f etaS %+1.2f\n" // // ,fXshifted[2][2],ThetaS(),-TMath::Log( TMath::Tan(ThetaS()/2.) )); // return -TMath::Log( TMath::Tan(ThetaS(posSftR125 )/2.) ); // } // //_________________________________________________________________ Double_t AliAnalysisTaskK0SPFemto::CalculateSphericityofEvent(AliAODEvent *aodEvent) { //from Oliver Double_t Pt_tot = 0.; //total Pt of all protons and v0s in the event Double_t S00 = 0.; //Elements of the sphericity matrix Double_t S11 = 0.; Double_t S10 = 0.; Int_t NumOfTracks = aodEvent->GetNumberOfTracks(); if(NumOfTracks<3) return -9999.;//if already at this point not enough tracks are in the event -> return Int_t NTracks = 0; for(Int_t iTrack=0;iTrack<NumOfTracks;iTrack++) { AliAODTrack *aodtrack = dynamic_cast<AliAODTrack*>(aodEvent->GetTrack(iTrack)); if(!aodtrack->TestFilterBit(128)) continue; Double_t Pt = aodtrack->Pt(); //Double_t Phi = aodtrack->Phi(); Double_t Px = aodtrack->Px(); Double_t Py = aodtrack->Py(); Double_t eta = aodtrack->Eta(); if(!(eta>-0.8 && eta<0.8)) continue; if(Pt<0.5) continue; Pt_tot += Pt; S00 += Px*Px/Pt; S11 += Py*Py/Pt; S10 += Px*Py/Pt; NTracks++; } if(NTracks<3) return -9999.;//new flag: check //normalize to total Pt to obtain a linear form: if(Pt_tot == 0.) 
return -9999.; S00 /= Pt_tot; S11 /= Pt_tot; S10 /= Pt_tot; //Calculate the trace of the sphericity matrix: Double_t T = S00+S11; //Calculate the determinant of the sphericity matrix: Double_t D = S00*S11 - S10*S10;//S10 = S01 //Calculate the eigenvalues of the sphericity matrix: Double_t lambda1 = 0.5*(T + TMath::Sqrt(T*T - 4.*D)); Double_t lambda2 = 0.5*(T - TMath::Sqrt(T*T - 4.*D)); if((lambda1 + lambda2) == 0.) return -9999.; Double_t ST = -1.; if(lambda2>lambda1) { ST = 2.*lambda1/(lambda1+lambda2); } else { ST = 2.*lambda2/(lambda1+lambda2); } return ST; } double AliAnalysisTaskK0SPFemto::CalculateSpherocityEvent(AliAODEvent *evt) { float pFull = 0.f; float Spherocity = 2.f; const float pi = TMath::Pi(); float pTtot = 0.f; std::vector<float> pXVec; std::vector<float> pYVec; int numOfTracks = evt->GetNumberOfTracks(); if (numOfTracks < 3) return -9999.; for (int iTrack = 0; iTrack < numOfTracks; iTrack++) { AliAODTrack *track = dynamic_cast<AliAODTrack *>(evt->GetTrack(iTrack)); if (!track->TestFilterBit(96)) continue; double pt = track->Pt(); if (TMath::Abs(pt) < 0.5 || TMath::Abs(track->Eta()) > 0.8) { continue; } pTtot += pt; pXVec.push_back(track->Px()); pYVec.push_back(track->Py()); } if (pTtot == 0.f) return -9999.; const float OneOverPtTotal = 1.f / pTtot; float numerator = 0.f; float phiparam = 0.f; float nx = 0.f; float ny = 0.f; for (int i = 0; i < 360 / 0.1; ++i) { numerator = 0.f; phiparam = (pi * i * 0.1 / 180); // parametrization of the angle nx = TMath::Cos(phiparam); // x component of an unitary vector n ny = TMath::Sin(phiparam); // y component of an unitary vector n for (size_t itTrack = 0; itTrack < pXVec.size(); ++itTrack) { numerator += TMath::Abs(ny * pXVec[itTrack] - nx * pYVec[itTrack]); // product between p // proyection in XY plane and // the unitary vector } pFull = std::pow((numerator * OneOverPtTotal), 2); if (pFull < Spherocity) // maximization of pFull { Spherocity = pFull; } } return ((Spherocity) * pi * pi) / 4.0; } // 
//--------------------------------------------------- Methods From AliFemtoESDTrackCut.cxx // bool AliAnalysisTaskK0SPFemto::IsElectron(float nsigmaTPCE, float nsigmaTPCPi,float nsigmaTPCK, float nsigmaTPCP) // { // // if(TMath::Abs(nsigmaTPCE)<3 && TMath::Abs(nsigmaTPCPi)>3 && TMath::Abs(nsigmaTPCK)>3 && TMath::Abs(nsigmaTPCP)>3) // if(TMath::Abs(nsigmaTPCE)<3) // return false; // else // return true; // } // //---------------------------------------------------------- // bool AliAnalysisTaskK0SPFemto::IsPionNSigma(double mom, float nsigmaTPCPi, float nsigmaTOFPi) // { // //sligly changed w.r.t. the original // return false; // // if(mom<0.65){ // // // if(nsigmaTOFPi<-999.) // // if(nsigmaTOFPi==10) // // { // // //use TPC only // // if(mom<0.35 && TMath::Abs(nsigmaTPCPi)<3.0) return true; // // else if(mom<0.5 && mom>=0.35 && TMath::Abs(nsigmaTPCPi)<3.0) return true; // // else if(mom>=0.5 && TMath::Abs(nsigmaTPCPi)<2.0) return true; // // else return false; // // } // // else if(TMath::Abs(nsigmaTOFPi)<3.0 && TMath::Abs(nsigmaTPCPi)<3.0) return true; //TPC+TOF // // } // // //else if(nsigmaTOFPi<-10.) //p > 0.65 + no tof == kfalse // // else if(mom>0.65 && nsigmaTOFPi>3) //p > 0.65 + no tof == kfalse // // { // // return false; // // } // // else if(mom<1.5 && TMath::Abs(nsigmaTOFPi)<3.0 && TMath::Abs(nsigmaTPCPi)<5.0) return true; // // else if(mom>=1.5 && TMath::Abs(nsigmaTOFPi)<2.0 && TMath::Abs(nsigmaTPCPi)<5.0) return true; // // else // // return false; // } // /* // //---------------------------------------------------------- // bool AliAnalysisTaskK0SPFemto::IsKaonNSigma(float mom, float nsigmaTPCK, float nsigmaTOFK) // { // if (fNsigmaTPCTOF) { // if (mom > 0.5) { // // if (TMath::Hypot( nsigmaTOFP, nsigmaTPCP )/TMath::Sqrt(2) < 3.0) // if (TMath::Hypot( nsigmaTOFK, nsigmaTPCK ) < fNsigma) // return true; // } // else { // if (TMath::Abs(nsigmaTPCK) < fNsigma) // return true; // } // } // else { // if(mom<0.4) // { // if(nsigmaTOFK<-999.) 
// { // if(TMath::Abs(nsigmaTPCK)<2.0) return true; // } // else if(TMath::Abs(nsigmaTOFK)<3.0 && TMath::Abs(nsigmaTPCK)<3.0) return true; // } // else if(mom>=0.4 && mom<=0.6) // { // if(nsigmaTOFK<-999.) // { // if(TMath::Abs(nsigmaTPCK)<2.0) return true; // } // else if(TMath::Abs(nsigmaTOFK)<3.0 && TMath::Abs(nsigmaTPCK)<3.0) return true; // } // else if(nsigmaTOFK<-999.) // { // return false; // } // else if(TMath::Abs(nsigmaTOFK)<3.0 && TMath::Abs(nsigmaTPCK)<3.0) return true; // } // return false; // } // //---------------------------------------------------------- // bool AliAnalysisTaskK0SPFemto::IsProtonNSigma(float mom, float nsigmaTPCP, float nsigmaTOFP) // { // if (fNsigmaTPCTOF) { // if (mom > 0.5) { // // if (TMath::Hypot( nsigmaTOFP, nsigmaTPCP )/TMath::Sqrt(2) < 3.0) // if (TMath::Hypot( nsigmaTOFP, nsigmaTPCP ) < fNsigma) // return true; // } else if (TMath::Abs(nsigmaTPCP) < fNsigma) { // return true; // } // } // else if (fNsigmaTPConly) { // if (TMath::Abs(nsigmaTPCP) < fNsigma) // return true; // } // else { // if (mom > 0.8 && mom < 2.5) { // if ( TMath::Abs(nsigmaTPCP) < 3.0 && TMath::Abs(nsigmaTOFP) < 3.0) // return true; // } // else if (mom > 2.5) { // if ( TMath::Abs(nsigmaTPCP) < 3.0 && TMath::Abs(nsigmaTOFP) < 2.0) // return true; // } // else { // if (TMath::Abs(nsigmaTPCP) < 3.0) // return true; // } // } // return false; // } // */ //----------------------------------------------------------------------------------------------- void AliAnalysisTaskK0SPFemto::Terminate(const Option_t *) { // Draw result to the screen // Called once at the end of the query if (!GetOutputData(0)) return; }
/**************************************************************************** * * Copyright (c) 2013-2015 PX4 Development Team. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * 3. Neither the name PX4 nor the names of its contributors may be * used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************/ /** * @file trone.cpp * @author Luis Rodrigues * * Driver for the TeraRanger One range finders connected via I2C. 
*/

#include <px4_config.h>

#include <drivers/device/i2c.h>

#include <sys/types.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdbool.h>
#include <semaphore.h>
#include <string.h>
#include <fcntl.h>
#include <poll.h>
#include <errno.h>
#include <stdio.h>
#include <math.h>
#include <unistd.h>

#include <nuttx/arch.h>
#include <nuttx/wqueue.h>
#include <nuttx/clock.h>

#include <systemlib/perf_counter.h>
#include <systemlib/err.h>

#include <drivers/drv_hrt.h>
#include <drivers/drv_range_finder.h>
#include <drivers/device/ringbuffer.h>

#include <uORB/uORB.h>
#include <uORB/topics/subsystem_info.h>
#include <uORB/topics/distance_sensor.h>

#include <board_config.h>

/* Configuration Constants */
#define TRONE_BUS		PX4_I2C_BUS_EXPANSION
#define TRONE_BASEADDR		0x30	/* 7-bit address */
#define TRONE_DEVICE_PATH	"/dev/trone"

/* TRONE Registers addresses */
#define TRONE_MEASURE_REG	0x00	/* Measure range register */
#define TRONE_WHO_AM_I_REG	0x01	/* Who am I test register */
#define TRONE_WHO_AM_I_REG_VAL	0xA1

/* Device limits */
#define TRONE_MIN_DISTANCE	(0.20f)
#define TRONE_MAX_DISTANCE	(14.00f)

#define TRONE_CONVERSION_INTERVAL	50000 /* 50ms */

/* oddly, ERROR is not defined for c++ */
#ifdef ERROR
# undef ERROR
#endif
static const int ERROR = -1;

#ifndef CONFIG_SCHED_WORKQUEUE
# error This requires CONFIG_SCHED_WORKQUEUE.
#endif

/**
 * I2C driver for the TeraRanger One range finder.
 *
 * Runs a measure/collect state machine on the high-priority work queue and
 * publishes distance_sensor reports over uORB.
 */
class TRONE : public device::I2C
{
public:
	TRONE(int bus = TRONE_BUS, int address = TRONE_BASEADDR);
	virtual ~TRONE();

	virtual int		init();

	virtual ssize_t		read(struct file *filp, char *buffer, size_t buflen);
	virtual int		ioctl(struct file *filp, int cmd, unsigned long arg);

	/**
	* Diagnostics - print some basic information about the driver.
	*/
	void			print_info();

protected:
	virtual int		probe();

private:
	float			_min_distance;		// lower bound reported in each sample (m)
	float			_max_distance;		// upper bound reported in each sample (m)
	work_s			_work;			// work-queue item driving the cycle state machine
	ringbuffer::RingBuffer	*_reports;		// ring buffer of distance_sensor reports
	bool			_sensor_ok;
	uint8_t			_valid;			// CRC/range check result of the last sample
	int			_measure_ticks;		// poll interval in work-queue ticks (0 = manual polling)
	bool			_collect_phase;		// true: next cycle() collects; false: it measures
	int			_class_instance;
	int			_orb_class_instance;

	orb_advert_t		_distance_sensor_topic;

	perf_counter_t		_sample_perf;
	perf_counter_t		_comms_errors;
	perf_counter_t		_buffer_overflows;

	/**
	* Test whether the device supported by the driver is present at a
	* specific address.
	*
	* @param address	The I2C bus address to probe.
	* @return		True if the device is present.
	*/
	int			probe_address(uint8_t address);

	/**
	* Initialise the automatic measurement state machine and start it.
	*
	* @note This function is called at open and error time.  It might make sense
	*       to make it more aggressive about resetting the bus in case of errors.
	*/
	void			start();

	/**
	* Stop the automatic measurement state machine.
	*/
	void			stop();

	/**
	* Set the min and max distance thresholds if you want the end points of the sensors
	* range to be brought in at all, otherwise it will use the defaults TRONE_MIN_DISTANCE
	* and TRONE_MAX_DISTANCE
	*/
	void			set_minimum_distance(float min);
	void			set_maximum_distance(float max);
	float			get_minimum_distance();
	float			get_maximum_distance();

	/**
	* Perform a poll cycle; collect from the previous measurement
	* and start a new one.
	*/
	void			cycle();
	int			measure();
	int			collect();

	/**
	* Static trampoline from the workq context; because we don't have a
	* generic workq wrapper yet.
	*
	* @param arg		Instance pointer for the driver that is polling.
	*/
	static void		cycle_trampoline(void *arg);

};

/* Lookup table for the sensor frame CRC-8 (generator polynomial 0x07). */
static const uint8_t crc_table[] = {
	0x00, 0x07, 0x0e, 0x09, 0x1c, 0x1b, 0x12, 0x15, 0x38, 0x3f, 0x36, 0x31, 0x24, 0x23, 0x2a, 0x2d,
	0x70, 0x77, 0x7e, 0x79, 0x6c, 0x6b, 0x62, 0x65, 0x48, 0x4f, 0x46, 0x41, 0x54, 0x53, 0x5a, 0x5d,
	0xe0, 0xe7, 0xee, 0xe9, 0xfc, 0xfb, 0xf2, 0xf5, 0xd8, 0xdf, 0xd6, 0xd1, 0xc4, 0xc3, 0xca, 0xcd,
	0x90, 0x97, 0x9e, 0x99, 0x8c, 0x8b, 0x82, 0x85, 0xa8, 0xaf, 0xa6, 0xa1, 0xb4, 0xb3, 0xba, 0xbd,
	0xc7, 0xc0, 0xc9, 0xce, 0xdb, 0xdc, 0xd5, 0xd2, 0xff, 0xf8, 0xf1, 0xf6, 0xe3, 0xe4, 0xed, 0xea,
	0xb7, 0xb0, 0xb9, 0xbe, 0xab, 0xac, 0xa5, 0xa2, 0x8f, 0x88, 0x81, 0x86, 0x93, 0x94, 0x9d, 0x9a,
	0x27, 0x20, 0x29, 0x2e, 0x3b, 0x3c, 0x35, 0x32, 0x1f, 0x18, 0x11, 0x16, 0x03, 0x04, 0x0d, 0x0a,
	0x57, 0x50, 0x59, 0x5e, 0x4b, 0x4c, 0x45, 0x42, 0x6f, 0x68, 0x61, 0x66, 0x73, 0x74, 0x7d, 0x7a,
	0x89, 0x8e, 0x87, 0x80, 0x95, 0x92, 0x9b, 0x9c, 0xb1, 0xb6, 0xbf, 0xb8, 0xad, 0xaa, 0xa3, 0xa4,
	0xf9, 0xfe, 0xf7, 0xf0, 0xe5, 0xe2, 0xeb, 0xec, 0xc1, 0xc6, 0xcf, 0xc8, 0xdd, 0xda, 0xd3, 0xd4,
	0x69, 0x6e, 0x67, 0x60, 0x75, 0x72, 0x7b, 0x7c, 0x51, 0x56, 0x5f, 0x58, 0x4d, 0x4a, 0x43, 0x44,
	0x19, 0x1e, 0x17, 0x10, 0x05, 0x02, 0x0b, 0x0c, 0x21, 0x26, 0x2f, 0x28, 0x3d, 0x3a, 0x33, 0x34,
	0x4e, 0x49, 0x40, 0x47, 0x52, 0x55, 0x5c, 0x5b, 0x76, 0x71, 0x78, 0x7f, 0x6a, 0x6d, 0x64, 0x63,
	0x3e, 0x39, 0x30, 0x37, 0x22, 0x25, 0x2c, 0x2b, 0x06, 0x01, 0x08, 0x0f, 0x1a, 0x1d, 0x14, 0x13,
	0xae, 0xa9, 0xa0, 0xa7, 0xb2, 0xb5, 0xbc, 0xbb, 0x96, 0x91, 0x98, 0x9f, 0x8a, 0x8d, 0x84, 0x83,
	0xde, 0xd9, 0xd0, 0xd7, 0xc2, 0xc5, 0xcc, 0xcb, 0xe6, 0xe1, 0xe8, 0xef, 0xfa, 0xfd, 0xf4, 0xf3
};

/* Table-driven CRC-8 over the first len bytes at p; returns the 8-bit CRC. */
static uint8_t crc8(uint8_t *p, uint8_t len)
{
	uint16_t i;
	uint16_t crc = 0x0;

	while (len--) {
		i = (crc ^ *p++) & 0xFF;
		crc = (crc_table[i] ^ (crc << 8)) & 0xFF;
	}

	return crc & 0xFF;
}

/*
 * Driver 'main' command.
*/
extern "C" __EXPORT int trone_main(int argc, char *argv[]);

// Constructor: registers the character device and pre-allocates the perf
// counters; no I2C traffic happens until init() is called.
TRONE::TRONE(int bus, int address) :
	I2C("TRONE", TRONE_DEVICE_PATH, bus, address, 100000),
	_min_distance(TRONE_MIN_DISTANCE),
	_max_distance(TRONE_MAX_DISTANCE),
	_reports(nullptr),
	_sensor_ok(false),
	_valid(0),
	_measure_ticks(0),
	_collect_phase(false),
	_class_instance(-1),
	_orb_class_instance(-1),
	_distance_sensor_topic(nullptr),
	_sample_perf(perf_alloc(PC_ELAPSED, "tr1_read")),
	_comms_errors(perf_alloc(PC_COUNT, "tr1_com_err")),
	_buffer_overflows(perf_alloc(PC_COUNT, "tr1_buf_of"))
{
	// up the retries since the device misses the first measure attempts
	I2C::_retries = 3;

	// enable debug() calls
	_debug_enabled = false;

	// work_cancel in the dtor will explode if we don't do this...
	memset(&_work, 0, sizeof(_work));
}

// Destructor: stops the work-queue state machine and releases all resources.
TRONE::~TRONE()
{
	/* make sure we are truly inactive */
	stop();

	/* free any existing reports */
	if (_reports != nullptr) {
		delete _reports;
	}

	if (_class_instance != -1) {
		unregister_class_devname(RANGE_FINDER_BASE_DEVICE_PATH, _class_instance);
	}

	// free perf counters
	perf_free(_sample_perf);
	perf_free(_comms_errors);
	perf_free(_buffer_overflows);
}

// One-time initialization: I2C probe, report ring allocation and (for the
// primary instance) the uORB advertisement. Returns OK or ERROR.
int
TRONE::init()
{
	int ret = ERROR;

	/* do I2C init (and probe) first */
	if (I2C::init() != OK) {
		goto out;
	}

	/* allocate basic report buffers */
	_reports = new ringbuffer::RingBuffer(2, sizeof(distance_sensor_s));

	if (_reports == nullptr) {
		goto out;
	}

	_class_instance = register_class_devname(RANGE_FINDER_BASE_DEVICE_PATH);

	if (_class_instance == CLASS_DEVICE_PRIMARY) {
		/* get a publish handle on the range finder topic */
		struct distance_sensor_s ds_report;
		// NOTE(review): only measure() is issued here (no collect()) and the
		// return of _reports->get() is ignored, so ds_report may be
		// unpopulated when advertised -- confirm this is intentional.
		measure();
		_reports->get(&ds_report);
		_distance_sensor_topic = orb_advertise_multi(ORB_ID(distance_sensor), &ds_report,
					 &_orb_class_instance, ORB_PRIO_LOW);

		if (_distance_sensor_topic == nullptr) {
			DEVICE_LOG("failed to create distance_sensor object. Did you start uOrb?");
		}
	}

	ret = OK;
	/* sensor is ok, but we don't really know if it is within range */
	_sensor_ok = true;
out:
	return ret;
}

// Verify the device responds with the expected WHO_AM_I value; on success a
// first measurement is triggered.
int
TRONE::probe()
{
	uint8_t who_am_i = 0;

	const uint8_t cmd = TRONE_WHO_AM_I_REG;

	// set the I2C bus address
	set_address(TRONE_BASEADDR);

	// can't use a single transfer as TROne need a bit of time for internal processing
	if (transfer(&cmd, 1, nullptr, 0) == OK) {
		if (transfer(nullptr, 0, &who_am_i, 1) == OK && who_am_i == TRONE_WHO_AM_I_REG_VAL) {
			return measure();
		}
	}

	DEVICE_DEBUG("WHO_AM_I byte mismatch 0x%02x should be 0x%02x\n",
		     (unsigned)who_am_i,
		     TRONE_WHO_AM_I_REG_VAL);

	// not found on any address
	return -EIO;
}

// Accessors for the reported measurement range (meters).
void
TRONE::set_minimum_distance(float min)
{
	_min_distance = min;
}

void
TRONE::set_maximum_distance(float max)
{
	_max_distance = max;
}

float
TRONE::get_minimum_distance()
{
	return _min_distance;
}

float
TRONE::get_maximum_distance()
{
	return _max_distance;
}

// Character-device ioctl: poll-rate and queue-depth control, plus the
// range-finder specific min/max distance setters.
int
TRONE::ioctl(struct file *filp, int cmd, unsigned long arg)
{
	switch (cmd) {

	case SENSORIOCSPOLLRATE: {
			switch (arg) {

			/* switching to manual polling */
			case SENSOR_POLLRATE_MANUAL:
				stop();
				_measure_ticks = 0;
				return OK;

			/* external signalling (DRDY) not supported */
			case SENSOR_POLLRATE_EXTERNAL:

			/* zero would be bad */
			case 0:
				return -EINVAL;

			/* set default/max polling rate */
			case SENSOR_POLLRATE_MAX:
			case SENSOR_POLLRATE_DEFAULT: {
					/* do we need to start internal polling? */
					bool want_start = (_measure_ticks == 0);

					/* set interval for next measurement to minimum legal value */
					_measure_ticks = USEC2TICK(TRONE_CONVERSION_INTERVAL);

					/* if we need to start the poll state machine, do it */
					if (want_start) {
						start();
					}

					return OK;
				}

			/* adjust to a legal polling interval in Hz */
			default: {
					/* do we need to start internal polling?
*/
					bool want_start = (_measure_ticks == 0);

					/* convert hz to tick interval via microseconds */
					unsigned ticks = USEC2TICK(1000000 / arg);

					/* check against maximum rate */
					if (ticks < USEC2TICK(TRONE_CONVERSION_INTERVAL)) {
						return -EINVAL;
					}

					/* update interval for next measurement */
					_measure_ticks = ticks;

					/* if we need to start the poll state machine, do it */
					if (want_start) {
						start();
					}

					return OK;
				}
			}
		}

	case SENSORIOCGPOLLRATE:
		if (_measure_ticks == 0) {
			return SENSOR_POLLRATE_MANUAL;
		}

		// NOTE(review): _measure_ticks is in scheduler ticks, not ms, so
		// (1000 / _measure_ticks) only yields a rate in Hz if one tick is
		// one millisecond -- confirm against the tick frequency.
		return (1000 / _measure_ticks);

	case SENSORIOCSQUEUEDEPTH: {
			/* lower bound is mandatory, upper bound is a sanity check */
			if ((arg < 1) || (arg > 100)) {
				return -EINVAL;
			}

			irqstate_t flags = px4_enter_critical_section();

			if (!_reports->resize(arg)) {
				px4_leave_critical_section(flags);
				return -ENOMEM;
			}

			px4_leave_critical_section(flags);

			return OK;
		}

	case SENSORIOCGQUEUEDEPTH:
		return _reports->size();

	case SENSORIOCRESET:
		/* XXX implement this */
		return -EINVAL;

	case RANGEFINDERIOCSETMINIUMDISTANCE: {
			set_minimum_distance(*(float *)arg);
			return 0;
		}
		break;

	case RANGEFINDERIOCSETMAXIUMDISTANCE: {
			set_maximum_distance(*(float *)arg);
			return 0;
		}
		break;

	default:
		/* give it to the superclass */
		return I2C::ioctl(filp, cmd, arg);
	}
}

// read(): in automatic mode drains buffered reports; in manual mode runs a
// single measure/collect conversion inline (sleeps for the conversion time).
ssize_t
TRONE::read(struct file *filp, char *buffer, size_t buflen)
{
	unsigned count = buflen / sizeof(struct distance_sensor_s);
	struct distance_sensor_s *rbuf = reinterpret_cast<struct distance_sensor_s *>(buffer);
	int ret = 0;

	/* buffer must be large enough */
	if (count < 1) {
		return -ENOSPC;
	}

	/* if automatic measurement is enabled */
	if (_measure_ticks > 0) {

		/*
		 * While there is space in the caller's buffer, and reports, copy them.
		 * Note that we may be pre-empted by the workq thread while we are doing this;
		 * we are careful to avoid racing with them.
		 */
		while (count--) {
			if (_reports->get(rbuf)) {
				ret += sizeof(*rbuf);
				rbuf++;
			}
		}

		/* if there was no data, warn the caller */
		return ret ? ret : -EAGAIN;
	}

	/* manual measurement - run one conversion */
	do {
		_reports->flush();

		/* trigger a measurement */
		if (OK != measure()) {
			ret = -EIO;
			break;
		}

		/* wait for it to complete */
		usleep(TRONE_CONVERSION_INTERVAL);

		/* run the collection phase */
		if (OK != collect()) {
			ret = -EIO;
			break;
		}

		/* state machine will have generated a report, copy it out */
		if (_reports->get(rbuf)) {
			ret = sizeof(*rbuf);
		}

	} while (0);

	return ret;
}

// Kick off one ranging conversion by writing the measure register address.
int
TRONE::measure()
{
	int ret;

	/*
	 * Send the command to begin a measurement.
	 */
	const uint8_t cmd = TRONE_MEASURE_REG;
	ret = transfer(&cmd, sizeof(cmd), nullptr, 0);

	if (OK != ret) {
		perf_count(_comms_errors);
		DEVICE_LOG("i2c::transfer returned %d", ret);
		return ret;
	}

	ret = OK;

	return ret;
}

// Read back the 3-byte result frame (two big-endian distance bytes in mm,
// then a CRC byte), build a distance_sensor report and queue/publish it.
int
TRONE::collect()
{
	int	ret = -EIO;

	/* read from the sensor */
	uint8_t val[3] = {0, 0, 0};

	perf_begin(_sample_perf);

	ret = transfer(nullptr, 0, &val[0], 3);

	if (ret < 0) {
		DEVICE_LOG("error reading from sensor: %d", ret);
		perf_count(_comms_errors);
		perf_end(_sample_perf);
		return ret;
	}

	/* distance is transmitted MSB first, in millimeters */
	uint16_t distance_mm = (val[0] << 8) | val[1];
	float distance_m = float(distance_mm) *  1e-3f;
	struct distance_sensor_s report;
	report.timestamp = hrt_absolute_time();
	/* there is no enum item for a combined LASER and ULTRASOUND which it should be */
	report.type = distance_sensor_s::MAV_DISTANCE_SENSOR_LASER;
	// NOTE(review): orientation is hard-coded to 8 -- confirm against the
	// distance_sensor orientation enum values.
	report.orientation = 8;
	report.current_distance = distance_m;
	report.min_distance = get_minimum_distance();
	report.max_distance = get_maximum_distance();
	report.covariance = 0.0f;
	/* TODO: set proper ID */
	report.id = 0;

	// This validation check can be used later
	// (sample is valid when the CRC of the two data bytes matches and the
	// distance lies strictly inside the configured min/max range)
	_valid = crc8(val, 2) == val[2] && (float)report.current_distance > report.min_distance
		 && (float)report.current_distance < report.max_distance ?
1 : 0; /* publish it, if we are the primary */ if (_distance_sensor_topic != nullptr) { orb_publish(ORB_ID(distance_sensor), _distance_sensor_topic, &report); } if (_reports->force(&report)) { perf_count(_buffer_overflows); } /* notify anyone waiting for data */ poll_notify(POLLIN); ret = OK; perf_end(_sample_perf); return ret; } void TRONE::start() { /* reset the report ring and state machine */ _collect_phase = false; _reports->flush(); /* schedule a cycle to start things */ work_queue(HPWORK, &_work, (worker_t)&TRONE::cycle_trampoline, this, 1); /* notify about state change */ struct subsystem_info_s info = {}; info.present = true; info.enabled = true; info.ok = true; info.subsystem_type = subsystem_info_s::SUBSYSTEM_TYPE_RANGEFINDER; static orb_advert_t pub = nullptr; if (pub != nullptr) { orb_publish(ORB_ID(subsystem_info), pub, &info); } else { pub = orb_advertise(ORB_ID(subsystem_info), &info); } } void TRONE::stop() { work_cancel(HPWORK, &_work); } void TRONE::cycle_trampoline(void *arg) { TRONE *dev = (TRONE *)arg; dev->cycle(); } void TRONE::cycle() { /* collection phase? */ if (_collect_phase) { /* perform collection */ if (OK != collect()) { DEVICE_LOG("collection error"); /* restart the measurement state machine */ start(); return; } /* next phase is measurement */ _collect_phase = false; /* * Is there a collect->measure gap? 
*/ if (_measure_ticks > USEC2TICK(TRONE_CONVERSION_INTERVAL)) { /* schedule a fresh cycle call when we are ready to measure again */ work_queue(HPWORK, &_work, (worker_t)&TRONE::cycle_trampoline, this, _measure_ticks - USEC2TICK(TRONE_CONVERSION_INTERVAL)); return; } } /* measurement phase */ if (OK != measure()) { DEVICE_LOG("measure error"); } /* next phase is collection */ _collect_phase = true; /* schedule a fresh cycle call when the measurement is done */ work_queue(HPWORK, &_work, (worker_t)&TRONE::cycle_trampoline, this, USEC2TICK(TRONE_CONVERSION_INTERVAL)); } void TRONE::print_info() { perf_print_counter(_sample_perf); perf_print_counter(_comms_errors); perf_print_counter(_buffer_overflows); printf("poll interval: %u ticks\n", _measure_ticks); _reports->print_info("report queue"); } /** * Local functions in support of the shell command. */ namespace trone { /* oddly, ERROR is not defined for c++ */ #ifdef ERROR # undef ERROR #endif const int ERROR = -1; TRONE *g_dev; void start(); void stop(); void test(); void reset(); void info(); /** * Start the driver. */ void start() { int fd; if (g_dev != nullptr) { errx(1, "already started"); } /* create the driver */ g_dev = new TRONE(TRONE_BUS); if (g_dev == nullptr) { goto fail; } if (OK != g_dev->init()) { goto fail; } /* set the poll rate to default, starts automatic data collection */ fd = open(TRONE_DEVICE_PATH, O_RDONLY); if (fd < 0) { goto fail; } if (ioctl(fd, SENSORIOCSPOLLRATE, SENSOR_POLLRATE_DEFAULT) < 0) { goto fail; } exit(0); fail: if (g_dev != nullptr) { delete g_dev; g_dev = nullptr; } errx(1, "driver start failed"); } /** * Stop the driver */ void stop() { if (g_dev != nullptr) { delete g_dev; g_dev = nullptr; } else { errx(1, "driver not running"); } exit(0); } /** * Perform some basic functional tests on the driver; * make sure we can collect data from the sensor in polled * and automatic modes. 
*/ void test() { struct distance_sensor_s report; ssize_t sz; int ret; int fd = open(TRONE_DEVICE_PATH, O_RDONLY); if (fd < 0) { err(1, "%s open failed (try 'trone start' if the driver is not running", TRONE_DEVICE_PATH); } /* do a simple demand read */ sz = read(fd, &report, sizeof(report)); if (sz != sizeof(report)) { err(1, "immediate read failed"); } warnx("single read"); warnx("measurement: %0.2f m", (double)report.current_distance); warnx("time: %llu", report.timestamp); /* start the sensor polling at 2Hz */ if (OK != ioctl(fd, SENSORIOCSPOLLRATE, 2)) { errx(1, "failed to set 2Hz poll rate"); } /* read the sensor 50x and report each value */ for (unsigned i = 0; i < 50; i++) { struct pollfd fds; /* wait for data to be ready */ fds.fd = fd; fds.events = POLLIN; ret = poll(&fds, 1, 2000); if (ret != 1) { errx(1, "timed out waiting for sensor data"); } /* now go get it */ sz = read(fd, &report, sizeof(report)); if (sz != sizeof(report)) { err(1, "periodic read failed"); } warnx("periodic read %u", i); warnx("measurement: %0.3f", (double)report.current_distance); warnx("time: %llu", report.timestamp); } /* reset the sensor polling to default rate */ if (OK != ioctl(fd, SENSORIOCSPOLLRATE, SENSOR_POLLRATE_DEFAULT)) { errx(1, "failed to set default poll rate"); } errx(0, "PASS"); } /** * Reset the driver. */ void reset() { int fd = open(TRONE_DEVICE_PATH, O_RDONLY); if (fd < 0) { err(1, "failed "); } if (ioctl(fd, SENSORIOCRESET, 0) < 0) { err(1, "driver reset failed"); } if (ioctl(fd, SENSORIOCSPOLLRATE, SENSOR_POLLRATE_DEFAULT) < 0) { err(1, "driver poll restart failed"); } exit(0); } /** * Print a little info about the driver. */ void info() { if (g_dev == nullptr) { errx(1, "driver not running"); } printf("state @ %p\n", g_dev); g_dev->print_info(); exit(0); } } // namespace int trone_main(int argc, char *argv[]) { /* * Start/load the driver. 
*/ if (!strcmp(argv[1], "start")) { trone::start(); } /* * Stop the driver */ if (!strcmp(argv[1], "stop")) { trone::stop(); } /* * Test the driver/device. */ if (!strcmp(argv[1], "test")) { trone::test(); } /* * Reset the driver. */ if (!strcmp(argv[1], "reset")) { trone::reset(); } /* * Print driver information. */ if (!strcmp(argv[1], "info") || !strcmp(argv[1], "status")) { trone::info(); } errx(1, "unrecognized command, try 'start', 'test', 'reset' or 'info'"); }
/*
** Copyright 2014-2015 Centreon
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
** http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
**
** For more information : contact@centreon.com
*/

#ifndef CCB_NOTIFICATION_FACTORY_HH
#define CCB_NOTIFICATION_FACTORY_HH

#include "com/centreon/broker/io/factory.hh"
#include "com/centreon/broker/namespace.hh"

CCB_BEGIN()

namespace notification {
/**
 *  @class factory factory.hh "com/centreon/broker/notification/factory.hh"
 *  @brief Notification layer factory.
 *
 *  Build Notification layer objects.
 *
 *  Non-copyable: copy construction and assignment are deleted.
 */
class factory : public io::factory {
 public:
  factory() = default;
  factory(factory const& other) = delete;
  ~factory() = default;
  factory& operator=(factory const& other) = delete;
  // Check whether the given endpoint configuration belongs to this layer.
  bool has_endpoint(config::endpoint& cfg);
  // Build a new endpoint from configuration; is_acceptor is set to indicate
  // whether the endpoint accepts or initiates connections.
  io::endpoint* new_endpoint(config::endpoint& cfg,
                             bool& is_acceptor,
                             std::shared_ptr<persistent_cache> cache =
                                 std::shared_ptr<persistent_cache>()) const;
};
}  // namespace notification

CCB_END()

#endif  // !CCB_NOTIFICATION_FACTORY_HH
#include <cmath>
#include <iostream>
#define MAX 1000001
using namespace std;

// Goldbach partition finder (BOJ 6588 style): for every even N read from
// stdin (terminated by 0), print the partition N = a + b with the smallest
// prime a, or complain that the conjecture is wrong.
int main(void) {
    // composite[x] == true marks x as non-prime; 0 and 1 are pre-marked.
    bool composite[MAX] = {true, true};

    ios_base::sync_with_stdio(false);
    cin.tie(NULL);

    // Sieve of Eratosthenes: crossing off from p*p is enough for p <= 1000.
    for (int p = 2; p * p < MAX; ++p) {
        if (!composite[p]) {
            for (int q = p * p; q < MAX; q += p) {
                composite[q] = true;
            }
        }
    }

    int N;
    cin >> N;

    while (N) {
        // Scan for the smallest a with both a and N-a prime.
        int a = 2;

        while (a <= N && (composite[a] || composite[N - a])) {
            ++a;
        }

        if (a > N) {
            cout << "Goldbach's conjecture is wrong." << '\n';
        } else {
            cout << N << " = " << a << " + " << N - a << '\n';
        }

        cin >> N;
    }
}
#include "phi_widget_text.h"

// Text widget: draws a hint line near the top and a value string centred in
// the widget, on both the normal and the pressed canvas.
PHI_Widget_Text::PHI_Widget_Text(int16_t x, int16_t y, int16_t w, int16_t h):
    PHI_Widget_Graphic_Base(x, y, w, h, true, true)
{
}

// Render the widget from a JSON payload with "description", "value" and
// "hint" fields, delegating the background/frame to the graphic base class.
void PHI_Widget_Text::Render(JsonVariant data)
{
  PHI_Widget_Graphic_Base::Render(data);

  String description = data["description"];
  String value = data["value"];
  String hint = data["hint"];

  // Both canvases receive identical text; the original repeated the same
  // four-call setup/draw sequence four times, so factor it into one helper.
  auto drawLabel = [this](const String &text, int16_t x, int16_t y) {
    for (auto *canvas : {this->_Canvas, this->_CanvasPressed}) {
      canvas->setTextSize(TEXT_SIZE);
      canvas->setTextColor(FONT_COLOR);
      canvas->setTextDatum(MC_DATUM);
      canvas->drawString(text.c_str(), x, y);
    }
  };

  // Same draw order as before: hint (both canvases), then value.
  drawLabel(hint, _w/2, 35);
  drawLabel(value, _w/2, _h/2);

  RenderDescriptionLabel(description.c_str());
}
/*
    October Lunchtime 2020 Division 2 - Chef Is Just Throwing Random Words
    https://www.codechef.com/LTIME89B/problems/SSO
*/
#include <bits/stdc++.h>
using namespace std;

// --- short type aliases used throughout the template ---
typedef long long ll;
typedef pair<int, int> pii;
typedef pair<ll, ll> pll;
typedef pair<string, string> pss;
typedef vector<int> vi;
typedef vector<vi> vvi;
typedef vector<pii> vii;
typedef vector<ll> vl;
typedef vector<vl> vvl;

// --- common numeric constants and 8-direction offsets for grid problems ---
double EPS=1e-9;
int INF=1000000005;
long long INFF=1000000000000000005ll;
double PI=acos(-1);
int dirx[8]={ -1, 0, 0, 1, -1, -1, 1, 1 };
int diry[8]={ 0, 1, -1, 0, -1, 1, -1, 1 };
const ll MOD = 1000000007;

// Variadic compile-time sum helper: base case returns 0.
ll sum() { return 0; }
template<typename T, typename... Args>
T sum(T a, Args... args) { return a + sum(args...); }

// --- debugging / IO / loop macro shorthand (standard CP boilerplate) ---
#define DEBUG fprintf(stderr, "====TESTING====\n")
#define VALUE(x) cerr << "The value of " << #x << " is " << x << endl
#define OUT(x) cout << x << endl
#define OUTH(x) cout << x << " "
#define debug(...) fprintf(stderr, __VA_ARGS__)
#define READ(x) for(auto &(z):x) cin >> z;
#define FOR(a, b, c) for (int(a)=(b); (a) < (c); ++(a))
#define FORN(a, b, c) for (int(a)=(b); (a) <= (c); ++(a))
#define FORD(a, b, c) for (int(a)=(b); (a) >= (c); --(a))
#define FORSQ(a, b, c) for (int(a)=(b); (a) * (a) <= (c); ++(a))
#define FORC(a, b, c) for (char(a)=(b); (a) <= (c); ++(a))
#define EACH(a, b) for (auto&(a) : (b))
#define REP(i, n) FOR(i, 0, n)
#define REPN(i, n) FORN(i, 1, n)
#define MAX(a, b) a=max(a, b)
#define MIN(a, b) a=min(a, b)
#define SQR(x) ((ll)(x) * (x))
#define RESET(a, b) memset(a, b, sizeof(a))
#define fi first
#define se second
#define mp make_pair
#define pb push_back
#define ALL(v) v.begin(), v.end()
#define ALLA(arr, sz) arr, arr + sz
#define SIZE(v) (int)v.size()
#define SORT(v) sort(ALL(v))
#define REVERSE(v) reverse(ALL(v))
#define SORTA(arr, sz) sort(ALLA(arr, sz))
#define REVERSEA(arr, sz) reverse(ALLA(arr, sz))
#define PERMUTE next_permutation
#define TC(t) while (t--)
#define FAST_INP ios_base::sync_with_stdio(false);cin.tie(NULL)
#define what_is(x) cerr << #x << " is " << x << endl; void solve() { int n; cin >> n; ll sum = 0, ans = 0, a; REP(i, n) { cin >> a; ans |= a; sum += a; ans |= sum; } OUT(ans); } int main() { FAST_INP; // #ifndef ONLINE_JUDGE // freopen("input.txt","r", stdin); // freopen("output.txt","w", stdout); // #endif int tc; cin >> tc; TC(tc) solve(); return 0; }
/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"

namespace tensorflow {

using shape_inference::InferenceContext;
using shape_inference::ShapeHandle;

// float -> quantized T. Shape fn: output 0 mirrors the input shape, the
// min/max range inputs must be scalars, and outputs 1/2 are scalars.
REGISTER_OP("QuantizeV2")
    .Input("input: float")
    .Input("min_range: float")
    .Input("max_range: float")
    .Output("output: T")
    .Output("output_min: float")
    .Output("output_max: float")
    .Attr("T: quantizedtype")
    .Attr("mode: {'MIN_COMBINED', 'MIN_FIRST'} = 'MIN_COMBINED'")
    .SetShapeFn([](InferenceContext* c) {
      TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c));
      ShapeHandle unused;
      // min_range and max_range must be rank-0 (scalars).
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
      c->set_output(1, c->Scalar());
      c->set_output(2, c->Scalar());
      return Status::OK();
    })
    .Doc(R"doc(
Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.

[min_range, max_range] are scalar floats that specify the range for
the 'input' data. The 'mode' attribute controls exactly which calculations are
used to convert the float values to their quantized equivalents.

In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:

```
out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
if T == qint8, out[i] -= (range(T) + 1) / 2.0
```
here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`

*MIN_COMBINED Mode Example*

Assume the input is type float and has a possible range of [0.0, 6.0] and the
output type is quint8 ([0, 255]). The min_range and max_range values should be
specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
value of the input by 255/6 and cast to quint8.

If the output type was qint8 ([-128, 127]), the operation will additionally
subtract each value by 128 prior to casting, so that the range of values aligns
with the range of qint8.

If the mode is 'MIN_FIRST', then this approach is used:

```
number_of_steps = 1 << (# of bits in T)
range_adjust = number_of_steps / (number_of_steps - 1)
range = (range_max - range_min) * range_adjust
range_scale = number_of_steps / range
quantized = round(input * range_scale) - round(range_min * range_scale) +
  numeric_limits<T>::min()
quantized = max(quantized, numeric_limits<T>::min())
quantized = min(quantized, numeric_limits<T>::max())
```

The biggest difference between this and MIN_COMBINED is that the minimum range
is rounded first, before it's subtracted from the rounded value. With
MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
and dequantizing will introduce a larger and larger error.

One thing to watch out for is that the operator may choose to adjust the
requested minimum and maximum values slightly during the quantization process,
so you should always use the output ports as the range for further calculations.
For example, if the requested minimum and maximum values are close to equal,
they will be separated by a small epsilon value to prevent ill-formed quantized
buffers from being created. Otherwise, you can end up with buffers where all the
quantized values map to the same float value, which causes problems for
operations that have to perform further calculations on them.

min_range: The minimum scalar value possibly produced for the input.
max_range: The maximum scalar value possibly produced for the input.
output: The quantized data produced from the float input.
output_min: The actual minimum scalar value used for the output.
output_max: The actual maximum scalar value used for the output.

)doc");

// quantized T -> float. Same shape-inference contract as QuantizeV2 minus the
// scalar outputs.
REGISTER_OP("Dequantize")
    .Input("input: T")
    .Input("min_range: float")
    .Input("max_range: float")
    .Output("output: float")
    .Attr("T: quantizedtype")
    .Attr("mode: {'MIN_COMBINED', 'MIN_FIRST'} = 'MIN_COMBINED'")
    .SetShapeFn([](InferenceContext* c) {
      TF_RETURN_IF_ERROR(shape_inference::UnchangedShape(c));
      ShapeHandle unused;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &unused));
      TF_RETURN_IF_ERROR(c->WithRank(c->input(2), 0, &unused));
      return Status::OK();
    })
    .Doc(R"doc(
Dequantize the 'input' tensor into a float Tensor.

[min_range, max_range] are scalar floats that specify the range for
the 'input' data. The 'mode' attribute controls exactly which calculations are
used to convert the float values to their quantized equivalents.

In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:

```
if T == qint8, in[i] += (range(T) + 1)/ 2.0
out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
```
here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`

*MIN_COMBINED Mode Example*

If the input comes from a QuantizedRelu6, the output type is
quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
0-6.  The min_range and max_range values are therefore 0.0 and 6.0.
Dequantize on quint8 will take each value, cast to float, and multiply
by 6 / 255.
Note that if quantizedtype is qint8, the operation will additionally add
each value by 128 prior to casting.

If the mode is 'MIN_FIRST', then this approach is used:

```
number_of_steps = 1 << (# of bits in T)
range_adjust = number_of_steps / (number_of_steps - 1)
range = (range_max - range_min) * range_adjust
range_scale = range / number_of_steps
const double offset_input = static_cast<double>(input) - lowest_quantized;
result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
```

min_range: The minimum scalar value possibly produced for the input.
max_range: The maximum scalar value possibly produced for the input.

)doc");

// Concat over N quantized tensors; all per-input min/max inputs (inputs 2..)
// must be scalars, and the two range outputs are scalars.
REGISTER_OP("QuantizedConcat")
    .Input("concat_dim: int32")
    .Input("values: N * T")
    .Input("input_mins: N * float32")
    .Input("input_maxes: N * float32")
    .Output("output: T")
    .Output("output_min: float")
    .Output("output_max: float")
    .Attr("N: int >= 2")
    .Attr("T: type")
    .SetShapeFn([](InferenceContext* c) {
      TF_RETURN_IF_ERROR(shape_inference::ConcatShape(c));
      ShapeHandle unused;
      for (int i = 2; i < c->num_inputs(); ++i) {
        TF_RETURN_IF_ERROR(c->WithRank(c->input(i), 0, &unused));
      }
      c->set_output(1, c->Scalar());
      c->set_output(2, c->Scalar());
      return Status::OK();
    })
    .Doc(R"doc(
Concatenates quantized tensors along one dimension.

concat_dim: 0-D.  The dimension along which to concatenate.  Must be in the
  range [0, rank(values)).
values: The `N` Tensors to concatenate. Their ranks and types must match,
  and their sizes must match in all dimensions except `concat_dim`.
input_mins: The minimum scalar values for each of the input tensors.
input_maxes: The maximum scalar values for each of the input tensors.
output_min: The float value that the minimum quantized output value represents.
output_max: The float value that the maximum quantized output value represents.
output: A `Tensor` with the concatenation of values stacked along the
  `concat_dim` dimension.  This tensor's shape matches that of `values` except
  in `concat_dim` where it has the sum of the sizes.

)doc");

}  // namespace tensorflow
#pragma once

#include <thread>

// NatNet client wrapper owning a single background thread.
// NOTE(review): the semantics of natnet_rx/sample_data/velocity_thread are
// inferred from their names only -- confirm against the implementation file.
class NatNet {
 private:
  std::thread natnet_thread_;  // background worker; lifetime managed by ctor/dtor

 public:
  NatNet();
  ~NatNet();
  // Receive loop entry point (presumably run on natnet_thread_ -- verify).
  void natnet_rx();
  void sample_data();
  void velocity_thread();
};
#include "core.h"
#include "wbactionrodinblackboardwrite.h"
#include "configmanager.h"
#include "Components/wbcomprodinblackboard.h"
#include "wbactionstack.h"

WBActionRodinBlackboardWrite::WBActionRodinBlackboardWrite()
:	m_BlackboardKey()
,	m_ValuePE()
{
}

WBActionRodinBlackboardWrite::~WBActionRodinBlackboardWrite()
{
}

// Reads the action's config: the blackboard key to write and the parameter
// expression whose evaluated value will be stored under that key.
/*virtual*/ void WBActionRodinBlackboardWrite::InitializeFromDefinition( const SimpleString& DefinitionName )
{
	WBAction::InitializeFromDefinition( DefinitionName );

	MAKEHASH( DefinitionName );

	STATICHASH( BlackboardKey );
	m_BlackboardKey = ConfigManager::GetHash( sBlackboardKey, HashedString::NullString, sDefinitionName );

	STATICHASH( ValuePE );
	m_ValuePE.InitializeFromDefinition( ConfigManager::GetString( sValuePE, "", sDefinitionName ) );
}

// Evaluates the configured expression in the context of the owning entity and
// writes the result into that entity's Rodin blackboard component.
/*virtual*/ void WBActionRodinBlackboardWrite::Execute()
{
	WBEntity* const pEntity = GetEntity();
	DEVASSERT( pEntity );

	WBCompRodinBlackboard* const pBlackboard = GET_WBCOMP( pEntity, RodinBlackboard );
	ASSERT( pBlackboard );

	WBParamEvaluator::SPEContext Context;
	Context.m_Entity = pEntity;
	m_ValuePE.Evaluate( Context );

	pBlackboard->Set( m_BlackboardKey, m_ValuePE );
}
/******************************************************************************
 * Copyright 2017 Baidu Robotic Vision Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *****************************************************************************/
#include "IBA/IBA.h"
#include "feature_utils.h"
#include "image_utils.h"
#include "xp_quaternion.h"
#include "param.h"  // calib
#include "basic_datatype.h"
#include "iba_helper.h"
#include "pose_viewer.h"
#include <boost/filesystem.hpp>
#include <boost/lexical_cast.hpp>
#include <glog/logging.h>
#include <gflags/gflags.h>
#include <opencv2/core.hpp>
#include <algorithm>
#include <string>
#include <fstream>
#include <vector>

namespace fs = boost::filesystem;
using std::string;
using std::vector;

// Command-line flags controlling dataset location and feature detection.
DEFINE_string(imgs_folder, "", "The folder containing l and r folders, and the calib.yaml");
DEFINE_int32(grid_row_num, 1, "Number of rows of detection grids");
DEFINE_int32(grid_col_num, 1, "Number of cols of detection grids");
DEFINE_int32(max_num_per_grid, 70, "Max number of points per grid");
DEFINE_double(feat_quality, 0.07, "Tomasi-Shi feature quality level");
DEFINE_double(feat_min_dis, 10, "Tomasi-Shi feature minimal distance");
DEFINE_bool(not_use_fast, false, "Whether or not use FAST");
DEFINE_int32(pyra_level, 2, "Total pyramid levels");
DEFINE_int32(start_idx, 0, "The image index of the first detection (from 0)");
DEFINE_int32(end_idx, -1, "The image index of the last detection");
DEFINE_double(uniform_radius, 40, "< 5 disables uniformaty enforcement"); DEFINE_int32(ft_len, 125, "The feature track length threshold when dropout kicks in"); DEFINE_double(ft_droprate, 0.05, "The drop out rate when a feature track exceeds ft_len"); DEFINE_bool(show_feat_only, false, "wether or not show detection results only"); DEFINE_int32(fast_thresh, 10, "FAST feature threshold (only meaningful if use_fast=true)"); DEFINE_double(min_feature_distance_over_baseline_ratio, 4, "Used for slave image feature detection"); DEFINE_double(max_feature_distance_over_baseline_ratio, 3000, "Used for slave image feature detection"); DEFINE_string(iba_param_path, "", "iba parameters path"); DEFINE_string(gba_camera_save_path, "", "Save the camera states to when finished"); size_t load_image_data(const string& image_folder, std::vector<string> &limg_name, std::vector<string> &rimg_name) { LOG(INFO) << "Loading " << image_folder; std::string l_path = image_folder + "/mav0/cam0/data.csv"; std::string r_path = image_folder + "/mav0/cam1/data.csv"; std::string r_img_prefix = image_folder + "/mav0/cam1/data/"; std::ifstream limg_file(l_path); std::ifstream rimg_file(r_path); if (!limg_file.is_open() || !rimg_file.is_open()) { LOG(WARNING) << image_folder << " cannot be opened"; return 0; } std::string line; std::string time; while (getline(limg_file,line)) { if (line[0] == '#') continue; std::istringstream is(line); int i = 0; while (getline(is, time, ',')){ bool is_exist = boost::filesystem::exists(r_img_prefix + time + ".png"); if (i == 0 && is_exist){ limg_name.push_back(time + ".png"); rimg_name.push_back(time + ".png"); } i++; } } limg_file.close(); rimg_file.close(); LOG(INFO)<< "loaded " << limg_name.size() << " images"; return limg_name.size(); } size_t load_imu_data(const string& imu_file_str, std::list<XP::ImuData>* imu_samples_ptr, uint64_t &offset_ts_ns) { CHECK(imu_samples_ptr != NULL); LOG(INFO) << "Loading " << imu_file_str; std::ifstream 
imu_file(imu_file_str.c_str()); if (!imu_file.is_open()) { LOG(WARNING) << imu_file_str << " cannot be opened"; return 0; } std::list<XP::ImuData>& imu_samples = *imu_samples_ptr; imu_samples.clear(); // read imu data std::string line; std::string item; double c[6]; uint64_t t; bool set_offset_time = false; while (getline(imu_file,line)) { if (line[0] == '#') continue; std::istringstream is(line); int i = 0; while (getline(is, item, ',')) { std::stringstream ss; ss << item; if (i == 0) ss >> t; else ss >> c[i-1]; i++; } if (!set_offset_time) { set_offset_time = true; offset_ts_ns = t; } XP::ImuData imu_sample; float _t_100us = (t - offset_ts_ns)/1e5; imu_sample.time_stamp = _t_100us/1e4; imu_sample.ang_v(0) = c[0]; imu_sample.ang_v(1) = c[1]; imu_sample.ang_v(2) = c[2]; imu_sample.accel(0) = c[3]; imu_sample.accel(1) = c[4]; imu_sample.accel(2) = c[5]; VLOG(3) << "accel " << imu_sample.accel.transpose() << " gyro " << imu_sample.ang_v.transpose(); imu_samples.push_back(imu_sample); } imu_file.close(); LOG(INFO)<< "loaded " << imu_samples.size() << " imu samples"; return imu_samples.size(); } void load_asl_calib(const std::string &asl_path, XP::DuoCalibParam &calib_param) { std::string cam0_yaml = asl_path + "/mav0/cam0/sensor.yaml"; std::string cam1_yaml = asl_path + "/mav0/cam1/sensor.yaml"; std::string imu0_yaml = asl_path + "/mav0/imu0/sensor.yaml"; YAML::Node cam0_calib = YAML::LoadFile(cam0_yaml); YAML::Node cam1_calib = YAML::LoadFile(cam1_yaml); YAML::Node imu0_calib = YAML::LoadFile(imu0_yaml); // intrinsics std::vector<float> v_float = cam0_calib["intrinsics"].as<std::vector<float>>(); calib_param.Camera.cv_camK_lr[0] << v_float[0], 0, v_float[2], 0, v_float[1], v_float[3], 0, 0, 1; calib_param.Camera.cameraK_lr[0] << v_float[0], 0, v_float[2], 0, v_float[1], v_float[3], 0, 0, 1; v_float = cam1_calib["intrinsics"].as<std::vector<float>>(); calib_param.Camera.cv_camK_lr[1] << v_float[0], 0, v_float[2], 0, v_float[1], v_float[3], 0, 0, 1; 
calib_param.Camera.cameraK_lr[1] << v_float[0], 0, v_float[2], 0, v_float[1], v_float[3], 0, 0, 1; // distortion_coefficients std::vector<double> v_double = cam0_calib["distortion_coefficients"].as<std::vector<double>>(); calib_param.Camera.cv_dist_coeff_lr[0] = (cv::Mat_<float>(8, 1) << static_cast<float>(v_double[0]), static_cast<float>(v_double[1]), static_cast<float>(v_double[2]), static_cast<float>(v_double[3]), 0.0, 0.0, 0.0, 0.0); v_double = cam1_calib["distortion_coefficients"].as<std::vector<double>>(); calib_param.Camera.cv_dist_coeff_lr[1] = (cv::Mat_<float>(8, 1) << static_cast<float>(v_double[0]), static_cast<float>(v_double[1]), static_cast<float>(v_double[2]), static_cast<float>(v_double[3]), 0.0, 0.0, 0.0, 0.0); //TBS v_double = cam0_calib["T_BS"]["data"].as<std::vector<double>>(); Eigen::Matrix4d b_t_c0 = Eigen::Map<Eigen::Matrix<double, 4, 4, Eigen::RowMajor>>(&v_double[0]); v_double = cam1_calib["T_BS"]["data"].as<std::vector<double>>(); Eigen::Matrix4d b_t_c1 = Eigen::Map<Eigen::Matrix<double, 4, 4, Eigen::RowMajor>>(&v_double[0]); v_double = imu0_calib["T_BS"]["data"].as<std::vector<double>>(); Eigen::Matrix4d b_t_i = Eigen::Map<Eigen::Matrix<double, 4, 4, Eigen::RowMajor>>(&v_double[0]); // ASL {B}ody frame is the IMU // {D}evice frame is the left camera Eigen::Matrix4d d_t_cam0 = Eigen::Matrix4d::Identity(); Eigen::Matrix4d d_t_b = d_t_cam0 * b_t_c0.inverse(); Eigen::Matrix4d d_t_cam1 = d_t_b * b_t_c1; Eigen::Matrix4d d_t_imu = d_t_b * b_t_i; calib_param.Camera.D_T_C_lr[0] = Eigen::Matrix4f::Identity(); calib_param.Camera.D_T_C_lr[1] = d_t_cam1.cast<float>(); // Image size std::vector<int> v_int = cam0_calib["resolution"].as<std::vector<int>>(); calib_param.Camera.img_size = cv::Size(v_int[0], v_int[1]); // IMU calib_param.Imu.accel_TK = Eigen::Matrix3f::Identity(); calib_param.Imu.accel_bias = Eigen::Vector3f::Zero(); calib_param.Imu.gyro_TK = Eigen::Matrix3f::Identity(); calib_param.Imu.gyro_bias = Eigen::Vector3f::Zero(); 
calib_param.Imu.accel_noise_var = Eigen::Vector3f{0.0016, 0.0016, 0.0016}; calib_param.Imu.angv_noise_var = Eigen::Vector3f{0.0001, 0.0001, 0.0001}; calib_param.Imu.D_T_I = d_t_imu.cast<float>(); calib_param.device_id = "ASL"; calib_param.sensor_type = XP::DuoCalibParam::SensorType::UNKNOWN; calib_param.initUndistortMap(calib_param.Camera.img_size); } float get_timestamp_from_img_name(const string& img_name, uint64_t offset_ns) { string ts_ns_string = fs::path(img_name).stem().string(); int64_t offset_t = boost::lexical_cast<uint64_t>(ts_ns_string) - offset_ns; int64_t t = offset_t/1e5; return static_cast<float>(t)/1e4; } bool convert_to_asl_timestamp(const string& file_in, const string& file_out, uint64_t offset_ns) { FILE *fp_in = fopen(file_in.c_str(), "r"); FILE *fp_out = fopen(file_out.c_str(), "w"); if (!fp_in || !fp_out) { LOG(ERROR) << "convert to asl timestamp error"; return false; } float t; float x, y, z; float qx, qy, qz, qw; while (fscanf(fp_in, "%f %f %f %f %f %f %f %f", &t, &x, &y, &z, &qx, &qy, &qz, &qw) == 8) { double t_s = t + static_cast<double>(offset_ns*1e-9); fprintf(fp_out, "%lf %f %f %f %f %f %f %f\n", t_s, x, y, z, qx, qy, qz, qw); } fclose(fp_in); fclose(fp_out); } inline bool cmp_by_class_id(const cv::KeyPoint& lhs, const cv::KeyPoint& rhs) { return lhs.class_id < rhs.class_id; } template <typename T> void InitPOD(T& t) { memset(&t, 0, sizeof(t)); } bool create_iba_frame(const vector<cv::KeyPoint>& kps_l, const vector<cv::KeyPoint>& kps_r, const vector<XP::ImuData>& imu_samples, const float rig_time, IBA::CurrentFrame* ptrCF, IBA::KeyFrame* ptrKF) { CHECK(std::is_sorted(kps_l.begin(), kps_l.end(), cmp_by_class_id)); CHECK(std::is_sorted(kps_r.begin(), kps_r.end(), cmp_by_class_id)); CHECK(std::includes(kps_l.begin(), kps_l.end(), kps_r.begin(), kps_r.end(), cmp_by_class_id)); // IBA will handle *unknown* initial depth values IBA::Depth kUnknownDepth; kUnknownDepth.d = 0.0f; kUnknownDepth.s2 = 0.0f; static int last_added_point_id = -1; 
static int iba_iFrm = 0; auto kp_it_l = kps_l.cbegin(), kp_it_r = kps_r.cbegin(); IBA::CurrentFrame& CF = *ptrCF; IBA::KeyFrame& KF = *ptrKF; CF.iFrm = iba_iFrm; InitPOD(CF.C); // needed to ensure the dumped frame deterministic even for unused field CF.C.C.R[0][0] = CF.C.v[0] = CF.C.ba[0] = CF.C.bw[0] = FLT_MAX; // MapPointMeasurement, process in ascending class id, left camera to right // Note the right keypoints is a subset of the left ones IBA::MapPointMeasurement mp_mea; InitPOD(mp_mea); mp_mea.x.S[0][0] = mp_mea.x.S[1][1] = 1.f; mp_mea.x.S[0][1] = mp_mea.x.S[1][0] = 0.f; for (; kp_it_l != kps_l.cend() && kp_it_l->class_id <= last_added_point_id; ++kp_it_l) { mp_mea.idx = kp_it_l->class_id; mp_mea.x.x[0] = kp_it_l->pt.x; mp_mea.x.x[1] = kp_it_l->pt.y; mp_mea.right = false; CF.zs.push_back(mp_mea); if (kp_it_r != kps_r.cend() && kp_it_r->class_id == kp_it_l->class_id) { mp_mea.x.x[0] = kp_it_r->pt.x; mp_mea.x.x[1] = kp_it_r->pt.y; mp_mea.right = true; CF.zs.push_back(mp_mea); ++kp_it_r; } } std::transform(imu_samples.begin(), imu_samples.end(), std::back_inserter(CF.us), XP::to_iba_imu); CF.t = rig_time; CF.d = kUnknownDepth; bool need_new_kf = std::distance(kp_it_l, kps_l.end()) >= 20 || CF.zs.size() < 20; if (!need_new_kf) KF.iFrm = -1; else LOG(INFO) << "new keyframe " << KF.iFrm; if (!need_new_kf) { KF.iFrm = -1; // to make it deterministic InitPOD(KF.C); InitPOD(KF.d); } else { KF.iFrm = CF.iFrm; KF.C = CF.C.C; // MapPointMeasurement, duplication of CF KF.zs = CF.zs; // MapPoint for(; kp_it_l != kps_l.cend(); ++kp_it_l) { IBA::MapPoint mp; InitPOD(mp.X); mp.X.idx = kp_it_l->class_id; mp.X.X[0] = FLT_MAX; mp_mea.iFrm = iba_iFrm; mp_mea.x.x[0] = kp_it_l->pt.x; mp_mea.x.x[1] = kp_it_l->pt.y; mp_mea.right = false; mp.zs.push_back(mp_mea); if (kp_it_r != kps_r.cend() && kp_it_r->class_id == kp_it_l->class_id) { mp_mea.x.x[0] = kp_it_r->pt.x; mp_mea.x.x[1] = kp_it_r->pt.y; mp_mea.right = true; mp.zs.push_back(mp_mea); kp_it_r++; } else { LOG(WARNING) << "add new 
feature point " << kp_it_l->class_id << " only found in left image"; } KF.Xs.push_back(mp); } last_added_point_id = std::max(KF.Xs.back().X.idx, last_added_point_id); KF.d = kUnknownDepth; } ++iba_iFrm; return true; } int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); google::ParseCommandLineFlags(&argc, &argv, true); google::InstallFailureSignalHandler(); if (FLAGS_imgs_folder.empty()) { google::ShowUsageWithFlags(argv[0]); return -1; } vector<string> img_file_paths; vector<string> slave_img_file_paths; constexpr int reserve_num = 5000; img_file_paths.reserve(reserve_num); slave_img_file_paths.reserve(reserve_num); fs::path p(FLAGS_imgs_folder + "/mav0/cam0"); if (!fs::is_directory(p)) { LOG(ERROR) << p << " is not a directory"; return -1; } vector<string> limg_name, rimg_name; load_image_data(FLAGS_imgs_folder, limg_name, rimg_name); for (int i=0; i<limg_name.size(); i++) { string l_png = p.string() + "/data/" + limg_name[i]; img_file_paths.push_back(l_png); slave_img_file_paths.push_back(FLAGS_imgs_folder + "/mav0/cam1/data/" + rimg_name[i]); } if (img_file_paths.size() == 0) { LOG(ERROR) << "No image files for detection"; return -1; } XP::DuoCalibParam duo_calib_param; try { load_asl_calib(FLAGS_imgs_folder, duo_calib_param); } catch (...){ LOG(ERROR) << "Load calibration file error"; return -1; } // Create masks based on FOVs computed from intrinsics std::vector<cv::Mat_<uchar> > masks(2); for (int lr = 0; lr < 2; ++lr) { float fov; if (XP::generate_cam_mask(duo_calib_param.Camera.cv_camK_lr[lr], duo_calib_param.Camera.cv_dist_coeff_lr[lr], duo_calib_param.Camera.img_size, &masks[lr], &fov)) { std::cout << "camera " << lr << " fov: " << fov << " deg\n"; } } // Load IMU samples to predict OF point locations std::list<XP::ImuData> imu_samples; std::string imu_file = FLAGS_imgs_folder + "/mav0/imu0/data.csv"; uint64_t offset_ts_ns; if (load_imu_data(imu_file, &imu_samples, offset_ts_ns) > 0) { std::cout << "Load imu data. 
Enable OF prediciton with gyro\n"; } else { std::cout << "Cannot load imu data.\n"; return -1; } // Adjust end image index for detection if (FLAGS_end_idx < 0 || FLAGS_end_idx > img_file_paths.size()) { FLAGS_end_idx = img_file_paths.size(); } FLAGS_start_idx = std::max(0, FLAGS_start_idx); // remove all frames before the first IMU data while (FLAGS_start_idx < FLAGS_end_idx && get_timestamp_from_img_name(img_file_paths[FLAGS_start_idx], offset_ts_ns) <= imu_samples.front().time_stamp) FLAGS_start_idx++; XP::FeatureTrackDetector feat_track_detector(FLAGS_ft_len, FLAGS_ft_droprate, !FLAGS_not_use_fast, FLAGS_uniform_radius, duo_calib_param.Camera.img_size); XP::ImgFeaturePropagator slave_img_feat_propagator( duo_calib_param.Camera.cameraK_lr[1], // cur_camK duo_calib_param.Camera.cameraK_lr[0], // ref_camK duo_calib_param.Camera.cv_dist_coeff_lr[1], // cur_dist_coeff duo_calib_param.Camera.cv_dist_coeff_lr[0], // ref_dist_coeff masks[1], FLAGS_pyra_level, FLAGS_min_feature_distance_over_baseline_ratio, FLAGS_max_feature_distance_over_baseline_ratio); const Eigen::Matrix4f T_Cl_Cr = duo_calib_param.Camera.D_T_C_lr[0].inverse() * duo_calib_param.Camera.D_T_C_lr[1]; XP::PoseViewer pose_viewer; pose_viewer.set_clear_canvas_before_draw(true); IBA::Solver solver; Eigen::Vector3f last_position = Eigen::Vector3f::Zero(); float travel_dist = 0.f; solver.Create(to_iba_calibration(duo_calib_param), 257, IBA_VERBOSE_NONE, IBA_DEBUG_NONE, 257, FLAGS_iba_param_path, "" /* iba directory */); solver.SetCallbackLBA([&](const int iFrm, const float ts) { #ifndef __DUO_VIO_TRACKER_NO_DEBUG__ VLOG(1) << "===== start ibaCallback at ts = " << ts; #endif // as we may be able to send out information directly in the callback arguments IBA::SlidingWindow sliding_window; solver.GetSlidingWindow(&sliding_window); const IBA::CameraIMUState& X = sliding_window.CsLF.back(); const IBA::CameraPose& C = X.C; Eigen::Matrix4f W_vio_T_S = Eigen::Matrix4f::Identity(); // W_vio_T_S for (int i = 0; i < 3; 
++i) { W_vio_T_S(i, 3) = C.p[i]; for (int j = 0; j < 3; ++j) { W_vio_T_S(i, j) = C.R[j][i]; // C.R is actually R_SW } } Eigen::Matrix<float, 9, 1> speed_and_biases; for (int i = 0; i < 3; ++i) { speed_and_biases(i) = X.v[i]; speed_and_biases(i + 3) = X.ba[i]; speed_and_biases(i + 6) = X.bw[i]; } Eigen::Vector3f cur_position = W_vio_T_S.topRightCorner(3, 1); travel_dist += (cur_position - last_position).norm(); last_position = cur_position; pose_viewer.addPose(W_vio_T_S, speed_and_biases, travel_dist); }); solver.Start(); float prev_time_stamp = 0.0f; // load previous image std::vector<cv::KeyPoint> pre_image_key_points; cv::Mat pre_image_features; for (int it_img = FLAGS_start_idx; it_img < FLAGS_end_idx; ++it_img) { VLOG(0) << " start detection at ts = " << fs::path(img_file_paths[it_img]).stem().string(); auto read_img_start = std::chrono::high_resolution_clock::now(); cv::Mat img_in_raw; img_in_raw = cv::imread(img_file_paths[it_img], CV_LOAD_IMAGE_GRAYSCALE); CHECK_EQ(img_in_raw.rows, duo_calib_param.Camera.img_size.height); CHECK_EQ(img_in_raw.cols, duo_calib_param.Camera.img_size.width); cv::Mat img_in_smooth; cv::blur(img_in_raw, img_in_smooth, cv::Size(3, 3)); if (img_in_smooth.rows == 0) { LOG(ERROR) << "Cannot load " << img_file_paths[it_img]; return -1; } // get timestamp from image file name (s) const float time_stamp = get_timestamp_from_img_name(img_file_paths[it_img], offset_ts_ns); std::vector<cv::KeyPoint> key_pnts; cv::Mat orb_feat; cv::Mat pre_img_in_smooth; // load slave image cv::Mat slave_img_smooth; // for visualization later std::vector<cv::KeyPoint> key_pnts_slave; cv::Mat orb_feat_slave; std::vector<XP::ImuData> imu_meas; // Get the imu measurements within prev_time_stamp and time_stamp to compute old_R_new imu_meas.reserve(10); for (auto it_imu = imu_samples.begin(); it_imu != imu_samples.end(); ) { if (it_imu->time_stamp < time_stamp) { imu_meas.push_back(*it_imu); it_imu++; imu_samples.pop_front(); } else { break; } } VLOG(1) << 
"imu_meas size = " << imu_meas.size(); VLOG(1) << "img ts prev -> curr " << prev_time_stamp << " -> " << time_stamp; if (imu_meas.size() > 0) { VLOG(1) << "imu ts prev -> curr " << imu_meas.front().time_stamp << " -> " << imu_meas.back().time_stamp; } if (!slave_img_file_paths.empty()) { if (!slave_img_file_paths[it_img].empty()) { cv::Mat slave_img_in; slave_img_in = cv::imread(slave_img_file_paths[it_img], CV_LOAD_IMAGE_GRAYSCALE); cv::blur(slave_img_in, slave_img_smooth, cv::Size(3, 3)); } } // use optical flow from the 1st frame if (it_img != FLAGS_start_idx) { CHECK(it_img >= 1); VLOG(1) << "pre_image_key_points.size(): " << pre_image_key_points.size(); const int request_feat_num = FLAGS_max_num_per_grid * FLAGS_grid_row_num * FLAGS_grid_col_num; feat_track_detector.build_img_pyramids(img_in_smooth, XP::FeatureTrackDetector::BUILD_TO_CURR); if (imu_meas.size() > 1) { // Here we simply the transformation chain to rotation only and assume zero translation cv::Matx33f old_R_new; XP::XpQuaternion I_new_q_I_old; // The rotation between the new {I} and old {I} for (size_t i = 1; i < imu_meas.size(); ++i) { XP::XpQuaternion q_end; XP::IntegrateQuaternion(imu_meas[i - 1].ang_v, imu_meas[i].ang_v, I_new_q_I_old, imu_meas[i].time_stamp - imu_meas[i - 1].time_stamp, &q_end); I_new_q_I_old = q_end; } Eigen::Matrix3f I_new_R_I_old = I_new_q_I_old.ToRotationMatrix(); Eigen::Matrix4f I_T_C = duo_calib_param.Imu.D_T_I.inverse() * duo_calib_param.Camera.D_T_C_lr[0]; Eigen::Matrix3f I_R_C = I_T_C.topLeftCorner<3, 3>(); Eigen::Matrix3f C_new_R_C_old = I_R_C.transpose() * I_new_R_I_old * I_R_C; for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { old_R_new(j, i) = C_new_R_C_old(i, j); } } if (VLOG_IS_ON(1)) { XP::XpQuaternion C_new_q_C_old; C_new_q_C_old.SetFromRotationMatrix(C_new_R_C_old); VLOG(1) << "C_new_R_C_old = \n" << C_new_R_C_old; VLOG(1) << "ea =\n" << C_new_q_C_old.ToEulerRadians() * 180 / M_PI; } feat_track_detector.optical_flow_and_detect(masks[0], 
pre_image_features, pre_image_key_points, request_feat_num, FLAGS_pyra_level, FLAGS_fast_thresh, &key_pnts, &orb_feat, cv::Vec2f(0, 0), // shift init pixels &duo_calib_param.Camera.cv_camK_lr[0], &duo_calib_param.Camera.cv_dist_coeff_lr[0], &old_R_new); } else { feat_track_detector.optical_flow_and_detect(masks[0], pre_image_features, pre_image_key_points, request_feat_num, FLAGS_pyra_level, FLAGS_fast_thresh, &key_pnts, &orb_feat); } feat_track_detector.update_img_pyramids(); VLOG(1) << "after OF key_pnts.size(): " << key_pnts.size() << " requested # " << FLAGS_max_num_per_grid * FLAGS_grid_row_num * FLAGS_grid_col_num; } else { // first frame feat_track_detector.detect(img_in_smooth, masks[0], FLAGS_max_num_per_grid * FLAGS_grid_row_num * FLAGS_grid_col_num, FLAGS_pyra_level, FLAGS_fast_thresh, &key_pnts, &orb_feat); feat_track_detector.build_img_pyramids(img_in_smooth, XP::FeatureTrackDetector::BUILD_TO_PREV); } if (slave_img_smooth.rows > 0) { CHECK(orb_feat_slave.empty()); auto det_slave_img_start = std::chrono::high_resolution_clock::now(); slave_img_feat_propagator.PropagateFeatures(slave_img_smooth, // cur img_in_smooth, // ref key_pnts, T_Cl_Cr, // T_ref_cur &key_pnts_slave, &orb_feat_slave, false); // draw_debug VLOG(1) << "detect slave key_pnts.size(): " << key_pnts.size() << " takes " << std::chrono::duration_cast<std::chrono::microseconds>( std::chrono::high_resolution_clock::now() - det_slave_img_start).count() / 1e3 << " ms"; } std::sort(key_pnts.begin(), key_pnts.end(), cmp_by_class_id); std::sort(key_pnts_slave.begin(), key_pnts_slave.end(), cmp_by_class_id); // push to IBA IBA::CurrentFrame CF; IBA::KeyFrame KF; create_iba_frame(key_pnts, key_pnts_slave, imu_meas, time_stamp, &CF, &KF); solver.PushCurrentFrame(CF, KF.iFrm == -1 ? 
nullptr : &KF); pre_image_key_points = key_pnts; pre_image_features = orb_feat.clone(); // show pose pose_viewer.displayTo("trajectory"); cv::waitKey(1); prev_time_stamp = time_stamp; } std::string temp_file = "/tmp/" + std::to_string(offset_ts_ns) + ".txt"; solver.SaveCamerasGBA(temp_file, false /* append */, true /* pose only */); solver.Stop(); solver.Destroy(); // for comparsion with asl groundtruth convert_to_asl_timestamp(temp_file, FLAGS_gba_camera_save_path, offset_ts_ns); return 0; }
#pragma once

// addrinfo_cache - a cache of the results of calling getaddrinfo.
//
// addrinfo_cache addresses the overhead and unpredictability of
// calling getaddrinfo.  Usually, getaddrinfo returns in under a
// millisecond.  But sometimes it accesses the filesystem
// (/etc/hosts), or network services (DNS) and (especially under
// heavy load, or during network outages) it can take seconds or
// even minutes to return.
//
// Values "returned" by getaddrinfo are recorded in a:
//
//   struct addrinfo_result{
//       int status;
//       struct addrinfo* aip;
//       int eno; // non-zero only if status==EAI_SYSTEM
//   }
//
// which are cached so they can be found quickly - avoiding system
// calls and network traffic on the caller's critical path.
//
// The lookup() member takes arguments that are analogous to
// getaddrinfo's and returns a shared_ptr to an addrinfo_result.
//
//   std::shared_ptr<addrinfo_result>
//   addrinfo_cache::lookup(const std::string& name,
//                          const std::string& service,
//                          addrinfo* hints = nullptr)
//
// if a result is in the cache, then return it.
//
// if a result is not in the cache, call getaddrinfo, and if the
// status is not EAI_AGAIN, record the result in the cache.  Return
// the result.
//
// N.B.  If name or service is an empty string, the corresponding
// argument to getaddrinfo will be a nullptr.
//
// The cache is refreshed explicitly, at the caller's request by:
//
//   void
//   addrinfo_cache::refresh(size_t max_size=100)
//
// which removes least recently used entries until the cache has
// fewer entries than the specified max_size, then refreshes every
// remaining cached addrinfo_result by calling getaddrinfo again,
// with the same arguments.  The cached entry is left unchanged if
// getaddrinfo returns EAI_AGAIN.
//
// Refresh might take a long time and is expected to be called from
// time to time, in a background thread, off the critical path.
// E.g., something like:
//
//   core123::addrinfo_cache aic;
//   core123::periodic refresh_thread{[&](){
//       aic.refresh();
//       return std::chrono::minutes(1);
//   }};
//
// Theoretically, very large caches can be accommodated, but since
// refresh takes time proportional to the cache size, care should be
// taken with caches much bigger than a few hundred entries.
// addrinfo_cache is probably not the right tool to manage millions
// of names.
//
// Informational methods:
//   size_t size() - returns the number of names in the cache.
//   size_t eai_again_count() - returns the number of times getaddrinfo
//       has returned EAI_AGAIN.
//
// THREAD SAFETY: all member functions are thread safe and are
// synchronized so they may be called concurrently.
//
// BLOCKING: Calls to getaddrinfo are potentially slow and may block
// for seconds or minutes.  But getaddrinfo is only called when
// lookup() misses the cache (the first time a particular
// combination of args is presented to lookup) and by refresh() (off
// the critical path).
//
// CAVEATS: Note that the cache grows whenever a lookup is called with
// 'new' arguments.  The cache shrinks when refresh is called.  The
// addrinfo_cache should perform well up to a max_size of a few
// hundred entries, but beyond that, refresh may become painfully
// slow.
//
// ENHANCEMENT: Could be more generic.  The details of calling
// getaddrinfo and freeaddrinfo could be separated from the generic
// machinery of maintaining the map and the lru list.
//
// ISSUES: Calling getaddrinfo(3) is not simple.  Interpreting the
// results is not simple.  This module does not help with any of
// that.  See man getaddrinfo(3) for details.
#include <memory>
#include <mutex>
#include <map>
#include <stdexcept>  // BUGFIX: std::runtime_error is thrown below but <stdexcept> was never included
#include <string>
#include <tuple>      // BUGFIX: std::tie is used below but <tuple> was never included
#include <utility>    // std::make_pair
#include <atomic>
#include <cstring>
#include <cerrno>

#include <sys/types.h>
#include <sys/socket.h>
#include <netdb.h>

namespace core123{
namespace detail{
// Key type for the cache: the caller-visible arguments to getaddrinfo,
// with a total order so it can be used as a std::map key.
struct gai_args{
    std::string name;
    std::string service;
    struct addrinfo hints;
    bool operator<(const gai_args& rhs) const{
        // Compare only the hint fields that influence getaddrinfo's answer.
        return std::tie(name, service, hints.ai_family, hints.ai_socktype,
                        hints.ai_protocol, hints.ai_flags)
             < std::tie(rhs.name, rhs.service, rhs.hints.ai_family, rhs.hints.ai_socktype,
                        rhs.hints.ai_protocol, rhs.hints.ai_flags);
    }
    gai_args(const std::string& _name, const std::string& _service, struct addrinfo* _hints) :
        name(_name), service(_service)
    {
        if(_hints){
            hints = *_hints;
            // the caller should have zero'ed these, but let's not trust the caller.
            hints.ai_addrlen = 0;
            hints.ai_addr = nullptr;
            hints.ai_canonname = nullptr;
            hints.ai_next = nullptr;
        }else{
            std::memset(&hints, 0, sizeof(hints));
            hints.ai_family = AF_UNSPEC;
            hints.ai_flags = AI_V4MAPPED | AI_ADDRCONFIG;
        }
    }
};
} // namespace detail

// One getaddrinfo outcome.  The ctor performs the (possibly slow) call;
// the dtor releases the addrinfo list.
struct addrinfo_result{
    int status;
    struct addrinfo* aip;
    int eno; // non-zero only if status==EAI_SYSTEM
    addrinfo_result(const detail::gai_args& args){
        aip = nullptr;
        status = ::getaddrinfo(args.name.empty()? nullptr : args.name.c_str(),
                               args.service.empty()? nullptr : args.service.c_str(),
                               &args.hints, &aip);
        eno = (status == EAI_SYSTEM) ? errno : 0;
    }
    // Non-copyable.  We don't want to call freeaddrinfo twice!
    addrinfo_result(const addrinfo_result&) = delete;
    addrinfo_result& operator=(const addrinfo_result&) = delete;
    ~addrinfo_result(){
        if(aip)
            ::freeaddrinfo(aip);
    }
};

namespace detail{
struct gai_mrecord;
using gai_map_t = std::map<gai_args, gai_mrecord>;
// Mapped type: the cached result plus intrusive LRU-list links.
struct gai_mrecord{
    std::shared_ptr<addrinfo_result> air;
    // more_ru and less_ru constitute a linked list of map iterators
    // in most-recently-used order.  They are not initialized by the
    // ctor, but are assigned when the gai_mrecord is inserted into
    // the_map.  The more_ru member of the *most_ru and the less_ru
    // member of *least_ru remain undefined, and may not be used, even
    // for gai_mrecords in the_map.
    gai_map_t::iterator less_ru;
    gai_map_t::iterator more_ru;
    gai_mrecord(std::shared_ptr<addrinfo_result> _air): air(_air) {}
};
} // namespace detail

struct addrinfo_cache{
    // Return the cached (or freshly computed) result for (name, service, hints).
    // Releases the lock around the potentially-blocking getaddrinfo call.
    std::shared_ptr<addrinfo_result>
    lookup(const std::string& name, const std::string& service, addrinfo* hints = nullptr){
        detail::gai_args args(name, service, hints);
        std::unique_lock<std::mutex> lk(map_mtx);
        auto p = the_map.find(args);
        if(p!=the_map.end()){
            make_most_ru(p);
            _hit_count++;
            return p->second.air;
        }
        lk.unlock();
        auto newvalue = std::make_shared<addrinfo_result>(args); // might be slow
        _miss_count++;
        if(newvalue->status == EAI_AGAIN){
            _eai_again_count++;
            return newvalue; // return it, but don't record it in the_map.
        }
        lk.lock();
        bool inserted;
        std::tie(p, inserted) = the_map.insert(std::make_pair(args, detail::gai_mrecord{newvalue}));
        if(!inserted){
            // rarely - only if another was inserted while we weren't holding the lock
            // NOT WELL TESTED!
            p->second.air = newvalue;
            make_most_ru(p);
            return p->second.air;
        }
        if(the_map.size()==1){
            least_ru = p;
        }else{
            // make p more ru than the previous most_ru
            most_ru->second.more_ru = p;
            p->second.less_ru = most_ru;
        }
        most_ru = p;
        return newvalue;
    }

    // Evict LRU entries down to max_size, then re-run getaddrinfo for every
    // surviving entry.  Intended to run off the critical path.
    void refresh(size_t max_size = 100){
        std::lock_guard<std::mutex> refresh_lg(refresh_mtx);
        std::unique_lock<std::mutex> lk(map_mtx);
        while(the_map.size() > max_size){
            auto p = least_ru->second.more_ru;
            the_map.erase(least_ru);
            _erase_count++;
            least_ru = p;
        }
        // It's safe to iterate over the_map because the only place
        // erase is called is a few lines above, and we're safely
        // under the same refresh_lg lock_guard, so no other threads
        // can be erase-ing behind our back.
        for(auto& e : the_map){
            lk.unlock();
            auto newvalue = std::make_shared<addrinfo_result>(e.first); // might be slow
            _refresh_count++;
            lk.lock();
            if(newvalue->status != EAI_AGAIN) // don't update the_map with transient failures
                e.second.air = newvalue;
            else
                _eai_again_count++;
        }
    }

    size_t size() const{
        std::unique_lock<std::mutex> lk(map_mtx);
        return the_map.size();
    }
    size_t eai_again_count() const{ return _eai_again_count; }
    size_t hit_count() const{ return _hit_count; }
    size_t miss_count() const{ return _miss_count; }
    size_t refresh_count() const{ return _refresh_count; }
    size_t erase_count() const{ return _erase_count; }
    addrinfo_cache() = default;
    // Non-copyable
    addrinfo_cache(const addrinfo_cache&) = delete;
    addrinfo_cache& operator=(const addrinfo_cache&) = delete;
private:
    detail::gai_map_t the_map;
    using iter = detail::gai_map_t::iterator;
    std::atomic<size_t> _eai_again_count{0}; // count of EAI_AGAIN returns
    std::atomic<size_t> _hit_count{0};
    std::atomic<size_t> _miss_count{0};
    std::atomic<size_t> _refresh_count{0};
    std::atomic<size_t> _erase_count{0};
    // most_ru and least_ru are the head and tail of an auxiliary
    // linked list in recently-used order.  They are uninitialized,
    // and may not be used unless the_map contains at least one entry.
    iter most_ru;
    iter least_ru;
    // Member functions hold the map_mtx whenever modifying the_map.
    // The map_mtx is released when blocking getaddrinfo is called.
    mutable std::mutex map_mtx;
    // Only one refresh may run concurrently.  The refresh_mtx is held
    // by refresh() through getaddrinfo calls.
    mutable std::mutex refresh_mtx;

    // Splice entry p to the most-recently-used end of the LRU list.
    // Caller must hold map_mtx.
    void make_most_ru(iter p){
        // Pre-condition: p is a bona fide initialized iterator into the_map
        // and its ru_less and ru_more members connect it to the ru-list.
        if(p==most_ru)
            return;
        // N.B.  p is in the ru-list, and it is not the most_ru, so
        // there are at least two entries in the ru-list now.
        detail::gai_mrecord& pmr = p->second;
        if(p==least_ru){
            // disconnect pmr by moving the least_ru link "forward"
            least_ru = pmr.more_ru;
            // N.B.  least_ru->second.less_ru is undefined/unusable
        }else{
            // disconnect pmr by relink "around" it
            pmr.less_ru->second.more_ru = pmr.more_ru;
            pmr.more_ru->second.less_ru = pmr.less_ru;
        }
        // reconnect it at the most_ru end:
        pmr.less_ru = most_ru;
        // N.B. pmr.more_ru is undefined/unusable
        most_ru->second.more_ru = p;
        most_ru = p;
    }

    // _check_invariant should *never* fail.  If it does, it's a bug
    // (logic_error), not a runtime_error.  But since a bug slipped
    // through early testing, we keep _check_invariant around to be
    // used in unit tests.  It takes time O(s*log(s)) where
    // s=the_map.size().  It's all internal map iterator derefs and
    // finds - no calls to getaddr_info, so it's not *that* slow.  BUT
    // it holds the lock for the entire time that it runs, so it shouldn't
    // be called frequently in production.
    void check_invariant_already_locked() const{
        // HACK!!!  Maybe we should make a genuine core123::assert ??
#define core123__assert(P) do{ if(!(P)) throw std::runtime_error("invariant violated: " #P); }while(0)
        if(the_map.size() == 0)
            return;
        // for each item, p, in the ru-list:
        //   check the invariant that p really is "in"
        //   the map and that p->less->more == p
        auto p = most_ru;
        size_t n = 1;
        while( true ){
            core123__assert(the_map.find(p->first) == p);
            if(p == least_ru)
                break;
            auto less = p->second.less_ru;
            core123__assert(less->second.more_ru == p);
            p = less;
            ++n;
        }
        core123__assert(n == the_map.size());
#undef core123__assert
    }
public:
    void _check_invariant() const{
        std::unique_lock<std::mutex> lk(map_mtx);
        check_invariant_already_locked();
    }
};
} // namespace core123
// // Copyright (c) 2009, Markus Rickert // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
// #include <iostream> #include <memory> #include <stdexcept> #include <boost/lexical_cast.hpp> #include <rl/math/Constants.h> #include <rl/mdl/Kinematic.h> #include <rl/mdl/XmlFactory.h> #include <rl/plan/KdtreeNearestNeighbors.h> #include <rl/plan/Prm.h> #include <rl/plan/RecursiveVerifier.h> #include <rl/plan/SimpleModel.h> #include <rl/plan/SimpleOptimizer.h> #include <rl/plan/UniformSampler.h> #include <rl/sg/Model.h> #include <rl/sg/XmlFactory.h> #ifdef RL_SG_BULLET #include <rl/sg/bullet/Scene.h> #endif // RL_SG_BULLET #ifdef RL_SG_FCL #include <rl/sg/fcl/Scene.h> #endif // RL_SG_FCL #ifdef RL_SG_ODE #include <rl/sg/ode/Scene.h> #endif // RL_SG_ODE #ifdef RL_SG_PQP #include <rl/sg/pqp/Scene.h> #endif // RL_SG_PQP #ifdef RL_SG_SOLID #include <rl/sg/solid/Scene.h> #endif // RL_SG_SOLID int main(int argc, char** argv) { if (argc < 14) { std::cout << "Usage: rlPrmTest ENGINE SCENEFILE KINEMATICSFILE EXPECTED_NUM_VERTICES_MAX EXPECTED_NUM_EDGES_MAX X Y Z A B C START1 ... STARTn GOAL1 ... 
GOALn" << std::endl; return EXIT_FAILURE; } try { std::shared_ptr<rl::sg::Scene> scene; #ifdef RL_SG_BULLET if ("bullet" == std::string(argv[1])) { scene = std::make_shared<rl::sg::bullet::Scene>(); } #endif // RL_SG_BULLET #ifdef RL_SG_FCL if ("fcl" == std::string(argv[1])) { scene = std::make_shared<rl::sg::fcl::Scene>(); } #endif // RL_SG_FCL #ifdef RL_SG_ODE if ("ode" == std::string(argv[1])) { scene = std::make_shared<rl::sg::ode::Scene>(); } #endif // RL_SG_ODE #ifdef RL_SG_PQP if ("pqp" == std::string(argv[1])) { scene = std::make_shared<rl::sg::pqp::Scene>(); } #endif // RL_SG_PQP #ifdef RL_SG_SOLID if ("solid" == std::string(argv[1])) { scene = std::make_shared<rl::sg::solid::Scene>(); } #endif // RL_SG_SOLID rl::sg::XmlFactory factory1; factory1.load(argv[2], scene.get()); rl::mdl::XmlFactory factory2; std::shared_ptr<rl::mdl::Kinematic> kinematic = std::dynamic_pointer_cast<rl::mdl::Kinematic>(factory2.create(argv[3])); rl::math::Transform world = rl::math::Transform::Identity(); world = rl::math::AngleAxis( boost::lexical_cast<rl::math::Real>(argv[11]) * ::rl::math::constants::deg2rad, ::rl::math::Vector3::UnitZ() ) * ::rl::math::AngleAxis( boost::lexical_cast<rl::math::Real>(argv[10]) * ::rl::math::constants::deg2rad, ::rl::math::Vector3::UnitY() ) * ::rl::math::AngleAxis( boost::lexical_cast<rl::math::Real>(argv[9]) * ::rl::math::constants::deg2rad, ::rl::math::Vector3::UnitX() ); world.translation().x() = boost::lexical_cast<rl::math::Real>(argv[6]); world.translation().y() = boost::lexical_cast<rl::math::Real>(argv[7]); world.translation().z() = boost::lexical_cast<rl::math::Real>(argv[8]); kinematic->world() = world; rl::plan::SimpleModel model; model.mdl = kinematic.get(); model.model = scene->getModel(0); model.scene = scene.get(); rl::plan::KdtreeNearestNeighbors nearestNeighbors(&model); rl::plan::Prm planner; rl::plan::UniformSampler sampler; rl::plan::RecursiveVerifier verifier; sampler.seed(0); planner.setModel(&model); 
planner.setNearestNeighbors(&nearestNeighbors); planner.setSampler(&sampler); planner.setVerifier(&verifier); sampler.setModel(&model); verifier.setDelta(1 * rl::math::constants::deg2rad); verifier.setModel(&model); rl::math::Vector start(kinematic->getDofPosition()); for (std::ptrdiff_t i = 0; i < start.size(); ++i) { start(i) = boost::lexical_cast<rl::math::Real>(argv[i + 12]) * rl::math::constants::deg2rad; } planner.setStart(&start); rl::math::Vector goal(kinematic->getDofPosition()); for (std::ptrdiff_t i = 0; i < goal.size(); ++i) { goal(i) = boost::lexical_cast<rl::math::Real>(argv[start.size() + i + 12]) * rl::math::constants::deg2rad; } planner.setGoal(&goal); planner.setDuration(std::chrono::seconds(20)); std::cout << "verify() ... " << std::endl;; bool verified = planner.verify(); std::cout << "verify() " << (verified ? "true" : "false") << std::endl; if (!verified) { return EXIT_FAILURE; } std::cout << "construct() ... " << std::endl;; std::chrono::steady_clock::time_point startTime = std::chrono::steady_clock::now(); planner.construct(15); std::chrono::steady_clock::time_point stopTime = std::chrono::steady_clock::now(); std::cout << "construct() " << std::chrono::duration_cast<std::chrono::duration<double>>(stopTime - startTime).count() * 1000 << " ms" << std::endl; std::cout << "solve() ... " << std::endl;; startTime = std::chrono::steady_clock::now(); bool solved = planner.solve(); stopTime = std::chrono::steady_clock::now(); std::cout << "solve() " << (solved ? 
"true" : "false") << " " << std::chrono::duration_cast<std::chrono::duration<double>>(stopTime - startTime).count() * 1000 << " ms" << std::endl; std::cout << "NumVertices: " << planner.getNumVertices() << " NumEdges: " << planner.getNumEdges() << std::endl; if (solved) { if (boost::lexical_cast<std::size_t>(argv[4]) >= planner.getNumVertices() && boost::lexical_cast<std::size_t>(argv[5]) >= planner.getNumEdges()) { return EXIT_SUCCESS; } else { std::cerr << "NumVertices and NumEdges are more than expected for this test case."; return EXIT_FAILURE; } } return EXIT_FAILURE; } catch (const std::exception& e) { std::cout << e.what() << std::endl; return EXIT_FAILURE; } }
/*
 *  Copyright (c) 2004-present, Facebook, Inc.
 *  All rights reserved.
 *
 *  This source code is licensed under the BSD-style license found in the
 *  LICENSE file in the root directory of this source tree. An additional grant
 *  of patent rights can be found in the PATENTS file in the same directory.
 *
 */
#include "fboss/agent/hw/bcm/tests/BcmPortUtils.h"

#include "fboss/agent/hw/bcm/BcmError.h"

namespace facebook {
namespace fboss {
namespace utility {

// Returns true iff the SDK reports the port administratively enabled.
bool portEnabled(int unit, opennsl_port_t port) {
  int enable = -1;
  auto rv = opennsl_port_enable_get(unit, port, &enable);
  bcmCheckError(rv, "failed to get port enable status");
  CHECK(enable == 0 || enable == 1);
  return (enable == 1);
}

// Queries the SDK for the port's currently configured speed.
cfg::PortSpeed curPortSpeed(int unit, opennsl_port_t port) {
  // FIX: initialize the out-param so a (checked) SDK failure can never leave
  // an indeterminate value behind.
  int curSpeed{0};
  auto ret = opennsl_port_speed_get(unit, port, &curSpeed);
  bcmCheckError(ret, "Failed to get current speed for port");
  return cfg::PortSpeed(curSpeed);
}

// Asserts the port's enabled state and, when enabled, its speed.
void assertPort(int unit, int port, bool enabled, cfg::PortSpeed speed) {
  CHECK_EQ(enabled, portEnabled(unit, port));
  if (enabled) {
    // Only verify speed on enabled ports
    CHECK(speed == utility::curPortSpeed(unit, port));
  }
}

void assertPortStatus(int unit, int port) {
  CHECK(portEnabled(unit, port));
}

void assertPortsLoopbackMode(
    int unit,
    const std::map<PortID, int>& port2LoopbackMode) {
  // PERF FIX: iterate by const reference; the old loop copied each
  // pair<PortID, int> on every iteration.
  for (const auto& portAndLoopBackMode : port2LoopbackMode) {
    assertPortLoopbackMode(
        unit, portAndLoopBackMode.first, portAndLoopBackMode.second);
  }
}

// Translates a logical port number into its opaque gport handle.
opennsl_gport_t getPortGport(int unit, int port) {
  opennsl_gport_t portGport;
  auto rv = opennsl_port_gport_get(unit, port, &portGport);
  facebook::fboss::bcmCheckError(rv, "failed to get gport for port");
  return portGport;
}

} // namespace utility
} // namespace fboss
} // namespace facebook
// Copyright (c) 2009-2018 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include <chain.h>
#include <key_io.h>
#include <rpc/server.h>
#include <validation.h>
#include <script/script.h>
#include <script/standard.h>
#include <sync.h>
#include <util.h>
#include <utiltime.h>
#include <wallet/wallet.h>
#include <merkleblock.h>
#include <core_io.h>

#include <wallet/rpcwallet.h>

#include <fstream>
#include <stdint.h>

#include <boost/algorithm/string.hpp>
#include <boost/date_time/posix_time/posix_time.hpp>

#include <univalue.h>


// Parses a dump-file timestamp in "%Y-%m-%dT%H:%M:%SZ" form into seconds
// since the Unix epoch. Returns 0 when the string does not parse as a
// valid timestamp (0 doubles as "unknown birth time" in the dump format).
int64_t static DecodeDumpTime(const std::string &str) {
    static const boost::posix_time::ptime epoch = boost::posix_time::from_time_t(0);
    static const std::locale loc(std::locale::classic(),
        new boost::posix_time::time_input_facet("%Y-%m-%dT%H:%M:%SZ"));
    std::istringstream iss(str);
    iss.imbue(loc);
    boost::posix_time::ptime ptime(boost::date_time::not_a_date_time);
    iss >> ptime;
    if (ptime.is_not_a_date_time())
        return 0;
    return (ptime - epoch).total_seconds();
}

// Percent-encodes control characters, non-ASCII bytes, spaces-and-below, and
// '%' itself so a label can be stored as a single whitespace-free token in
// the dump file (the dump parser splits lines on spaces).
std::string static EncodeDumpString(const std::string &str) {
    std::stringstream ret;
    for (unsigned char c : str) {
        if (c <= 32 || c >= 128 || c == '%') {
            ret << '%' << HexStr(&c, &c + 1);
        } else {
            ret << c;
        }
    }
    return ret.str();
}

// Inverse of EncodeDumpString: expands %XX escapes back into raw bytes.
// The (c>>6)*9 + ((c-'0')&15) expression is a branch-free hex-digit decode
// that handles '0'-'9' and 'a'-'f'/'A'-'F'.
static std::string DecodeDumpString(const std::string &str) {
    std::stringstream ret;
    for (unsigned int pos = 0; pos < str.length(); pos++) {
        unsigned char c = str[pos];
        if (c == '%' && pos+2 < str.length()) {
            c = (((str[pos+1]>>6)*9+((str[pos+1]-'0')&15)) << 4) |
                ((str[pos+2]>>6)*9+((str[pos+2]-'0')&15));
            pos += 2;
        }
        ret << c;
    }
    return ret.str();
}

// Collects every address-book entry that corresponds to `keyid` (across all
// destination types for the key) into a comma-separated `strAddr`, and the
// (last seen) label into `strLabel`. Returns true if at least one labeled
// entry was found; otherwise fills `strAddr` with the default-type address.
static bool GetWalletAddressesForKey(CWallet * const pwallet, const CKeyID &keyid, std::string &strAddr, std::string &strLabel)
{
    bool fLabelFound = false;
    CKey key;
    pwallet->GetKey(keyid, key);
    for (const auto& dest : GetAllDestinationsForKey(key.GetPubKey())) {
        if (pwallet->mapAddressBook.count(dest)) {
            if (!strAddr.empty()) {
                strAddr += ",";
            }
            strAddr += EncodeDestination(dest);
            strLabel = EncodeDumpString(pwallet->mapAddressBook[dest].name);
            fLabelFound = true;
        }
    }
    if (!fLabelFound) {
        strAddr = EncodeDestination(GetDestinationForKey(key.GetPubKey(), pwallet->m_default_address_type));
    }
    return fLabelFound;
}

static const int64_t TIMESTAMP_MIN = 0;

// Rescans the chain from `time_begin` and converts the two failure modes
// (user abort, incomplete scan) into JSON-RPC errors for the import RPCs.
static void RescanWallet(CWallet& wallet, const WalletRescanReserver& reserver, int64_t time_begin = TIMESTAMP_MIN, bool update = true)
{
    int64_t scanned_time = wallet.RescanFromTime(time_begin, reserver, update);
    if (wallet.IsAbortingRescan()) {
        throw JSONRPCError(RPC_MISC_ERROR, "Rescan aborted by user.");
    } else if (scanned_time > time_begin) {
        // scanned_time > time_begin means some early blocks were unavailable
        // (e.g. pruned), so the rescan could not cover the requested range.
        throw JSONRPCError(RPC_WALLET_ERROR, "Rescan was unable to fully rescan the blockchain. Some transactions may be missing.");
    }
}

// RPC: importprivkey — adds a WIF-encoded private key to the wallet,
// labels all destination forms of the key, and optionally rescans.
UniValue importprivkey(const JSONRPCRequest& request)
{
    std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
    CWallet* const pwallet = wallet.get();

    if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
        return NullUniValue;
    }

    if (request.fHelp || request.params.size() < 1 || request.params.size() > 3)
        throw std::runtime_error(
            "importprivkey \"privkey\" ( \"label\" ) ( rescan )\n"
            "\nAdds a private key (as returned by dumpprivkey) to your wallet. Requires a new wallet backup.\n"
            "Hint: use importmulti to import more than one private key.\n"
            "\nArguments:\n"
            "1. \"privkey\" (string, required) The private key (see dumpprivkey)\n"
            "2. \"label\" (string, optional, default=\"\") An optional label\n"
            "3. rescan (boolean, optional, default=true) Rescan the wallet for transactions\n"
            "\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n"
            "may report that the imported key exists but related transactions are still missing, leading to temporarily incorrect/bogus balances and unspent outputs until rescan completes.\n"
            "\nExamples:\n"
            "\nDump a private key\n"
            + HelpExampleCli("dumpprivkey", "\"myaddress\"") +
            "\nImport the private key with rescan\n"
            + HelpExampleCli("importprivkey", "\"mykey\"") +
            "\nImport using a label and without rescan\n"
            + HelpExampleCli("importprivkey", "\"mykey\" \"testing\" false") +
            "\nImport using default blank label and without rescan\n"
            + HelpExampleCli("importprivkey", "\"mykey\" \"\" false") +
            "\nAs a JSON-RPC call\n"
            + HelpExampleRpc("importprivkey", "\"mykey\", \"testing\", false")
        );

    WalletRescanReserver reserver(pwallet);
    bool fRescan = true;
    {
        LOCK2(cs_main, pwallet->cs_wallet);

        EnsureWalletIsUnlocked(pwallet);

        std::string strSecret = request.params[0].get_str();
        std::string strLabel = "";
        if (!request.params[1].isNull())
            strLabel = request.params[1].get_str();

        // Whether to perform rescan after import
        if (!request.params[2].isNull())
            fRescan = request.params[2].get_bool();

        if (fRescan && fPruneMode)
            throw JSONRPCError(RPC_WALLET_ERROR, "Rescan is disabled in pruned mode");

        // Reserve the rescan slot up front so a concurrent rescan can't race us.
        if (fRescan && !reserver.reserve()) {
            throw JSONRPCError(RPC_WALLET_ERROR, "Wallet is currently rescanning. Abort existing rescan or wait.");
        }

        CKey key = DecodeSecret(strSecret);
        if (!key.IsValid()) throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid private key encoding");

        CPubKey pubkey = key.GetPubKey();
        assert(key.VerifyPubKey(pubkey));
        CKeyID vchAddress = pubkey.GetID();
        {
            pwallet->MarkDirty();

            // We don't know which corresponding address will be used;
            // label them all
            for (const auto& dest : GetAllDestinationsForKey(pubkey)) {
                pwallet->SetAddressBook(dest, strLabel, "receive");
            }

            // Don't throw error in case a key is already there
            if (pwallet->HaveKey(vchAddress)) {
                return NullUniValue;
            }

            // whenever a key is imported, we need to scan the whole chain
            // (nCreateTime = 1 forces the birth time to "beginning of chain")
            pwallet->UpdateTimeFirstKey(1);
            pwallet->mapKeyMetadata[vchAddress].nCreateTime = 1;

            if (!pwallet->AddKeyPubKey(key, pubkey)) {
                throw JSONRPCError(RPC_WALLET_ERROR, "Error adding key to wallet");
            }
            pwallet->LearnAllRelatedScripts(pubkey);
        }
    }
    if (fRescan) {
        RescanWallet(*pwallet, reserver);
    }

    return NullUniValue;
}

// RPC: abortrescan — requests cancellation of an in-progress wallet rescan.
// Returns false when no rescan is running (or one is already aborting).
UniValue abortrescan(const JSONRPCRequest& request)
{
    std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
    CWallet* const pwallet = wallet.get();

    if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
        return NullUniValue;
    }

    if (request.fHelp || request.params.size() > 0)
        throw std::runtime_error(
            "abortrescan\n"
            "\nStops current wallet rescan triggered by an RPC call, e.g. by an importprivkey call.\n"
            "\nExamples:\n"
            "\nImport a private key\n"
            + HelpExampleCli("importprivkey", "\"mykey\"") +
            "\nAbort the running wallet rescan\n"
            + HelpExampleCli("abortrescan", "") +
            "\nAs a JSON-RPC call\n"
            + HelpExampleRpc("abortrescan", "")
        );

    if (!pwallet->IsScanning() || pwallet->IsAbortingRescan()) return false;
    pwallet->AbortRescan();
    return true;
}

// Forward declaration: ImportScript and ImportAddress are mutually recursive
// (a P2SH redeem script import re-enters via ImportAddress for its CScriptID).
static void ImportAddress(CWallet*, const CTxDestination& dest, const std::string& strLabel);

// Adds `script` to the wallet as watch-only. When `isRedeemScript`, also
// stores the redeem script itself and imports its P2SH address; otherwise
// records an address-book entry for the extracted destination, if any.
static void ImportScript(CWallet* const pwallet, const CScript& script, const std::string& strLabel, bool isRedeemScript) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
{
    // Refuse plain-address imports we already hold the private key for;
    // redeem-script imports are exempt from this check.
    if (!isRedeemScript && ::IsMine(*pwallet, script) == ISMINE_SPENDABLE) {
        throw JSONRPCError(RPC_WALLET_ERROR, "The wallet already contains the private key for this address or script");
    }

    pwallet->MarkDirty();

    if (!pwallet->HaveWatchOnly(script) && !pwallet->AddWatchOnly(script, 0 /* nCreateTime */)) {
        throw JSONRPCError(RPC_WALLET_ERROR, "Error adding address to wallet");
    }

    if (isRedeemScript) {
        const CScriptID id(script);
        if (!pwallet->HaveCScript(id) && !pwallet->AddCScript(script)) {
            throw JSONRPCError(RPC_WALLET_ERROR, "Error adding p2sh redeemScript to wallet");
        }
        ImportAddress(pwallet, id, strLabel);
    } else {
        CTxDestination destination;
        if (ExtractDestination(script, destination)) {
            pwallet->SetAddressBook(destination, strLabel, "receive");
        }
    }
}

// Watch-only import of a single destination: delegates to ImportScript with
// the destination's scriptPubKey, then records/updates its address-book label.
static void ImportAddress(CWallet* const pwallet, const CTxDestination& dest, const std::string& strLabel) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
{
    CScript script = GetScriptForDestination(dest);
    ImportScript(pwallet, script, strLabel, false);
    // add to address book or update label
    if (IsValidDestination(dest))
        pwallet->SetAddressBook(dest, strLabel, "receive");
}

// RPC: importaddress — watch-only import of an address or raw hex script,
// optionally also registering the P2SH form of a raw script.
UniValue importaddress(const JSONRPCRequest& request)
{
    std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
    CWallet* const pwallet = wallet.get();

    if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
        return NullUniValue;
    }

    if (request.fHelp || request.params.size() < 1 || request.params.size() > 4)
        throw std::runtime_error(
            "importaddress \"address\" ( \"label\" rescan p2sh )\n"
            "\nAdds an address or script (in hex) that can be watched as if it were in your wallet but cannot be used to spend. Requires a new wallet backup.\n"
            "\nArguments:\n"
            "1. \"address\" (string, required) The Granacoin address (or hex-encoded script)\n"
            "2. \"label\" (string, optional, default=\"\") An optional label\n"
            "3. rescan (boolean, optional, default=true) Rescan the wallet for transactions\n"
            "4. p2sh (boolean, optional, default=false) Add the P2SH version of the script as well\n"
            "\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n"
            "may report that the imported address exists but related transactions are still missing, leading to temporarily incorrect/bogus balances and unspent outputs until rescan completes.\n"
            "If you have the full public key, you should call importpubkey instead of this.\n"
            "\nNote: If you import a non-standard raw script in hex form, outputs sending to it will be treated\n"
            "as change, and not show up in many RPCs.\n"
            "\nExamples:\n"
            "\nImport an address with rescan\n"
            + HelpExampleCli("importaddress", "\"myaddress\"") +
            "\nImport using a label without rescan\n"
            + HelpExampleCli("importaddress", "\"myaddress\" \"testing\" false") +
            "\nAs a JSON-RPC call\n"
            + HelpExampleRpc("importaddress", "\"myaddress\", \"testing\", false")
        );

    std::string strLabel;
    if (!request.params[1].isNull())
        strLabel = request.params[1].get_str();

    // Whether to perform rescan after import
    bool fRescan = true;
    if (!request.params[2].isNull())
        fRescan = request.params[2].get_bool();

    if (fRescan && fPruneMode)
        throw JSONRPCError(RPC_WALLET_ERROR, "Rescan is disabled in pruned mode");

    WalletRescanReserver reserver(pwallet);
    if (fRescan && !reserver.reserve()) {
        throw JSONRPCError(RPC_WALLET_ERROR, "Wallet is currently rescanning. Abort existing rescan or wait.");
    }

    // Whether to import a p2sh version, too
    bool fP2SH = false;
    if (!request.params[3].isNull())
        fP2SH = request.params[3].get_bool();

    {
        LOCK2(cs_main, pwallet->cs_wallet);

        CTxDestination dest = DecodeDestination(request.params[0].get_str());
        if (IsValidDestination(dest)) {
            if (fP2SH) {
                throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Cannot use the p2sh flag with an address - use a script instead");
            }
            ImportAddress(pwallet, dest, strLabel);
        } else if (IsHex(request.params[0].get_str())) {
            std::vector<unsigned char> data(ParseHex(request.params[0].get_str()));
            ImportScript(pwallet, CScript(data.begin(), data.end()), strLabel, fP2SH);
        } else {
            throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid Granacoin address or script");
        }
    }
    if (fRescan)
    {
        RescanWallet(*pwallet, reserver);
        // Re-evaluate mempool acceptance now that new watch-only entries exist.
        pwallet->ReacceptWalletTransactions();
    }

    return NullUniValue;
}

// RPC: importprunedfunds — adds a transaction (plus merkle proof) to the
// wallet without a rescan; intended for pruned nodes where rescan is
// impossible. The proof is validated against the active chain.
UniValue importprunedfunds(const JSONRPCRequest& request)
{
    std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
    CWallet* const pwallet = wallet.get();

    if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
        return NullUniValue;
    }

    if (request.fHelp || request.params.size() != 2)
        throw std::runtime_error(
            "importprunedfunds\n"
            "\nImports funds without rescan. Corresponding address or script must previously be included in wallet. Aimed towards pruned wallets. The end-user is responsible to import additional transactions that subsequently spend the imported outputs or rescan after the point in the blockchain the transaction is included.\n"
            "\nArguments:\n"
            "1. \"rawtransaction\" (string, required) A raw transaction in hex funding an already-existing address in wallet\n"
            "2. \"txoutproof\" (string, required) The hex output from gettxoutproof that contains the transaction\n"
        );

    CMutableTransaction tx;
    if (!DecodeHexTx(tx, request.params[0].get_str()))
        throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "TX decode failed");
    uint256 hashTx = tx.GetHash();
    CWalletTx wtx(pwallet, MakeTransactionRef(std::move(tx)));

    CDataStream ssMB(ParseHexV(request.params[1], "proof"), SER_NETWORK, PROTOCOL_VERSION);
    CMerkleBlock merkleBlock;
    ssMB >> merkleBlock;

    //Search partial merkle tree in proof for our transaction and index in valid block
    std::vector<uint256> vMatch;
    std::vector<unsigned int> vIndex;
    unsigned int txnIndex = 0;
    if (merkleBlock.txn.ExtractMatches(vMatch, vIndex) == merkleBlock.header.hashMerkleRoot) {

        LOCK(cs_main);
        const CBlockIndex* pindex = LookupBlockIndex(merkleBlock.header.GetHash());
        if (!pindex || !chainActive.Contains(pindex)) {
            throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found in chain");
        }

        std::vector<uint256>::const_iterator it;
        if ((it = std::find(vMatch.begin(), vMatch.end(), hashTx))==vMatch.end()) {
            throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction given doesn't exist in proof");
        }

        txnIndex = vIndex[it - vMatch.begin()];
    } else {
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Something wrong with merkleblock");
    }

    wtx.nIndex = txnIndex;
    wtx.hashBlock = merkleBlock.header.GetHash();

    LOCK2(cs_main, pwallet->cs_wallet);

    if (pwallet->IsMine(*wtx.tx)) {
        pwallet->AddToWallet(wtx, false);
        return NullUniValue;
    }

    throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "No addresses in wallet correspond to included transaction");
}

// RPC: removeprunedfunds — removes a transaction previously added with
// importprunedfunds from the wallet (by txid).
UniValue removeprunedfunds(const JSONRPCRequest& request)
{
    std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
    CWallet* const pwallet = wallet.get();

    if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
        return NullUniValue;
    }

    if (request.fHelp || request.params.size() != 1)
        throw std::runtime_error(
            "removeprunedfunds \"txid\"\n"
            "\nDeletes the specified transaction from the wallet. Meant for use with pruned wallets and as a companion to importprunedfunds. This will affect wallet balances.\n"
            "\nArguments:\n"
            "1. \"txid\" (string, required) The hex-encoded id of the transaction you are deleting\n"
            "\nExamples:\n"
            + HelpExampleCli("removeprunedfunds", "\"c54357a1ff9f4e792198e75c01fc633acc6d093abd67ec1849596637c3457bf2\"") +
            "\nAs a JSON-RPC call\n"
            + HelpExampleRpc("removeprunedfunds", "\"c54357a1ff9f4e792198e75c01fc633acc6d093abd67ec1849596637c3457bf2\"")
        );

    LOCK2(cs_main, pwallet->cs_wallet);

    uint256 hash;
    hash.SetHex(request.params[0].get_str());
    std::vector<uint256> vHash;
    vHash.push_back(hash);
    std::vector<uint256> vHashOut;

    if (pwallet->ZapSelectTx(vHash, vHashOut) != DBErrors::LOAD_OK) {
        throw JSONRPCError(RPC_WALLET_ERROR, "Could not properly delete the transaction.");
    }

    // ZapSelectTx reports which of the requested txids it actually removed.
    if(vHashOut.empty()) {
        throw JSONRPCError(RPC_INVALID_PARAMETER, "Transaction does not exist in wallet.");
    }

    return NullUniValue;
}

// RPC: importpubkey — watch-only import of a raw hex public key: imports
// every destination form of the key plus the raw-pubkey script.
UniValue importpubkey(const JSONRPCRequest& request)
{
    std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
    CWallet* const pwallet = wallet.get();

    if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
        return NullUniValue;
    }

    if (request.fHelp || request.params.size() < 1 || request.params.size() > 3)
        throw std::runtime_error(
            "importpubkey \"pubkey\" ( \"label\" rescan )\n"
            "\nAdds a public key (in hex) that can be watched as if it were in your wallet but cannot be used to spend. Requires a new wallet backup.\n"
            "\nArguments:\n"
            "1. \"pubkey\" (string, required) The hex-encoded public key\n"
            "2. \"label\" (string, optional, default=\"\") An optional label\n"
            "3. rescan (boolean, optional, default=true) Rescan the wallet for transactions\n"
            "\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n"
            "may report that the imported pubkey exists but related transactions are still missing, leading to temporarily incorrect/bogus balances and unspent outputs until rescan completes.\n"
            "\nExamples:\n"
            "\nImport a public key with rescan\n"
            + HelpExampleCli("importpubkey", "\"mypubkey\"") +
            "\nImport using a label without rescan\n"
            + HelpExampleCli("importpubkey", "\"mypubkey\" \"testing\" false") +
            "\nAs a JSON-RPC call\n"
            + HelpExampleRpc("importpubkey", "\"mypubkey\", \"testing\", false")
        );

    std::string strLabel;
    if (!request.params[1].isNull())
        strLabel = request.params[1].get_str();

    // Whether to perform rescan after import
    bool fRescan = true;
    if (!request.params[2].isNull())
        fRescan = request.params[2].get_bool();

    if (fRescan && fPruneMode)
        throw JSONRPCError(RPC_WALLET_ERROR, "Rescan is disabled in pruned mode");

    WalletRescanReserver reserver(pwallet);
    if (fRescan && !reserver.reserve()) {
        throw JSONRPCError(RPC_WALLET_ERROR, "Wallet is currently rescanning. Abort existing rescan or wait.");
    }

    if (!IsHex(request.params[0].get_str()))
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Pubkey must be a hex string");
    std::vector<unsigned char> data(ParseHex(request.params[0].get_str()));
    CPubKey pubKey(data.begin(), data.end());
    if (!pubKey.IsFullyValid())
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Pubkey is not a valid public key");

    {
        LOCK2(cs_main, pwallet->cs_wallet);

        for (const auto& dest : GetAllDestinationsForKey(pubKey)) {
            ImportAddress(pwallet, dest, strLabel);
        }
        ImportScript(pwallet, GetScriptForRawPubKey(pubKey), strLabel, false);
        pwallet->LearnAllRelatedScripts(pubKey);
    }
    if (fRescan)
    {
        RescanWallet(*pwallet, reserver);
        pwallet->ReacceptWalletTransactions();
    }

    return NullUniValue;
}

// RPC: importwallet — bulk-imports keys and scripts from a dump file
// produced by dumpwallet, then rescans from the earliest birth time seen.
UniValue importwallet(const JSONRPCRequest& request)
{
    std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
    CWallet* const pwallet = wallet.get();

    if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
        return NullUniValue;
    }

    if (request.fHelp || request.params.size() != 1)
        throw std::runtime_error(
            "importwallet \"filename\"\n"
            "\nImports keys from a wallet dump file (see dumpwallet). Requires a new wallet backup to include imported keys.\n"
            "\nArguments:\n"
            "1. \"filename\" (string, required) The wallet file\n"
            "\nExamples:\n"
            "\nDump the wallet\n"
            + HelpExampleCli("dumpwallet", "\"test\"") +
            "\nImport the wallet\n"
            + HelpExampleCli("importwallet", "\"test\"") +
            "\nImport using the json rpc call\n"
            + HelpExampleRpc("importwallet", "\"test\"")
        );

    if (fPruneMode)
        throw JSONRPCError(RPC_WALLET_ERROR, "Importing wallets is disabled in pruned mode");

    WalletRescanReserver reserver(pwallet);
    if (!reserver.reserve()) {
        throw JSONRPCError(RPC_WALLET_ERROR, "Wallet is currently rescanning. Abort existing rescan or wait.");
    }

    int64_t nTimeBegin = 0;
    bool fGood = true;
    {
        LOCK2(cs_main, pwallet->cs_wallet);

        EnsureWalletIsUnlocked(pwallet);

        std::ifstream file;
        // Open at end (ios::ate) so tellg() immediately yields the file size
        // for the progress bar.
        file.open(request.params[0].get_str().c_str(), std::ios::in | std::ios::ate);
        if (!file.is_open()) {
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot open wallet dump file");
        }
        nTimeBegin = chainActive.Tip()->GetBlockTime();

        int64_t nFilesize = std::max((int64_t)1, (int64_t)file.tellg());
        file.seekg(0, file.beg);

        // Use uiInterface.ShowProgress instead of pwallet.ShowProgress because pwallet.ShowProgress has a cancel button tied to AbortRescan which
        // we don't want for this progress bar showing the import progress. uiInterface.ShowProgress does not have a cancel button.
        uiInterface.ShowProgress(strprintf("%s " + _("Importing..."), pwallet->GetDisplayName()), 0, false); // show progress dialog in GUI
        while (file.good()) {
            uiInterface.ShowProgress("", std::max(1, std::min(99, (int)(((double)file.tellg() / (double)nFilesize) * 100))), false);
            std::string line;
            std::getline(file, line);
            // Skip blanks and '#' comment lines in the dump.
            if (line.empty() || line[0] == '#')
                continue;

            // Dump line format: <key-or-script> <timestamp> [attributes...] [# comment]
            std::vector<std::string> vstr;
            boost::split(vstr, line, boost::is_any_of(" "));
            if (vstr.size() < 2)
                continue;
            CKey key = DecodeSecret(vstr[0]);
            if (key.IsValid()) {
                // Private-key line.
                CPubKey pubkey = key.GetPubKey();
                assert(key.VerifyPubKey(pubkey));
                CKeyID keyid = pubkey.GetID();
                if (pwallet->HaveKey(keyid)) {
                    pwallet->WalletLogPrintf("Skipping import of %s (key already present)\n", EncodeDestination(keyid));
                    continue;
                }
                int64_t nTime = DecodeDumpTime(vstr[1]);
                std::string strLabel;
                bool fLabel = true;
                for (unsigned int nStr = 2; nStr < vstr.size(); nStr++) {
                    if (vstr[nStr].front() == '#')
                        break;
                    if (vstr[nStr] == "change=1")
                        fLabel = false;
                    if (vstr[nStr] == "reserve=1")
                        fLabel = false;
                    if (vstr[nStr].substr(0,6) == "label=") {
                        strLabel = DecodeDumpString(vstr[nStr].substr(6));
                        fLabel = true;
                    }
                }
                pwallet->WalletLogPrintf("Importing %s...\n", EncodeDestination(keyid));
                if (!pwallet->AddKeyPubKey(key, pubkey)) {
                    fGood = false;
                    continue;
                }
                pwallet->mapKeyMetadata[keyid].nCreateTime = nTime;
                if (fLabel)
                    pwallet->SetAddressBook(keyid, strLabel, "receive");
                nTimeBegin = std::min(nTimeBegin, nTime);
            } else if(IsHex(vstr[0])) {
                // Hex script line.
                std::vector<unsigned char> vData(ParseHex(vstr[0]));
                CScript script = CScript(vData.begin(), vData.end());
                CScriptID id(script);
                if (pwallet->HaveCScript(id)) {
                    pwallet->WalletLogPrintf("Skipping import of %s (script already present)\n", vstr[0]);
                    continue;
                }
                if(!pwallet->AddCScript(script)) {
                    pwallet->WalletLogPrintf("Error importing script %s\n", vstr[0]);
                    fGood = false;
                    continue;
                }
                int64_t birth_time = DecodeDumpTime(vstr[1]);
                if (birth_time > 0) {
                    pwallet->m_script_metadata[id].nCreateTime = birth_time;
                    nTimeBegin = std::min(nTimeBegin, birth_time);
                }
            }
        }
        file.close();
        uiInterface.ShowProgress("", 100, false); // hide progress dialog in GUI
        pwallet->UpdateTimeFirstKey(nTimeBegin);
    }
    uiInterface.ShowProgress("", 100, false); // hide progress dialog in GUI
    RescanWallet(*pwallet, reserver, nTimeBegin, false /* update */);
    pwallet->MarkDirty();

    if (!fGood)
        throw JSONRPCError(RPC_WALLET_ERROR, "Error adding some keys/scripts to wallet");

    return NullUniValue;
}

// RPC: dumpprivkey — returns the WIF-encoded private key for an address
// held by the wallet (wallet must be unlocked).
UniValue dumpprivkey(const JSONRPCRequest& request)
{
    std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
    CWallet* const pwallet = wallet.get();

    if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
        return NullUniValue;
    }

    if (request.fHelp || request.params.size() != 1)
        throw std::runtime_error(
            "dumpprivkey \"address\"\n"
            "\nReveals the private key corresponding to 'address'.\n"
            "Then the importprivkey can be used with this output\n"
            "\nArguments:\n"
            "1. \"address\" (string, required) The granacoin address for the private key\n"
            "\nResult:\n"
            "\"key\" (string) The private key\n"
            "\nExamples:\n"
            + HelpExampleCli("dumpprivkey", "\"myaddress\"")
            + HelpExampleCli("importprivkey", "\"mykey\"")
            + HelpExampleRpc("dumpprivkey", "\"myaddress\"")
        );

    LOCK2(cs_main, pwallet->cs_wallet);

    EnsureWalletIsUnlocked(pwallet);

    std::string strAddress = request.params[0].get_str();
    CTxDestination dest = DecodeDestination(strAddress);
    if (!IsValidDestination(dest)) {
        throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid Granacoin address");
    }
    auto keyid = GetKeyForDestination(*pwallet, dest);
    if (keyid.IsNull()) {
        throw JSONRPCError(RPC_TYPE_ERROR, "Address does not refer to a key");
    }
    CKey vchSecret;
    if (!pwallet->GetKey(keyid, vchSecret)) {
        throw JSONRPCError(RPC_WALLET_ERROR, "Private key for address " + strAddress + " is not known");
    }
    return EncodeSecret(vchSecret);
}

// RPC: dumpwallet — writes all wallet keys (and imported scripts) to a
// server-side file in the human-readable dump format consumed by
// importwallet. Refuses to overwrite an existing file.
UniValue dumpwallet(const JSONRPCRequest& request)
{
    std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request);
    CWallet* const pwallet = wallet.get();

    if (!EnsureWalletIsAvailable(pwallet, request.fHelp)) {
        return NullUniValue;
    }

    if (request.fHelp || request.params.size() != 1)
        throw std::runtime_error(
            "dumpwallet \"filename\"\n"
            "\nDumps all wallet keys in a human-readable format to a server-side file. This does not allow overwriting existing files.\n"
            "Imported scripts are included in the dumpfile, but corresponding BIP173 addresses, etc. may not be added automatically by importwallet.\n"
            "Note that if your wallet contains keys which are not derived from your HD seed (e.g. imported keys), these are not covered by\n"
            "only backing up the seed itself, and must be backed up too (e.g. ensure you back up the whole dumpfile).\n"
            "\nArguments:\n"
            "1. \"filename\" (string, required) The filename with path (either absolute or relative to granacoind)\n"
            "\nResult:\n"
            "{ (json object)\n"
            " \"filename\" : { (string) The filename with full absolute path\n"
            "}\n"
            "\nExamples:\n"
            + HelpExampleCli("dumpwallet", "\"test\"")
            + HelpExampleRpc("dumpwallet", "\"test\"")
        );

    LOCK2(cs_main, pwallet->cs_wallet);

    EnsureWalletIsUnlocked(pwallet);

    boost::filesystem::path filepath = request.params[0].get_str();
    filepath = boost::filesystem::absolute(filepath);

    /* Prevent arbitrary files from being overwritten. There have been reports
     * that users have overwritten wallet files this way:
     * https://github.com/bitcoin/bitcoin/issues/9934
     * It may also avoid other security issues.
     */
    if (boost::filesystem::exists(filepath)) {
        throw JSONRPCError(RPC_INVALID_PARAMETER, filepath.string() + " already exists. If you are sure this is what you want, move it out of the way first");
    }

    std::ofstream file;
    file.open(filepath.string().c_str());
    if (!file.is_open())
        throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot open wallet dump file");

    std::map<CTxDestination, int64_t> mapKeyBirth;
    const std::map<CKeyID, int64_t>& mapKeyPool = pwallet->GetAllReserveKeys();
    pwallet->GetKeyBirthTimes(mapKeyBirth);

    std::set<CScriptID> scripts = pwallet->GetCScripts();
    // TODO: include scripts in GetKeyBirthTimes() output instead of separate

    // sort time/key pairs
    std::vector<std::pair<int64_t, CKeyID> > vKeyBirth;
    for (const auto& entry : mapKeyBirth) {
        if (const CKeyID* keyID = boost::get<CKeyID>(&entry.first)) { // set and test
            vKeyBirth.push_back(std::make_pair(entry.second, *keyID));
        }
    }
    mapKeyBirth.clear();
    std::sort(vKeyBirth.begin(), vKeyBirth.end());

    // produce output
    file << strprintf("# Wallet dump created by Granacoin %s\n", CLIENT_BUILD);
    file << strprintf("# * Created on %s\n", FormatISO8601DateTime(GetTime()));
    file << strprintf("# * Best block at time of backup was %i (%s),\n", chainActive.Height(), chainActive.Tip()->GetBlockHash().ToString());
    file << strprintf("# mined on %s\n", FormatISO8601DateTime(chainActive.Tip()->GetBlockTime()));
    file << "\n";

    // add the base58check encoded extended master if the wallet uses HD
    CKeyID seed_id = pwallet->GetHDChain().seed_id;
    if (!seed_id.IsNull())
    {
        CKey seed;
        if (pwallet->GetKey(seed_id, seed)) {
            CExtKey masterKey;
            masterKey.SetSeed(seed.begin(), seed.size());

            file << "# extended private masterkey: " << EncodeExtKey(masterKey) << "\n\n";
        }
    }
    for (std::vector<std::pair<int64_t, CKeyID> >::const_iterator it = vKeyBirth.begin(); it != vKeyBirth.end(); it++) {
        const CKeyID &keyid = it->second;
        std::string strTime = FormatISO8601DateTime(it->first);
        std::string strAddr;
        std::string strLabel;
        CKey key;
        if (pwallet->GetKey(keyid, key)) {
            file << strprintf("%s %s ", EncodeSecret(key), strTime);
            // Tag each key with its role so importwallet can reconstruct labels.
            if (GetWalletAddressesForKey(pwallet, keyid, strAddr, strLabel)) {
                file << strprintf("label=%s", strLabel);
            } else if (keyid == seed_id) {
                file << "hdseed=1";
            } else if (mapKeyPool.count(keyid)) {
                file << "reserve=1";
            } else if (pwallet->mapKeyMetadata[keyid].hdKeypath == "s") {
                // "s" keypath marks a (now inactive) HD seed.
                file << "inactivehdseed=1";
            } else {
                file << "change=1";
            }
            file << strprintf(" # addr=%s%s\n", strAddr, (pwallet->mapKeyMetadata[keyid].hdKeypath.size() > 0 ? " hdkeypath="+pwallet->mapKeyMetadata[keyid].hdKeypath : ""));
        }
    }
    file << "\n";
    for (const CScriptID &scriptid : scripts) {
        CScript script;
        std::string create_time = "0";
        std::string address = EncodeDestination(scriptid);
        // get birth times for scripts with metadata
        auto it = pwallet->m_script_metadata.find(scriptid);
        if (it != pwallet->m_script_metadata.end()) {
            create_time = FormatISO8601DateTime(it->second.nCreateTime);
        }
        if(pwallet->GetCScript(scriptid, script)) {
            file << strprintf("%s %s script=1", HexStr(script.begin(), script.end()), create_time);
            file << strprintf(" # addr=%s\n", address);
        }
    }
    file << "\n";
    file << "# End of dump\n";
    file.close();

    UniValue reply(UniValue::VOBJ);
    reply.pushKV("filename", filepath.string());

    return reply;
}

// Processes one entry of an importmulti request: validates the scriptPubKey /
// address / redeemscript / pubkeys / keys combination and imports it.
// NOTE(review): this function continues beyond the visible portion of the file.
static UniValue ProcessImport(CWallet * const pwallet, const UniValue& data, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(pwallet->cs_wallet)
{
    try {
        bool success = false;

        // Required fields.
        const UniValue& scriptPubKey = data["scriptPubKey"];

        // Should have script or JSON with "address".
        if (!(scriptPubKey.getType() == UniValue::VOBJ && scriptPubKey.exists("address")) && !(scriptPubKey.getType() == UniValue::VSTR)) {
            throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid scriptPubKey");
        }

        // Optional fields.
        const std::string& strRedeemScript = data.exists("redeemscript") ? data["redeemscript"].get_str() : "";
        const UniValue& pubKeys = data.exists("pubkeys") ? data["pubkeys"].get_array() : UniValue();
        const UniValue& keys = data.exists("keys") ? data["keys"].get_array() : UniValue();
        const bool internal = data.exists("internal") ? data["internal"].get_bool() : false;
        const bool watchOnly = data.exists("watchonly") ? data["watchonly"].get_bool() : false;
        const std::string& label = data.exists("label") && !internal ? data["label"].get_str() : "";

        bool isScript = scriptPubKey.getType() == UniValue::VSTR;
        bool isP2SH = strRedeemScript.length() > 0;
        const std::string& output = isScript ?
scriptPubKey.get_str() : scriptPubKey["address"].get_str(); // Parse the output. CScript script; CTxDestination dest; if (!isScript) { dest = DecodeDestination(output); if (!IsValidDestination(dest)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address"); } script = GetScriptForDestination(dest); } else { if (!IsHex(output)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid scriptPubKey"); } std::vector<unsigned char> vData(ParseHex(output)); script = CScript(vData.begin(), vData.end()); } // Watchonly and private keys if (watchOnly && keys.size()) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Incompatibility found between watchonly and keys"); } // Internal + Label if (internal && data.exists("label")) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Incompatibility found between internal and label"); } // Not having Internal + Script if (!internal && isScript) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Internal must be set for hex scriptPubKey"); } // Keys / PubKeys size check. if (!isP2SH && (keys.size() > 1 || pubKeys.size() > 1)) { // Address / scriptPubKey throw JSONRPCError(RPC_INVALID_PARAMETER, "More than private key given for one address"); } // Invalid P2SH redeemScript if (isP2SH && !IsHex(strRedeemScript)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid redeem script"); } // Process. // // P2SH if (isP2SH) { // Import redeem script. 
std::vector<unsigned char> vData(ParseHex(strRedeemScript)); CScript redeemScript = CScript(vData.begin(), vData.end()); // Invalid P2SH address if (!script.IsPayToScriptHash()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid P2SH address / script"); } pwallet->MarkDirty(); if (!pwallet->AddWatchOnly(redeemScript, timestamp)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding address to wallet"); } CScriptID redeem_id(redeemScript); if (!pwallet->HaveCScript(redeem_id) && !pwallet->AddCScript(redeemScript)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding p2sh redeemScript to wallet"); } CScript redeemDestination = GetScriptForDestination(redeem_id); if (::IsMine(*pwallet, redeemDestination) == ISMINE_SPENDABLE) { throw JSONRPCError(RPC_WALLET_ERROR, "The wallet already contains the private key for this address or script"); } pwallet->MarkDirty(); if (!pwallet->AddWatchOnly(redeemDestination, timestamp)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding address to wallet"); } // add to address book or update label if (IsValidDestination(dest)) { pwallet->SetAddressBook(dest, label, "receive"); } // Import private keys. if (keys.size()) { for (size_t i = 0; i < keys.size(); i++) { const std::string& privkey = keys[i].get_str(); CKey key = DecodeSecret(privkey); if (!key.IsValid()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid private key encoding"); } CPubKey pubkey = key.GetPubKey(); assert(key.VerifyPubKey(pubkey)); CKeyID vchAddress = pubkey.GetID(); pwallet->MarkDirty(); pwallet->SetAddressBook(vchAddress, label, "receive"); if (pwallet->HaveKey(vchAddress)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Already have this key"); } pwallet->mapKeyMetadata[vchAddress].nCreateTime = timestamp; if (!pwallet->AddKeyPubKey(key, pubkey)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding key to wallet"); } pwallet->UpdateTimeFirstKey(timestamp); } } success = true; } else { // Import public keys. 
if (pubKeys.size() && keys.size() == 0) { const std::string& strPubKey = pubKeys[0].get_str(); if (!IsHex(strPubKey)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Pubkey must be a hex string"); } std::vector<unsigned char> vData(ParseHex(strPubKey)); CPubKey pubKey(vData.begin(), vData.end()); if (!pubKey.IsFullyValid()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Pubkey is not a valid public key"); } CTxDestination pubkey_dest = pubKey.GetID(); // Consistency check. if (!isScript && !(pubkey_dest == dest)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Consistency check failed"); } // Consistency check. if (isScript) { CTxDestination destination; if (ExtractDestination(script, destination)) { if (!(destination == pubkey_dest)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Consistency check failed"); } } } CScript pubKeyScript = GetScriptForDestination(pubkey_dest); if (::IsMine(*pwallet, pubKeyScript) == ISMINE_SPENDABLE) { throw JSONRPCError(RPC_WALLET_ERROR, "The wallet already contains the private key for this address or script"); } pwallet->MarkDirty(); if (!pwallet->AddWatchOnly(pubKeyScript, timestamp)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding address to wallet"); } // add to address book or update label if (IsValidDestination(pubkey_dest)) { pwallet->SetAddressBook(pubkey_dest, label, "receive"); } // TODO Is this necessary? CScript scriptRawPubKey = GetScriptForRawPubKey(pubKey); if (::IsMine(*pwallet, scriptRawPubKey) == ISMINE_SPENDABLE) { throw JSONRPCError(RPC_WALLET_ERROR, "The wallet already contains the private key for this address or script"); } pwallet->MarkDirty(); if (!pwallet->AddWatchOnly(scriptRawPubKey, timestamp)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding address to wallet"); } success = true; } // Import private keys. if (keys.size()) { const std::string& strPrivkey = keys[0].get_str(); // Checks. 
CKey key = DecodeSecret(strPrivkey); if (!key.IsValid()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid private key encoding"); } CPubKey pubKey = key.GetPubKey(); assert(key.VerifyPubKey(pubKey)); CTxDestination pubkey_dest = pubKey.GetID(); // Consistency check. if (!isScript && !(pubkey_dest == dest)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Consistency check failed"); } // Consistency check. if (isScript) { CTxDestination destination; if (ExtractDestination(script, destination)) { if (!(destination == pubkey_dest)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Consistency check failed"); } } } CKeyID vchAddress = pubKey.GetID(); pwallet->MarkDirty(); pwallet->SetAddressBook(vchAddress, label, "receive"); if (pwallet->HaveKey(vchAddress)) { throw JSONRPCError(RPC_WALLET_ERROR, "The wallet already contains the private key for this address or script"); } pwallet->mapKeyMetadata[vchAddress].nCreateTime = timestamp; if (!pwallet->AddKeyPubKey(key, pubKey)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding key to wallet"); } pwallet->UpdateTimeFirstKey(timestamp); success = true; } // Import scriptPubKey only. if (pubKeys.size() == 0 && keys.size() == 0) { if (::IsMine(*pwallet, script) == ISMINE_SPENDABLE) { throw JSONRPCError(RPC_WALLET_ERROR, "The wallet already contains the private key for this address or script"); } pwallet->MarkDirty(); if (!pwallet->AddWatchOnly(script, timestamp)) { throw JSONRPCError(RPC_WALLET_ERROR, "Error adding address to wallet"); } if (scriptPubKey.getType() == UniValue::VOBJ) { // add to address book or update label if (IsValidDestination(dest)) { pwallet->SetAddressBook(dest, label, "receive"); } } success = true; } } UniValue result = UniValue(UniValue::VOBJ); result.pushKV("success", UniValue(success)); return result; } catch (const UniValue& e) { UniValue result = UniValue(UniValue::VOBJ); result.pushKV("success", UniValue(false)); result.pushKV("error", e); return result; } catch (...) 
{ UniValue result = UniValue(UniValue::VOBJ); result.pushKV("success", UniValue(false)); result.pushKV("error", JSONRPCError(RPC_MISC_ERROR, "Missing required fields")); return result; } } static int64_t GetImportTimestamp(const UniValue& data, int64_t now) { if (data.exists("timestamp")) { const UniValue& timestamp = data["timestamp"]; if (timestamp.isNum()) { return timestamp.get_int64(); } else if (timestamp.isStr() && timestamp.get_str() == "now") { return now; } throw JSONRPCError(RPC_TYPE_ERROR, strprintf("Expected number or \"now\" timestamp value for key. got type %s", uvTypeName(timestamp.type()))); } throw JSONRPCError(RPC_TYPE_ERROR, "Missing required timestamp field for key"); } UniValue importmulti(const JSONRPCRequest& mainRequest) { std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(mainRequest); CWallet* const pwallet = wallet.get(); if (!EnsureWalletIsAvailable(pwallet, mainRequest.fHelp)) { return NullUniValue; } // clang-format off if (mainRequest.fHelp || mainRequest.params.size() < 1 || mainRequest.params.size() > 2) throw std::runtime_error( "importmulti \"requests\" ( \"options\" )\n\n" "Import addresses/scripts (with private or public keys, redeem script (P2SH)), rescanning all addresses in one-shot-only (rescan can be disabled via options). Requires a new wallet backup.\n\n" "Arguments:\n" "1. requests (array, required) Data to be imported\n" " [ (array of json objects)\n" " {\n" " \"scriptPubKey\": \"<script>\" | { \"address\":\"<address>\" }, (string / json, required) Type of scriptPubKey (string for script, json for address)\n" " \"timestamp\": timestamp | \"now\" , (integer / string, required) Creation time of the key in seconds since epoch (Jan 1 1970 GMT),\n" " or the string \"now\" to substitute the current synced blockchain time. 
The timestamp of the oldest\n" " key will determine how far back blockchain rescans need to begin for missing wallet transactions.\n" " \"now\" can be specified to bypass scanning, for keys which are known to never have been used, and\n" " 0 can be specified to scan the entire blockchain. Blocks up to 2 hours before the earliest key\n" " creation time of all keys being imported by the importmulti call will be scanned.\n" " \"redeemscript\": \"<script>\" , (string, optional) Allowed only if the scriptPubKey is a P2SH address or a P2SH scriptPubKey\n" " \"pubkeys\": [\"<pubKey>\", ... ] , (array, optional) Array of strings giving pubkeys that must occur in the output or redeemscript\n" " \"keys\": [\"<key>\", ... ] , (array, optional) Array of strings giving private keys whose corresponding public keys must occur in the output or redeemscript\n" " \"internal\": <true> , (boolean, optional, default: false) Stating whether matching outputs should be treated as not incoming payments\n" " \"watchonly\": <true> , (boolean, optional, default: false) Stating whether matching outputs should be considered watched even when they're not spendable, only allowed if keys are empty\n" " \"label\": <label> , (string, optional, default: '') Label to assign to the address (aka account name, for now), only allowed with internal=false\n" " }\n" " ,...\n" " ]\n" "2. 
options (json, optional)\n" " {\n" " \"rescan\": <false>, (boolean, optional, default: true) Stating if should rescan the blockchain after all imports\n" " }\n" "\nNote: This call can take over an hour to complete if rescan is true, during that time, other rpc calls\n" "may report that the imported keys, addresses or scripts exists but related transactions are still missing.\n" "\nExamples:\n" + HelpExampleCli("importmulti", "'[{ \"scriptPubKey\": { \"address\": \"<my address>\" }, \"timestamp\":1455191478 }, " "{ \"scriptPubKey\": { \"address\": \"<my 2nd address>\" }, \"label\": \"example 2\", \"timestamp\": 1455191480 }]'") + HelpExampleCli("importmulti", "'[{ \"scriptPubKey\": { \"address\": \"<my address>\" }, \"timestamp\":1455191478 }]' '{ \"rescan\": false}'") + "\nResponse is an array with the same size as the input that has the execution result :\n" " [{ \"success\": true } , { \"success\": false, \"error\": { \"code\": -1, \"message\": \"Internal Server Error\"} }, ... ]\n"); // clang-format on RPCTypeCheck(mainRequest.params, {UniValue::VARR, UniValue::VOBJ}); const UniValue& requests = mainRequest.params[0]; //Default options bool fRescan = true; if (!mainRequest.params[1].isNull()) { const UniValue& options = mainRequest.params[1]; if (options.exists("rescan")) { fRescan = options["rescan"].get_bool(); } } WalletRescanReserver reserver(pwallet); if (fRescan && !reserver.reserve()) { throw JSONRPCError(RPC_WALLET_ERROR, "Wallet is currently rescanning. Abort existing rescan or wait."); } int64_t now = 0; bool fRunScan = false; int64_t nLowestTimestamp = 0; UniValue response(UniValue::VARR); { LOCK2(cs_main, pwallet->cs_wallet); EnsureWalletIsUnlocked(pwallet); // Verify all timestamps are present before importing any keys. now = chainActive.Tip() ? 
chainActive.Tip()->GetMedianTimePast() : 0; for (const UniValue& data : requests.getValues()) { GetImportTimestamp(data, now); } const int64_t minimumTimestamp = 1; if (fRescan && chainActive.Tip()) { nLowestTimestamp = chainActive.Tip()->GetBlockTime(); } else { fRescan = false; } for (const UniValue& data : requests.getValues()) { const int64_t timestamp = std::max(GetImportTimestamp(data, now), minimumTimestamp); const UniValue result = ProcessImport(pwallet, data, timestamp); response.push_back(result); if (!fRescan) { continue; } // If at least one request was successful then allow rescan. if (result["success"].get_bool()) { fRunScan = true; } // Get the lowest timestamp. if (timestamp < nLowestTimestamp) { nLowestTimestamp = timestamp; } } } if (fRescan && fRunScan && requests.size()) { int64_t scannedTime = pwallet->RescanFromTime(nLowestTimestamp, reserver, true /* update */); pwallet->ReacceptWalletTransactions(); if (pwallet->IsAbortingRescan()) { throw JSONRPCError(RPC_MISC_ERROR, "Rescan aborted by user."); } if (scannedTime > nLowestTimestamp) { std::vector<UniValue> results = response.getValues(); response.clear(); response.setArray(); size_t i = 0; for (const UniValue& request : requests.getValues()) { // If key creation date is within the successfully scanned // range, or if the import result already has an error set, let // the result stand unmodified. Otherwise replace the result // with an error message. if (scannedTime <= GetImportTimestamp(request, now) || results.at(i).exists("error")) { response.push_back(results.at(i)); } else { UniValue result = UniValue(UniValue::VOBJ); result.pushKV("success", UniValue(false)); result.pushKV( "error", JSONRPCError( RPC_MISC_ERROR, strprintf("Rescan failed for key with creation timestamp %d. There was an error reading a " "block from time %d, which is after or within %d seconds of key creation, and " "could contain transactions pertaining to the key. 
As a result, transactions " "and coins using this key may not appear in the wallet. This error could be " "caused by pruning or data corruption (see granacoind log for details) and could " "be dealt with by downloading and rescanning the relevant blocks (see -reindex " "and -rescan options).", GetImportTimestamp(request, now), scannedTime - TIMESTAMP_WINDOW - 1, TIMESTAMP_WINDOW))); response.push_back(std::move(result)); } ++i; } } } return response; }
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/ui/toolbar/component_toolbar_actions_factory.h"

#include "base/command_line.h"
#include "base/lazy_instance.h"
#include "chrome/browser/extensions/component_migration_helper.h"
#include "chrome/browser/media/router/media_router_feature.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/ui/browser.h"
#include "chrome/browser/ui/toolbar/toolbar_action_view_controller.h"
#include "extensions/common/feature_switch.h"

#if defined(ENABLE_MEDIA_ROUTER)
#include "chrome/browser/ui/toolbar/media_router_action.h"
#endif

namespace {

// When non-null, overrides the lazily-created singleton below. Installed
// only through SetTestingFactory() (test hook).
ComponentToolbarActionsFactory* testing_factory_ = nullptr;

// Process-wide default instance, constructed on first GetInstance() call.
base::LazyInstance<ComponentToolbarActionsFactory> lazy_factory = LAZY_INSTANCE_INITIALIZER;

// IDs of the legacy Cast extensions (stable and beta) that are mapped onto
// the built-in Media Router action in RegisterComponentMigrations() below.
const char kCastExtensionId[] = "boadgeojelhgndaghljhdicfkmllpafd";
const char kCastBetaExtensionId[] = "dliochdbjfkdbacpmhlcpmleaejidimm";

}  // namespace

// static
const char ComponentToolbarActionsFactory::kMediaRouterActionId[] =
    "media_router_action";

ComponentToolbarActionsFactory::ComponentToolbarActionsFactory() {}
ComponentToolbarActionsFactory::~ComponentToolbarActionsFactory() {}

// static
// Returns the testing factory when one is installed, otherwise the lazily
// constructed global instance.
ComponentToolbarActionsFactory* ComponentToolbarActionsFactory::GetInstance() {
  return testing_factory_ ? testing_factory_ : &lazy_factory.Get();
}

// Returns the component action ids that should exist when the toolbar is
// first created. Intentionally empty in this build; |profile| is accepted
// for interface compatibility only.
std::set<std::string> ComponentToolbarActionsFactory::GetInitialComponentIds(
    Profile* profile) {
  std::set<std::string> component_ids;
  return component_ids;
}

// Creates the view controller for the component action named by |id|.
// Unknown ids hit NOTREACHED() and yield a null controller.
std::unique_ptr<ToolbarActionViewController>
ComponentToolbarActionsFactory::GetComponentToolbarActionForId(
    const std::string& id, Browser* browser, ToolbarActionsBar* bar) {
  // This is currently behind the extension-action-redesign flag, as it is
  // designed for the new toolbar.
  DCHECK(extensions::FeatureSwitch::extension_action_redesign()->IsEnabled());

  // Add component toolbar actions here.

  // This current design means that the ComponentToolbarActionsFactory is aware
  // of all actions. Since we should *not* have an excessive amount of these
  // (since each will have an action in the toolbar or overflow menu), this
  // should be okay. If this changes, we should rethink this design to have,
  // e.g., RegisterChromeAction().
#if defined(ENABLE_MEDIA_ROUTER)
#if defined(NWJS_SDK)
  // The Media Router action is only compiled into NW.js SDK builds.
  if (id == kMediaRouterActionId)
    return std::unique_ptr<ToolbarActionViewController>(
        new MediaRouterAction(browser, bar));
#endif
#endif  // defined(ENABLE_MEDIA_ROUTER)

  NOTREACHED();
  return std::unique_ptr<ToolbarActionViewController>();
}

// static
// Installs |factory| as the object returned by GetInstance(); pass nullptr
// to restore the default singleton. Test-only.
void ComponentToolbarActionsFactory::SetTestingFactory(
    ComponentToolbarActionsFactory* factory) {
  testing_factory_ = factory;
}

// Registers the legacy Cast / Cast Beta extension ids against the Media
// Router component action so the migration helper can replace them.
void ComponentToolbarActionsFactory::RegisterComponentMigrations(
    extensions::ComponentMigrationHelper* helper) const {
  helper->Register(kMediaRouterActionId, kCastExtensionId);
  helper->Register(kMediaRouterActionId, kCastBetaExtensionId);
}

// Propagates the current Media Router feature state for |profile| to the
// migration helper, which enables or disables the component action.
void ComponentToolbarActionsFactory::HandleComponentMigrations(
    extensions::ComponentMigrationHelper* helper, Profile* profile) const {
  if (media_router::MediaRouterEnabled(profile)) {
    helper->OnFeatureEnabled(kMediaRouterActionId);
  } else {
    helper->OnFeatureDisabled(kMediaRouterActionId);
  }
}
/************************************************************ * * OPEN TRANSACTIONS * * Financial Cryptography and Digital Cash * Library, Protocol, API, Server, CLI, GUI * * -- Anonymous Numbered Accounts. * -- Untraceable Digital Cash. * -- Triple-Signed Receipts. * -- Cheques, Vouchers, Transfers, Inboxes. * -- Basket Currencies, Markets, Payment Plans. * -- Signed, XML, Ricardian-style Contracts. * -- Scripted smart contracts. * * EMAIL: * fellowtraveler@opentransactions.org * * WEBSITE: * http://www.opentransactions.org/ * * ----------------------------------------------------- * * LICENSE: * This Source Code Form is subject to the terms of the * Mozilla Public License, v. 2.0. If a copy of the MPL * was not distributed with this file, You can obtain one * at http://mozilla.org/MPL/2.0/. * * DISCLAIMER: * This program is distributed in the hope that it will * be useful, but WITHOUT ANY WARRANTY; without even the * implied warranty of MERCHANTABILITY or FITNESS FOR A * PARTICULAR PURPOSE. See the Mozilla Public License * for more details. 
 * ************************************************************/

#ifndef OPENTXS_CORE_CRYPTO_BIP32_HPP
#define OPENTXS_CORE_CRYPTO_BIP32_HPP

#include "opentxs/core/Proto.hpp"
#include "opentxs/core/Types.hpp"
#include "opentxs/core/crypto/CryptoSymmetric.hpp"
#include "opentxs/core/crypto/OTAsymmetricKey.hpp"

#include <string>

namespace opentxs
{

class OTPassword;

/// Abstract interface for hierarchical deterministic (BIP32-style) key
/// derivation. The pure-virtual methods must be supplied by a concrete
/// crypto backend; the non-virtual methods are declared here and
/// implemented elsewhere in terms of that backend.
class Bip32
{
public:
    /// Computes a string fingerprint identifying the given seed on the
    /// given curve. NOTE(review): the exact fingerprint format is defined
    /// by the implementation — confirm against the backend.
    virtual std::string SeedToFingerprint(
        const EcdsaCurve& curve,
        const OTPassword& seed) const = 0;
    /// Derives the root (master) private key for the given curve from the
    /// provided seed, in serialized form.
    virtual serializedAsymmetricKey SeedToPrivateKey(
        const EcdsaCurve& curve,
        const OTPassword& seed) const = 0;
    /// Derives the child key of |parent| at the given |index|, in
    /// serialized form.
    virtual serializedAsymmetricKey GetChild(
        const proto::AsymmetricKey& parent,
        const uint32_t index) const = 0;

    /// Returns a seed, optionally selected by |fingerprint| (empty string
    /// presumably selects a default seed — confirm against implementation).
    std::string Seed(const std::string& fingerprint = "") const;
    /// Derives the HD key described by |path| on |curve|.
    serializedAsymmetricKey GetHDKey(
        const EcdsaCurve& curve,
        proto::HDPath& path) const;
    /// Derives the payment-code key for the given |nym| index.
    serializedAsymmetricKey GetPaymentCode(const uint32_t nym) const;
};
} // namespace opentxs

#endif // OPENTXS_CORE_CRYPTO_BIP32_HPP
/*
 * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

#include <aws/sms-voice/model/KinesisFirehoseDestination.h>
#include <aws/core/utils/json/JsonSerializer.h>

#include <utility>

using namespace Aws::Utils::Json;
using namespace Aws::Utils;

namespace Aws
{
namespace PinpointSMSVoice
{
namespace Model
{

// Default-constructed destination: neither ARN has been populated yet.
KinesisFirehoseDestination::KinesisFirehoseDestination()
  : m_deliveryStreamArnHasBeenSet(false),
    m_iamRoleArnHasBeenSet(false)
{
}

// Construct from a JSON view by delegating to the assignment operator.
KinesisFirehoseDestination::KinesisFirehoseDestination(JsonView source)
  : m_deliveryStreamArnHasBeenSet(false),
    m_iamRoleArnHasBeenSet(false)
{
  *this = source;
}

// Populate any field present in |source|; fields absent from the JSON are
// left untouched (their has-been-set flags are not cleared).
KinesisFirehoseDestination& KinesisFirehoseDestination::operator =(JsonView source)
{
  if (source.ValueExists("DeliveryStreamArn"))
  {
    m_deliveryStreamArn = source.GetString("DeliveryStreamArn");
    m_deliveryStreamArnHasBeenSet = true;
  }

  if (source.ValueExists("IamRoleArn"))
  {
    m_iamRoleArn = source.GetString("IamRoleArn");
    m_iamRoleArnHasBeenSet = true;
  }

  return *this;
}

// Serialize to JSON, emitting only the fields that have been set.
JsonValue KinesisFirehoseDestination::Jsonize() const
{
  JsonValue doc;

  if (m_deliveryStreamArnHasBeenSet)
  {
    doc.WithString("DeliveryStreamArn", m_deliveryStreamArn);
  }

  if (m_iamRoleArnHasBeenSet)
  {
    doc.WithString("IamRoleArn", m_iamRoleArn);
  }

  return doc;
}

} // namespace Model
} // namespace PinpointSMSVoice
} // namespace Aws
// ILP-based solver built on CPLEX Concert Technology. It reads a phylogeny
// character matrix and searches for a selection of characters that passes
// the four-gamete-style compatibility checks encoded below. Matrix entries
// are 0/1 observed states or -1 for "missing"; gamete codes are
// 0 = (0,0), 1 = (0,1), 2 = (1,0), 3 = (1,1).
#include <vector>
#include <iostream>
#include <cassert>
#include <set>
#include <map>
#include <ilcplex/ilocplex.h>
#include "../bt-maxsat/io.hpp"
#include "../bt-maxsat/phyl_mat.hpp"
// Shorthand for pair members, used in main() on map iteration.
#define F first
#define S second
using namespace triangulator;
using namespace std;

// Returns the set of gamete codes (0..3, see mapping above) that occur when
// rows a and b are compared position by position. Positions where either row
// is -1 (missing) match none of the four == tests and are ignored.
// NOTE(review): the h1..h4 flags are ints assigned `true`; works, but bool
// would express the intent better.
set<int> gametes(const vector<int>& a, const vector<int>& b) {
  assert(a.size() == b.size());
  int h1=0;
  int h2=0;
  int h3=0;
  int h4=0;
  for (int i=0;i<(int)a.size();i++){
    if (a[i]==0&&b[i]==0){
      h1=true;
    }
    if(a[i]==0&&b[i]==1){
      h2=true;
    }
    if(a[i]==1&&b[i]==0){
      h3=true;
    }
    if(a[i]==1&&b[i]==1){
      h4=true;
    }
  }
  set<int> r;
  if (h1) r.insert(0);
  if (h2) r.insert(1);
  if (h3) r.insert(2);
  if (h4) r.insert(3);
  return r;
}

// Propagation / preprocessing pass: for every row pair that already exhibits
// exactly three gametes, any -1 entry whose completion would create the
// missing fourth gamete is forced to the opposite value. Iterates to a fixed
// point. Returns {{-2}} as an "unsatisfiable" sentinel if some pair already
// shows all four gametes.
vector<vector<int>> forbGamete(vector<vector<int>> binmat) {
  bool f=true;  // "changed this sweep" flag; loop runs until a full sweep makes no change
  cout<<"applying forbGamete"<<endl;
  while (f){
    f=false;
    for (int i=0;i<(int)binmat.size();i++){
      for (int ii=i+1;ii<(int)binmat.size();ii++){
        auto gs = gametes(binmat[i], binmat[ii]);
        if (gs.size()==4) {
          // All four gametes present: no completion can fix this pair.
          cout<<"forbgamete unsat"<<endl;
          return {{-2}};
        }
        if (gs.size()<3) continue;
        // Exactly three gametes: find the single missing code.
        int miss=0;
        for (int g=0;g<4;g++){
          if (gs.count(g)==0){
            miss=g;
          }
        }
        // Force each undetermined (-1) entry so the missing gamete cannot
        // be produced at that column.
        for (int j=0;j<(int)binmat[i].size();j++){
          if (miss==0){
            // Missing (0,0): a known 0 on one side forces the other to 1.
            if (binmat[i][j]==0&&binmat[ii][j]==-1){
              binmat[ii][j]=1;
              f=true;
            }
            if (binmat[ii][j]==0&&binmat[i][j]==-1){
              binmat[i][j]=1;
              f=true;
            }
          } else if(miss==1){
            // Missing (0,1).
            if (binmat[i][j]==0&&binmat[ii][j]==-1){
              binmat[ii][j]=0;
              f=true;
            }
            if (binmat[ii][j]==1&&binmat[i][j]==-1){
              binmat[i][j]=1;
              f=true;
            }
          } else if(miss==2){
            // Missing (1,0).
            if (binmat[i][j]==1&&binmat[ii][j]==-1){
              binmat[ii][j]=1;
              f=true;
            }
            if (binmat[ii][j]==0&&binmat[i][j]==-1){
              binmat[i][j]=0;
              f=true;
            }
          } else if(miss==3){
            // Missing (1,1): a known 1 on one side forces the other to 0.
            if (binmat[i][j]==1&&binmat[ii][j]==-1){
              binmat[ii][j]=0;
              f=true;
            }
            if (binmat[ii][j]==1&&binmat[i][j]==-1){
              binmat[i][j]=0;
              f=true;
            }
          } else{
            assert(0);
          }
        }
      }
    }
    if (f) {
      cout<<"found fg"<<endl;
    }
  }
  return binmat;
}

// Removes "nested" rows: if every determined entry of row i agrees with row
// ii (i.e. row i is -1 wherever it differs), row i is redundant and is
// dropped (swap-with-back + pop). Restarts recursively after each removal
// since indices shift.
vector<vector<int>> charNest(vector<vector<int>> binmat) {
  for (int i=0;i<(int)binmat.size();i++){
    for (int ii=i+1;ii<(int)binmat.size();ii++){
      bool ok=true;
      for (int j=0;j<(int)binmat[i].size();j++){
        if (binmat[i][j] != binmat[ii][j] && binmat[i][j] != -1) {
          ok = false;
          break;
        }
      }
      if (ok) {
        cout<<"found charnest"<<endl;
        swap(binmat[i], binmat.back());
        binmat.pop_back();
        return charNest(binmat);
      }
    }
  }
  return binmat;
}

// Decides whether the -1 entries of binmat can be completed so that no row
// pair exhibits all four gametes. Preprocesses with forbGamete/charNest,
// then builds a 0/1 ILP: Y variables are the cell completions (fixed where
// the cell is known), per-pair slack tc is minimized, and bv indicator
// variables lower-bound whether a currently-missing gamete appears in the
// completion. Returns true iff an optimal solution uses no slack.
bool solveMD(vector<vector<int>> binmat) {
  vector<vector<int>> bad = {{-2}};
  binmat = forbGamete(binmat);
  if (binmat == bad) return false;  // preprocessing already proved UNSAT
  binmat = charNest(binmat);
  IloEnv env;
  IloModel model(env);
  IloCplex cplex(model);
  cplex.setParam(IloCplex::Threads, 1);
  // One boolean Y variable per matrix cell; yvar_id maps (row, col) to its
  // index in Y_vars. Known cells are pinned to their observed value.
  vector<vector<int>> yvar_id(binmat.size());
  IloNumVarArray Y_vars(env);
  int ycs = 0;
  for (int i=0;i<(int)binmat.size();i++){
    yvar_id[i].resize(binmat[i].size());
    for (int j=0;j<(int)binmat[i].size();j++){
      Y_vars.add(IloNumVar(env, 0, 1, ILOBOOL));
      yvar_id[i][j] = ycs++;
      if (binmat[i][j] == 0) {
        model.add(Y_vars[yvar_id[i][j]] <= 0);
      } else if (binmat[i][j] == 1) {
        model.add(Y_vars[yvar_id[i][j]] >= 1);
      }
    }
  }
  IloExpr obj(env);
  IloNumVarArray tcvs(env);
  int tcs=0;
  for (int i=0;i<(int)binmat.size();i++){
    for (int ii=i+1;ii<(int)binmat.size();ii++){
      // tc is the pair's violation indicator; the objective minimizes the
      // number of violated pairs.
      IloNumVar tc(env, 0, 1, ILOBOOL);
      obj += tc;
      tcvs.add(tc);
      tcs++;
      auto gs = gametes(binmat[i], binmat[ii]);
      assert(gs.size() < 4);  // forbGamete already ruled out 4-gamete pairs
      IloExpr bexp(env);
      for (int g=0;g<4;g++){
        if (gs.count(g) == 0) {
          // bv >= 1 whenever some undetermined column's completion realizes
          // gamete g; each inequality below is the linearization of the
          // corresponding 0/1 conjunction.
          IloNumVar bv(env, 0, 1, ILOBOOL);
          bexp += bv;
          for (int j=0;j<(int)binmat[i].size();j++){
            if (binmat[i][j]==-1 || binmat[ii][j]==-1){
              if (g==0){
                model.add(bv >= -Y_vars[yvar_id[i][j]] -Y_vars[yvar_id[ii][j]] + 1);
              } else if(g==1){
                model.add(bv >= -Y_vars[yvar_id[i][j]] +Y_vars[yvar_id[ii][j]]);
              } else if(g==2){
                model.add(bv >= Y_vars[yvar_id[i][j]] -Y_vars[yvar_id[ii][j]]);
              } else {
                model.add(bv >= Y_vars[yvar_id[i][j]] +Y_vars[yvar_id[ii][j]] -1);
              }
            }
          }
        }
      }
      // Existing gametes plus newly-realized ones must stay <= 3 unless the
      // pair's violation variable tc is spent.
      model.add((int)gs.size() + bexp <= 3 + tc);
    }
  }
  model.add(IloMinimize(env, obj));
  cplex.solve();
  assert(cplex.getStatus() == IloAlgorithm::Optimal);
  IloNumArray vals(env);
  cplex.getValues(vals, tcvs);
  // Feasible without violations iff every tc solved to 0.
  for (int i=0;i<tcs;i++){
    if (vals[i] > 0.5){
      return false;
    }
  }
  return true;
}

// Attempts a "character removal" solve: G_vars[i] marks character group i as
// removed (minimized), D_vars mark individual rows as discarded, and pairs
// whose current gamete count is >= th must either be repaired (via the same
// bv/tc linearization as solveMD) or have a row discarded. The candidate
// removal set is then verified exactly with solveMD on the kept rows.
// Returns the removed group indices on success, or {-1} on failure.
// NOTE(review): the first loop uses `int i < binmat.size()` (signed/unsigned
// comparison) unlike the casts used elsewhere in this file.
vector<int> trySolveCr(vector<vector<int>> binmat, vector<vector<int>> childs, int th) {
  IloEnv env;
  IloModel model(env);
  IloCplex cplex(model);
  cplex.setParam(IloCplex::Threads, 1);
  IloNumVarArray D_vars(env);
  IloNumVarArray G_vars(env);
  for (int i=0;i<binmat.size();i++){
    D_vars.add(IloNumVar(env, 0, 1, ILOBOOL));
  }
  IloExpr obj(env);
  for (int i=0;i<(int)childs.size();i++){
    G_vars.add(IloNumVar(env, 0, 1, ILOBOOL));
    obj += G_vars[i];
    // A group counts as removed (G=1) as soon as any of its rows is
    // discarded: |childs[i]| * G_i >= sum of its D variables.
    IloExpr chc(env);
    for (int x : childs[i]){
      chc += D_vars[x];
    }
    model.add((int)childs[i].size() * G_vars[i] >= chc);
  }
  model.add(IloMinimize(env, obj));
  // Cell-completion Y variables, identical setup to solveMD.
  vector<vector<int>> yvar_id(binmat.size());
  IloNumVarArray Y_vars(env);
  int ycs=0;
  for (int i=0;i<(int)binmat.size();i++){
    yvar_id[i].resize(binmat[i].size());
    for (int j=0;j<(int)binmat[i].size();j++){
      Y_vars.add(IloNumVar(env, 0, 1, ILOBOOL));
      yvar_id[i][j] = ycs++;
      if (binmat[i][j] == 0) {
        model.add(Y_vars[yvar_id[i][j]] <= 0);
      } else if (binmat[i][j] == 1) {
        model.add(Y_vars[yvar_id[i][j]] >= 1);
      }
    }
  }
  for (int i=0;i<(int)binmat.size();i++){
    for (int ii=i+1;ii<(int)binmat.size();ii++){
      auto gs = gametes(binmat[i], binmat[ii]);
      // Only pairs at or above the threshold th are constrained; lower
      // thresholds (the caller sweeps th from 4 down to 0) constrain more
      // pairs.
      if ((int)gs.size() >= th) {
        if (gs.size() == 4) {
          // Irreparable pair: at least one of the two rows must go.
          model.add(D_vars[i] + D_vars[ii] >= 1);
        } else {
          IloNumVar tc(env, 0, 1, ILOBOOL);
          IloExpr bexp(env);
          for (int g=0;g<4;g++){
            if (gs.count(g) == 0) {
              IloNumVar bv(env, 0, 1, ILOBOOL);
              bexp += bv;
              for (int j=0;j<(int)binmat[i].size();j++){
                if (binmat[i][j]==-1 || binmat[ii][j]==-1){
                  if (g==0){
                    model.add(bv >= -Y_vars[yvar_id[i][j]] -Y_vars[yvar_id[ii][j]] + 1);
                  } else if(g==1){
                    model.add(bv >= -Y_vars[yvar_id[i][j]] +Y_vars[yvar_id[ii][j]]);
                  } else if(g==2){
                    model.add(bv >= Y_vars[yvar_id[i][j]] -Y_vars[yvar_id[ii][j]]);
                  } else {
                    model.add(bv >= Y_vars[yvar_id[i][j]] +Y_vars[yvar_id[ii][j]] -1);
                  }
                }
              }
            }
          }
          model.add((int)gs.size() + bexp <= 3 + tc);
          // A violated pair (tc=1) must have one of its rows discarded.
          model.add(D_vars[i] + D_vars[ii] - tc >= 0);
        }
      }
    }
  }
  cplex.solve();
  assert(cplex.getStatus() == IloAlgorithm::Optimal);
  IloNumArray vals(env);
  cplex.getValues(vals, G_vars);
  // Removed groups go into sol; the rows of all kept groups are collected
  // for exact verification.
  vector<int> sol;
  vector<vector<int>> binmatver;
  for (int i=0;i<(int)childs.size();i++){
    if (vals[i] > 0.5) {
      sol.push_back(i);
    } else {
      for (int x : childs[i]) {
        binmatver.push_back(binmat[x]);
      }
    }
  }
  cout<<"solving MD "<<sol.size()<<endl;
  if (solveMD(binmatver)) {
    cout<<"good MD"<<endl;
    return sol;
  } else {
    cout<<"bad MD"<<endl;
    return {-1};
  }
}

// Reads a PhylMat from stdin, expands each multi-state character into binary
// rows (one per ordered state pair with both states occurring at least
// twice), then sweeps the gamete-count threshold from loose (4) to tight (0)
// until trySolveCr succeeds, printing the removed character indices.
int main(){
  Io io;
  PhylMat pm = io.ReadPhylMat(std::cin);
  // ss[j] counts, per character j, how many taxa carry each observed state.
  vector<map<int, int>> ss(pm.m());
  for (int j=0;j<pm.m();j++){
    for (int i=0;i<pm.n();i++){
      if (pm.GetMat(i, j)>=0){
        ss[j][pm.GetMat(i, j)]++;
      }
    }
  }
  // For each state pair (s1 < s2) with support >= 2 on both sides, emit a
  // binary row: 0 where the taxon has s1, 1 where it has s2, -1 elsewhere.
  // childs[j] records which binary rows derive from character j.
  vector<vector<int>> binmat;
  vector<vector<int>> childs(pm.m());
  for (int j=0;j<pm.m();j++){
    for (auto s1:ss[j]){
      for (auto s2:ss[j]){
        if (s1.F < s2.F && s1.S >= 2 && s2.S >= 2) {
          childs[j].push_back(binmat.size());
          binmat.push_back(vector<int>(pm.n()));
          for (int i=0;i<pm.n();i++){
            if (pm.GetMat(i,j)==s1.F){
              binmat.back()[i]=0;
            } else if(pm.GetMat(i,j)==s2.F){
              binmat.back()[i]=1;
            } else {
              binmat.back()[i]=-1;
            }
          }
        }
      }
    }
  }
  // Relax the threshold until a verified solution is found.
  for (int q=4;q>=0;q--){
    auto sol=trySolveCr(binmat, childs, q);
    vector<int> bad = {-1};
    if (sol != bad){
      cout<<"sol "<<sol.size()<<endl;
      for (int x : sol){
        cout<<x<<" ";
      }
      cout<<endl;
      break;
    }
  }
}
#include "EditorLayer.h" #include <imgui/imgui.h> #include <glm/gtc/matrix_transform.hpp> #include <glm/gtc/type_ptr.hpp> #include "Hazel/Scene/SceneSerializer.h" #include "Hazel/Utils/PlatformUtils.h" #include "ImGuizmo.h" #include "Hazel/Math/Math.h" namespace Hazel { EditorLayer::EditorLayer() : Layer("EditorLayer"), m_CameraController(1280.0f / 720.0f), m_SquareColor({ 0.2f, 0.3f, 0.8f, 1.0f }) { } void EditorLayer::OnAttach() { HZ_PROFILE_FUNCTION(); m_CheckerboardTexture = Texture2D::Create("assets/textures/Checkerboard.png"); FramebufferSpecification fbSpec; fbSpec.Attachments = { FramebufferTextureFormat::RGBA8, FramebufferTextureFormat::RED_INTEGER, FramebufferTextureFormat::Depth }; fbSpec.Width = 1280; fbSpec.Height = 720; m_Framebuffer = Framebuffer::Create(fbSpec); m_ActiveScene = CreateRef<Scene>(); auto commandLineArgs = Application::Get().GetCommandLineArgs(); if (commandLineArgs.Count > 1) { auto sceneFilePath = commandLineArgs[1]; SceneSerializer serializer(m_ActiveScene); serializer.Deserialize(sceneFilePath); } m_EditorCamera = EditorCamera(30.0f, 1.778f, 0.1f, 1000.0f); #if 0 // Entity auto square = m_ActiveScene->CreateEntity("Green Square"); square.AddComponent<SpriteRendererComponent>(glm::vec4{0.0f, 1.0f, 0.0f, 1.0f}); auto redSquare = m_ActiveScene->CreateEntity("Red Square"); redSquare.AddComponent<SpriteRendererComponent>(glm::vec4{ 1.0f, 0.0f, 0.0f, 1.0f }); m_SquareEntity = square; m_CameraEntity = m_ActiveScene->CreateEntity("Camera A"); m_CameraEntity.AddComponent<CameraComponent>(); m_SecondCamera = m_ActiveScene->CreateEntity("Camera B"); auto& cc = m_SecondCamera.AddComponent<CameraComponent>(); cc.Primary = false; class CameraController : public ScriptableEntity { public: virtual void OnCreate() override { auto& translation = GetComponent<TransformComponent>().Translation; translation.x = rand() % 10 - 5.0f; } virtual void OnDestroy() override { } virtual void OnUpdate(Timestep ts) override { auto& translation = 
GetComponent<TransformComponent>().Translation; float speed = 5.0f; if (Input::IsKeyPressed(Key::A)) translation.x -= speed * ts; if (Input::IsKeyPressed(Key::D)) translation.x += speed * ts; if (Input::IsKeyPressed(Key::W)) translation.y += speed * ts; if (Input::IsKeyPressed(Key::S)) translation.y -= speed * ts; } }; m_CameraEntity.AddComponent<NativeScriptComponent>().Bind<CameraController>(); m_SecondCamera.AddComponent<NativeScriptComponent>().Bind<CameraController>(); #endif m_SceneHierarchyPanel.SetContext(m_ActiveScene); } void EditorLayer::OnDetach() { HZ_PROFILE_FUNCTION(); } void EditorLayer::OnUpdate(Timestep ts) { HZ_PROFILE_FUNCTION(); // Resize if (FramebufferSpecification spec = m_Framebuffer->GetSpecification(); m_ViewportSize.x > 0.0f && m_ViewportSize.y > 0.0f && // zero sized framebuffer is invalid (spec.Width != m_ViewportSize.x || spec.Height != m_ViewportSize.y)) { m_Framebuffer->Resize((uint32_t)m_ViewportSize.x, (uint32_t)m_ViewportSize.y); m_CameraController.OnResize(m_ViewportSize.x, m_ViewportSize.y); m_EditorCamera.SetViewportSize(m_ViewportSize.x, m_ViewportSize.y); m_ActiveScene->OnViewportResize((uint32_t)m_ViewportSize.x, (uint32_t)m_ViewportSize.y); } // Update if (m_ViewportFocused) m_CameraController.OnUpdate(ts); m_EditorCamera.OnUpdate(ts); // Render Renderer2D::ResetStats(); m_Framebuffer->Bind(); RenderCommand::SetClearColor({ 0.1f, 0.1f, 0.1f, 1 }); RenderCommand::Clear(); // Clear our entity ID attachment to -1 m_Framebuffer->ClearAttachment(1, -1); // Update scene m_ActiveScene->OnUpdateEditor(ts, m_EditorCamera); auto[mx, my] = ImGui::GetMousePos(); mx -= m_ViewportBounds[0].x; my -= m_ViewportBounds[0].y; glm::vec2 viewportSize = m_ViewportBounds[1] - m_ViewportBounds[0]; my = viewportSize.y - my; int mouseX = (int)mx; int mouseY = (int)my; if (mouseX >= 0 && mouseY >= 0 && mouseX < (int)viewportSize.x && mouseY < (int)viewportSize.y) { int pixelData = m_Framebuffer->ReadPixel(1, mouseX, mouseY); m_HoveredEntity = 
// --- Tail of EditorLayer::OnUpdate() (the function begins before this chunk) ---
// Translate the mouse-picked framebuffer value into an entity handle:
// -1 means "no entity under the cursor", otherwise the pixel value is the entt id.
pixelData == -1 ? Entity() : Entity((entt::entity)pixelData, m_ActiveScene.get());
	}

	m_Framebuffer->Unbind();
}

// Builds the whole editor UI for this frame: fullscreen dockspace, menu bar,
// scene-hierarchy panel, renderer stats panel, the framebuffer-backed viewport,
// and the ImGuizmo transform gizmo for the selected entity.
void EditorLayer::OnImGuiRender()
{
	HZ_PROFILE_FUNCTION();

	// Note: Switch this to true to enable dockspace
	static bool dockspaceOpen = true;
	static bool opt_fullscreen_persistant = true;
	bool opt_fullscreen = opt_fullscreen_persistant;
	static ImGuiDockNodeFlags dockspace_flags = ImGuiDockNodeFlags_None;

	// We are using the ImGuiWindowFlags_NoDocking flag to make the parent window not dockable into,
	// because it would be confusing to have two docking targets within each others.
	ImGuiWindowFlags window_flags = ImGuiWindowFlags_MenuBar | ImGuiWindowFlags_NoDocking;
	if (opt_fullscreen)
	{
		// Pin the dockspace host window to the main viewport so it fills the OS window.
		ImGuiViewport* viewport = ImGui::GetMainViewport();
		ImGui::SetNextWindowPos(viewport->Pos);
		ImGui::SetNextWindowSize(viewport->Size);
		ImGui::SetNextWindowViewport(viewport->ID);
		ImGui::PushStyleVar(ImGuiStyleVar_WindowRounding, 0.0f);
		ImGui::PushStyleVar(ImGuiStyleVar_WindowBorderSize, 0.0f);
		window_flags |= ImGuiWindowFlags_NoTitleBar | ImGuiWindowFlags_NoCollapse | ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoMove;
		window_flags |= ImGuiWindowFlags_NoBringToFrontOnFocus | ImGuiWindowFlags_NoNavFocus;
	}

	// When using ImGuiDockNodeFlags_PassthruCentralNode, DockSpace() will render our background
	// and handle the pass-thru hole, so we ask Begin() to not render a background.
	if (dockspace_flags & ImGuiDockNodeFlags_PassthruCentralNode)
		window_flags |= ImGuiWindowFlags_NoBackground;

	// Important: note that we proceed even if Begin() returns false (aka window is collapsed).
	// This is because we want to keep our DockSpace() active. If a DockSpace() is inactive,
	// all active windows docked into it will lose their parent and become undocked.
	// We cannot preserve the docking relationship between an active window and an inactive docking, otherwise
	// any change of dockspace/settings would lead to windows being stuck in limbo and never being visible.
	ImGui::PushStyleVar(ImGuiStyleVar_WindowPadding, ImVec2(0.0f, 0.0f));
	ImGui::Begin("DockSpace Demo", &dockspaceOpen, window_flags);
	ImGui::PopStyleVar();

	if (opt_fullscreen)
		ImGui::PopStyleVar(2);

	// DockSpace
	ImGuiIO& io = ImGui::GetIO();
	ImGuiStyle& style = ImGui::GetStyle();
	// Temporarily raise the minimum window width so docked panels can't collapse
	// too small; restored right after DockSpace() is submitted.
	float minWinSizeX = style.WindowMinSize.x;
	style.WindowMinSize.x = 370.0f;
	if (io.ConfigFlags & ImGuiConfigFlags_DockingEnable)
	{
		ImGuiID dockspace_id = ImGui::GetID("MyDockSpace");
		ImGui::DockSpace(dockspace_id, ImVec2(0.0f, 0.0f), dockspace_flags);
	}

	style.WindowMinSize.x = minWinSizeX;

	if (ImGui::BeginMenuBar())
	{
		if (ImGui::BeginMenu("File"))
		{
			// Disabling fullscreen would allow the window to be moved to the front of other windows,
			// which we can't undo at the moment without finer window depth/z control.
			//ImGui::MenuItem("Fullscreen", NULL, &opt_fullscreen_persistant);

			if (ImGui::MenuItem("New", "Ctrl+N"))
				NewScene();

			if (ImGui::MenuItem("Open...", "Ctrl+O"))
				OpenScene();

			if (ImGui::MenuItem("Save As...", "Ctrl+Shift+S"))
				SaveSceneAs();

			if (ImGui::MenuItem("Exit"))
				Application::Get().Close();
			ImGui::EndMenu();
		}

		ImGui::EndMenuBar();
	}

	m_SceneHierarchyPanel.OnImGuiRender();

	// Renderer statistics panel, including which entity the mouse is hovering.
	ImGui::Begin("Stats");

	std::string name = "None";
	if (m_HoveredEntity)
		name = m_HoveredEntity.GetComponent<TagComponent>().Tag;
	ImGui::Text("Hovered Entity: %s", name.c_str());

	auto stats = Renderer2D::GetStats();
	ImGui::Text("Renderer2D Stats:");
	ImGui::Text("Draw Calls: %d", stats.DrawCalls);
	ImGui::Text("Quads: %d", stats.QuadCount);
	ImGui::Text("Vertices: %d", stats.GetTotalVertexCount());
	ImGui::Text("Indices: %d", stats.GetTotalIndexCount());

	ImGui::End();

	// Viewport panel: shows the scene framebuffer and hosts the gizmo.
	ImGui::PushStyleVar(ImGuiStyleVar_WindowPadding, ImVec2{ 0, 0 });
	ImGui::Begin("Viewport");
	// Record the viewport's screen-space bounds (used elsewhere for mouse picking).
	auto viewportMinRegion = ImGui::GetWindowContentRegionMin();
	auto viewportMaxRegion = ImGui::GetWindowContentRegionMax();
	auto viewportOffset = ImGui::GetWindowPos();
	m_ViewportBounds[0] = { viewportMinRegion.x + viewportOffset.x, viewportMinRegion.y + viewportOffset.y };
	m_ViewportBounds[1] = { viewportMaxRegion.x + viewportOffset.x, viewportMaxRegion.y + viewportOffset.y };

	m_ViewportFocused = ImGui::IsWindowFocused();
	m_ViewportHovered = ImGui::IsWindowHovered();
	// Only swallow events in the ImGui layer when the viewport is neither focused nor hovered.
	Application::Get().GetImGuiLayer()->BlockEvents(!m_ViewportFocused && !m_ViewportHovered);

	ImVec2 viewportPanelSize = ImGui::GetContentRegionAvail();
	m_ViewportSize = { viewportPanelSize.x, viewportPanelSize.y };

	// The color attachment is drawn as an ImGui image; UVs (0,1)-(1,0) flip it vertically
	// because the GL texture origin is bottom-left.
	uint64_t textureID = m_Framebuffer->GetColorAttachmentRendererID();
	ImGui::Image(reinterpret_cast<void*>(textureID), ImVec2{ m_ViewportSize.x, m_ViewportSize.y }, ImVec2{ 0, 1 }, ImVec2{ 1, 0 });

	// Gizmos
	Entity selectedEntity = m_SceneHierarchyPanel.GetSelectedEntity();
	if (selectedEntity && m_GizmoType != -1)
	{
		ImGuizmo::SetOrthographic(false);
		ImGuizmo::SetDrawlist();

		ImGuizmo::SetRect(m_ViewportBounds[0].x, m_ViewportBounds[0].y, m_ViewportBounds[1].x - m_ViewportBounds[0].x, m_ViewportBounds[1].y - m_ViewportBounds[0].y);

		// Camera

		// Runtime camera from entity
		// auto cameraEntity = m_ActiveScene->GetPrimaryCameraEntity();
		// const auto& camera = cameraEntity.GetComponent<CameraComponent>().Camera;
		// const glm::mat4& cameraProjection = camera.GetProjection();
		// glm::mat4 cameraView = glm::inverse(cameraEntity.GetComponent<TransformComponent>().GetTransform());

		// Editor camera
		const glm::mat4& cameraProjection = m_EditorCamera.GetProjection();
		glm::mat4 cameraView = m_EditorCamera.GetViewMatrix();

		// Entity transform
		auto& tc = selectedEntity.GetComponent<TransformComponent>();
		glm::mat4 transform = tc.GetTransform();

		// Snapping
		bool snap = Input::IsKeyPressed(Key::LeftControl);
		float snapValue = 0.5f; // Snap to 0.5m for translation/scale
		// Snap to 45 degrees for rotation
		if (m_GizmoType == ImGuizmo::OPERATION::ROTATE)
			snapValue = 45.0f;

		float snapValues[3] = { snapValue, snapValue, snapValue };

		ImGuizmo::Manipulate(glm::value_ptr(cameraView), glm::value_ptr(cameraProjection),
			(ImGuizmo::OPERATION)m_GizmoType, ImGuizmo::LOCAL,
			glm::value_ptr(transform), nullptr, snap ? snapValues : nullptr);

		if (ImGuizmo::IsUsing())
		{
			glm::vec3 translation, rotation, scale;
			Math::DecomposeTransform(transform, translation, rotation, scale);

			// Apply rotation as a delta to avoid gimbal-related snapping when
			// writing decomposed Euler angles straight back.
			glm::vec3 deltaRotation = rotation - tc.Rotation;
			tc.Translation = translation;
			tc.Rotation += deltaRotation;
			tc.Scale = scale;
		}
	}

	ImGui::End();
	ImGui::PopStyleVar();

	ImGui::End();
}

// Forwards events to the camera controllers, then dispatches keyboard/mouse
// shortcuts to the handlers below.
void EditorLayer::OnEvent(Event& e)
{
	m_CameraController.OnEvent(e);
	m_EditorCamera.OnEvent(e);

	EventDispatcher dispatcher(e);
	dispatcher.Dispatch<KeyPressedEvent>(HZ_BIND_EVENT_FN(EditorLayer::OnKeyPressed));
	dispatcher.Dispatch<MouseButtonPressedEvent>(HZ_BIND_EVENT_FN(EditorLayer::OnMouseButtonPressed));
}

// Keyboard shortcuts: Ctrl+N/O/Shift+S for scene file ops, Q/W/E/R to select
// the active gizmo operation (none/translate/rotate/scale).
bool EditorLayer::OnKeyPressed(KeyPressedEvent& e)
{
	// Shortcuts
	if (e.GetRepeatCount() > 0)
		return false;

	bool control = Input::IsKeyPressed(Key::LeftControl) || Input::IsKeyPressed(Key::RightControl);
	bool shift = Input::IsKeyPressed(Key::LeftShift) || Input::IsKeyPressed(Key::RightShift);
	switch (e.GetKeyCode())
	{
		case Key::N:
		{
			if (control)
				NewScene();

			break;
		}
		case Key::O:
		{
			if (control)
				OpenScene();

			break;
		}
		case Key::S:
		{
			if (control && shift)
				SaveSceneAs();

			break;
		}

		// Gizmos
		case Key::Q:
		{
			if (!ImGuizmo::IsUsing())
				m_GizmoType = -1;
			break;
		}
		case Key::W:
		{
			if (!ImGuizmo::IsUsing())
				m_GizmoType = ImGuizmo::OPERATION::TRANSLATE;
			break;
		}
		case Key::E:
		{
			if (!ImGuizmo::IsUsing())
				m_GizmoType = ImGuizmo::OPERATION::ROTATE;
			break;
		}
		case Key::R:
		{
			if (!ImGuizmo::IsUsing())
				m_GizmoType = ImGuizmo::OPERATION::SCALE;
			break;
		}
	}
	// NOTE(review): control falls off the end of this non-void function without a
	// return statement — undefined behavior in C++. Should `return false;` here
	// (returning true would stop further event propagation). Left unchanged in
	// this documentation pass; fix separately.
}

// Left-click in the viewport selects the hovered entity, unless the gizmo is
// under the cursor or Alt (camera orbit) is held.
bool EditorLayer::OnMouseButtonPressed(MouseButtonPressedEvent& e)
{
	if (e.GetMouseButton() == Mouse::ButtonLeft)
	{
		if (m_ViewportHovered && !ImGuizmo::IsOver() && !Input::IsKeyPressed(Key::LeftAlt))
			m_SceneHierarchyPanel.SetSelectedEntity(m_HoveredEntity);
	}
	return false;
}

// Replaces the active scene with a fresh, empty one sized to the viewport.
void EditorLayer::NewScene()
{
	m_ActiveScene = CreateRef<Scene>();
	m_ActiveScene->OnViewportResize((uint32_t)m_ViewportSize.x, (uint32_t)m_ViewportSize.y);
	m_SceneHierarchyPanel.SetContext(m_ActiveScene);
}

// Prompts for a .hazel file and deserializes it into a brand-new scene;
// does nothing if the dialog is cancelled (empty path).
void EditorLayer::OpenScene()
{
	std::string filepath = FileDialogs::OpenFile("Hazel Scene (*.hazel)\0*.hazel\0");
	if (!filepath.empty())
	{
		m_ActiveScene = CreateRef<Scene>();
		m_ActiveScene->OnViewportResize((uint32_t)m_ViewportSize.x, (uint32_t)m_ViewportSize.y);
		m_SceneHierarchyPanel.SetContext(m_ActiveScene);

		SceneSerializer serializer(m_ActiveScene);
		serializer.Deserialize(filepath);
	}
}

// Prompts for a destination path and serializes the active scene to it;
// does nothing if the dialog is cancelled (empty path).
void EditorLayer::SaveSceneAs()
{
	std::string filepath = FileDialogs::SaveFile("Hazel Scene (*.hazel)\0*.hazel\0");
	if (!filepath.empty())
	{
		SceneSerializer serializer(m_ActiveScene);
		serializer.Serialize(filepath);
	}
}

} // namespace (presumably Hazel — opening brace is outside this chunk)
#include "../../include/FileHandling/FileConverter.h"

// Construct the converter and point CImg at the ImageMagick binaries so that
// formats CImg cannot decode natively can be loaded through ImageMagick.
FileConverter::FileConverter(){
    cimg_library::cimg::imagemagick_path(imageMagickPath.c_str());
}

// Singleton accessor. The instance is intentionally heap-allocated and never
// deleted so it survives until process exit (avoids static-destruction-order
// problems); this is a deliberate one-time "leak".
FileConverter& FileConverter::instance(){
    static FileConverter *instance = new FileConverter();
    return *instance;
}

// Convert the file on disk to BMP and update `file` to describe the result.
// No-op when the file is already a BMP. Unlike the previous version, the
// image is only loaded from disk when a conversion is actually required,
// and the save/update logic is shared with ConvertImageToBMP().
void FileConverter::ConvertFileToBMP(File& file){
    if(file.type == File::FileType::BMP){
        return;
    }
    cimg_library::CImg<unsigned char> image(file.fullPath.c_str());
    ConvertImageToBMP(file, image);
}

// Save an in-memory image as BMP under the file's relative path and rewrite
// the File record (fullPath/type/ext) to point at the new .bmp file.
// Note: `file.name` is assumed to carry no extension — TODO confirm with File.
void FileConverter::ConvertImageToBMP(File& file, cimg_library::CImg<unsigned char> img){
    file.fullPath = file.relativePath + "/" + file.name +".bmp";
    file.type = File::FileType::BMP;
    file.ext = "bmp";
    img.save_bmp(file.fullPath.c_str());
}

// Save an in-memory image as TIFF and rewrite the File record accordingly.
void FileConverter::ConvertImageToTIFF(File& file, cimg_library::CImg<unsigned char> img){
    file.fullPath = file.relativePath + "/" + file.name +".tiff";
    file.type = File::FileType::TIFF;
    file.ext = "tiff";
    img.save_tiff(file.fullPath.c_str());
}

// Convert the file on disk to TIFF. Mirrors ConvertFileToBMP: skip the disk
// load entirely when the file is already a TIFF, otherwise delegate to
// ConvertImageToTIFF() so the path/type/ext bookkeeping lives in one place.
void FileConverter::ConvertFileToTIFF(File& file){
    if(file.type == File::FileType::TIFF){
        return;
    }
    cimg_library::CImg<unsigned char> image(file.fullPath.c_str());
    ConvertImageToTIFF(file, image);
}
#include "config_impl.h"
#include "retry_state_impl.h"
#include "router.h"

#include "envoy/event/dispatcher.h"
#include "envoy/event/timer.h"
#include "envoy/http/conn_pool.h"
#include "envoy/runtime/runtime.h"
#include "envoy/stats/stats.h"
#include "envoy/upstream/cluster_manager.h"
#include "envoy/upstream/upstream.h"

#include "common/common/assert.h"
#include "common/common/empty_string.h"
#include "common/common/enum_to_int.h"
#include "common/common/utility.h"
#include "common/http/codes.h"
#include "common/http/header_map_impl.h"
#include "common/http/headers.h"
#include "common/http/message_impl.h"
#include "common/http/utility.h"

namespace Router {

// Sets the :scheme header on the upstream request: https when the cluster has
// a TLS context configured, http otherwise.
void FilterUtility::setUpstreamScheme(Http::HeaderMap& headers,
                                      const Upstream::ClusterInfo& cluster) {
  if (cluster.sslContext()) {
    headers.insertScheme().value(Http::Headers::get().SchemeValues.Https);
  } else {
    headers.insertScheme().value(Http::Headers::get().SchemeValues.Http);
  }
}

// Decides whether this request should be shadowed (mirrored) to the shadow
// cluster: requires a configured shadow cluster and, if a runtime key is set,
// that the key is enabled for this request's stable random value (out of 10000).
bool FilterUtility::shouldShadow(const ShadowPolicy& policy, Runtime::Loader& runtime,
                                 uint64_t stable_random) {
  if (policy.cluster().empty()) {
    return false;
  }

  if (!policy.runtimeKey().empty() &&
      !runtime.snapshot().featureEnabled(policy.runtimeKey(), 0, stable_random, 10000UL)) {
    return false;
  }

  return true;
}

// Computes the effective global and per-try timeouts for the request.
// Request headers may override the route's default; the override headers are
// consumed (removed) here, and the chosen timeout is advertised to the
// upstream via x-envoy-expected-rq-timeout-ms.
FilterUtility::TimeoutData FilterUtility::finalTimeout(const RouteEntry& route,
                                                       Http::HeaderMap& request_headers) {
  // See if there is a user supplied timeout in a request header. If there is we take that,
  // otherwise we use the default.
  TimeoutData timeout;
  timeout.global_timeout_ = route.timeout();
  Http::HeaderEntry* header_timeout_entry = request_headers.EnvoyUpstreamRequestTimeoutMs();
  uint64_t header_timeout;
  if (header_timeout_entry) {
    // Silently ignore a malformed (non-numeric) header value.
    if (StringUtil::atoul(header_timeout_entry->value().c_str(), header_timeout)) {
      timeout.global_timeout_ = std::chrono::milliseconds(header_timeout);
    }
    request_headers.removeEnvoyUpstreamRequestTimeoutMs();
  }

  // See if there is a per try/retry timeout. If it's >= global we just ignore it.
  Http::HeaderEntry* per_try_timeout_entry = request_headers.EnvoyUpstreamRequestPerTryTimeoutMs();
  if (per_try_timeout_entry) {
    if (StringUtil::atoul(per_try_timeout_entry->value().c_str(), header_timeout)) {
      timeout.per_try_timeout_ = std::chrono::milliseconds(header_timeout);
    }
    request_headers.removeEnvoyUpstreamRequestPerTryTimeoutMs();
  }

  if (timeout.per_try_timeout_ >= timeout.global_timeout_) {
    timeout.per_try_timeout_ = std::chrono::milliseconds(0);
  }

  // See if there is any timeout to write in the expected timeout header.
  // The per-try timeout takes precedence when set (non-zero).
  uint64_t expected_timeout = timeout.per_try_timeout_.count();
  if (expected_timeout == 0) {
    expected_timeout = timeout.global_timeout_.count();
  }

  if (expected_timeout > 0) {
    request_headers.insertEnvoyExpectedRequestTimeoutMs().value(expected_timeout);
  }

  return timeout;
}

Filter::~Filter() {
  // Upstream resources should already have been cleaned.
  ASSERT(!upstream_request_);
  ASSERT(!retry_state_);
}

// Returns the upstream host's zone for stat tagging, or "" when no host was
// ever selected (e.g. pool failure before host selection).
const std::string& Filter::upstreamZone(Upstream::HostDescriptionPtr upstream_host) {
  return upstream_host ? upstream_host->zone() : EMPTY_STRING;
}

// Charges response-code stats for the upstream response (global scope plus the
// optional alt stat prefix scope). Skipped for health-check traffic or when
// dynamic stats are disabled.
void Filter::chargeUpstreamCode(const Http::HeaderMap& response_headers,
                                Upstream::HostDescriptionPtr upstream_host) {
  if (config_.emit_dynamic_stats_ && !callbacks_->requestInfo().healthCheck()) {
    const Http::HeaderEntry* upstream_canary_header = response_headers.EnvoyUpstreamCanary();
    const Http::HeaderEntry* internal_request_header = downstream_headers_->EnvoyInternalRequest();

    // A response is "canary" if either the response header says so or the
    // selected host is flagged canary.
    bool is_canary = (upstream_canary_header && upstream_canary_header->value() == "true") ||
                     (upstream_host ? upstream_host->canary() : false);
    bool internal_request = internal_request_header && internal_request_header->value() == "true";

    Http::CodeUtility::ResponseStatInfo info{
        config_.global_store_, cluster_->statsScope(), EMPTY_STRING, response_headers,
        internal_request, route_entry_->virtualHost().name(),
        request_vcluster_ ? request_vcluster_->name() : EMPTY_STRING,
        config_.local_info_.zoneName(), upstreamZone(upstream_host), is_canary};

    Http::CodeUtility::chargeResponseStat(info);

    if (!alt_stat_prefix_.empty()) {
      Http::CodeUtility::ResponseStatInfo info{
          config_.global_store_, cluster_->statsScope(), alt_stat_prefix_, response_headers,
          internal_request, EMPTY_STRING, EMPTY_STRING, config_.local_info_.zoneName(),
          upstreamZone(upstream_host), is_canary};
      Http::CodeUtility::chargeResponseStat(info);
    }
  }
}

// Overload used when there is no real upstream response (local replies,
// timeouts, resets): synthesizes a header map carrying only :status and
// charges the same stats as a real response would.
void Filter::chargeUpstreamCode(Http::Code code, Upstream::HostDescriptionPtr upstream_host) {
  Http::HeaderMapImpl fake_response_headers{
      {Http::Headers::get().Status, std::to_string(enumToInt(code))}};
  chargeUpstreamCode(fake_response_headers, upstream_host);
}

// Main request entry point: resolves the route (404 if none, redirect if
// configured), honors cluster maintenance mode, computes timeouts/retry/shadow
// policy, and kicks off the upstream request via the cluster's connection pool.
Http::FilterHeadersStatus Filter::decodeHeaders(Http::HeaderMap& headers, bool end_stream) {
  downstream_headers_ = &headers;

  // Only increment rq total stat if we actually decode headers here. This does not count requests
  // that get handled by earlier filters.
  config_.stats_.rq_total_.inc();

  // Determine if there is a route entry or a redirect for the request.
  const Route* route = callbacks_->routeTable().route(headers);
  if (!route) {
    config_.stats_.no_route_.inc();
    stream_log_debug("no cluster match for URL '{}'", *callbacks_, headers.Path()->value().c_str());

    callbacks_->requestInfo().setResponseFlag(Http::AccessLog::ResponseFlag::NoRouteFound);
    Http::HeaderMapPtr response_headers{new Http::HeaderMapImpl{
        {Http::Headers::get().Status, std::to_string(enumToInt(Http::Code::NotFound))}}};
    callbacks_->encodeHeaders(std::move(response_headers), true);
    return Http::FilterHeadersStatus::StopIteration;
  }

  // Determine if there is a redirect for the request.
  if (route->redirectEntry()) {
    config_.stats_.rq_redirect_.inc();
    Http::Utility::sendRedirect(*callbacks_, route->redirectEntry()->newPath(headers));
    return Http::FilterHeadersStatus::StopIteration;
  }

  // A route entry matches for the request.
  route_entry_ = route->routeEntry();

  // Set up stat prefixes, etc.
  request_vcluster_ = route_entry_->virtualCluster(headers);
  stream_log_debug("cluster '{}' match for URL '{}'", *callbacks_, route_entry_->clusterName(),
                   headers.Path()->value().c_str());

  cluster_ = config_.cm_.get(route_entry_->clusterName());
  const Http::HeaderEntry* request_alt_name = headers.EnvoyUpstreamAltStatName();
  if (request_alt_name) {
    // The alt stat name header is consumed; it is not forwarded upstream.
    alt_stat_prefix_ = std::string(request_alt_name->value().c_str()) + ".";
    headers.removeEnvoyUpstreamAltStatName();
  }

  // See if we are supposed to immediately kill some percentage of this cluster's traffic.
  if (cluster_->maintenanceMode()) {
    callbacks_->requestInfo().setResponseFlag(Http::AccessLog::ResponseFlag::UpstreamOverflow);
    chargeUpstreamCode(Http::Code::ServiceUnavailable, nullptr);
    Http::Utility::sendLocalReply(*callbacks_, Http::Code::ServiceUnavailable, "maintenance mode");
    return Http::FilterHeadersStatus::StopIteration;
  }

  // Fetch a connection pool for the upstream cluster.
  Http::ConnectionPool::Instance* conn_pool =
      config_.cm_.httpConnPoolForCluster(route_entry_->clusterName(), finalPriority());
  if (!conn_pool) {
    sendNoHealthyUpstreamResponse();
    return Http::FilterHeadersStatus::StopIteration;
  }

  timeout_ = FilterUtility::finalTimeout(*route_entry_, headers);
  route_entry_->finalizeRequestHeaders(headers);
  FilterUtility::setUpstreamScheme(headers, *cluster_);
  retry_state_ = createRetryState(route_entry_->retryPolicy(), headers, *cluster_,
                                  config_.runtime_, config_.random_, callbacks_->dispatcher(),
                                  finalPriority());
  // Shadowing decision is keyed off the stream id so it is stable per request.
  do_shadowing_ = FilterUtility::shouldShadow(route_entry_->shadowPolicy(), config_.runtime_,
                                              callbacks_->streamId());

#ifndef NDEBUG
  // Debug builds: dump every outgoing request header.
  headers.iterate([](const Http::HeaderEntry& header, void* context) -> void {
    stream_log_debug(" '{}':'{}'", *static_cast<Http::StreamDecoderFilterCallbacks*>(context),
                     header.key().c_str(), header.value().c_str());
  }, callbacks_);
#endif

  // Do a common header check. We make sure that all outgoing requests have all HTTP/2 headers.
  // These get stripped by HTTP/1 codec where applicable.
  ASSERT(headers.Scheme());
  ASSERT(headers.Method());
  ASSERT(headers.Host());
  ASSERT(headers.Path());

  upstream_request_.reset(new UpstreamRequest(*this, *conn_pool));
  upstream_request_->encodeHeaders(end_stream);
  if (end_stream) {
    onRequestComplete();
  }

  return Http::FilterHeadersStatus::StopIteration;
}

// Sends a local 503 because no healthy upstream host / pool was available.
void Filter::sendNoHealthyUpstreamResponse() {
  callbacks_->requestInfo().setResponseFlag(Http::AccessLog::ResponseFlag::NoHealthyUpstream);
  chargeUpstreamCode(Http::Code::ServiceUnavailable, nullptr);
  Http::Utility::sendLocalReply(*callbacks_, Http::Code::ServiceUnavailable,
                                "no healthy upstream");
}

// Streams (or buffers, when retry/shadow may need the body again) request body
// data to the upstream.
Http::FilterDataStatus Filter::decodeData(Buffer::Instance& data, bool end_stream) {
  bool buffering = (retry_state_ && retry_state_->enabled()) || do_shadowing_;

  // If we are going to buffer for retries or shadowing, we need to make a copy before encoding
  // since it's all moves from here on.
  if (buffering) {
    Buffer::OwnedImpl copy(data);
    upstream_request_->encodeData(copy, end_stream);
  } else {
    upstream_request_->encodeData(data, end_stream);
  }

  if (end_stream) {
    onRequestComplete();
  }

  // If we are potentially going to retry or shadow this request we need to buffer.
  return buffering ? Http::FilterDataStatus::StopIterationAndBuffer
                   : Http::FilterDataStatus::StopIterationNoBuffer;
}

// Forwards request trailers upstream; trailers always end the request.
Http::FilterTrailersStatus Filter::decodeTrailers(Http::HeaderMap& trailers) {
  downstream_trailers_ = &trailers;
  upstream_request_->encodeTrailers(trailers);
  onRequestComplete();
  return Http::FilterTrailersStatus::StopIteration;
}

// Tears down per-request state: upstream request, retry state, and the global
// response timeout timer.
void Filter::cleanup() {
  upstream_request_.reset();
  retry_state_.reset();
  if (response_timeout_) {
    response_timeout_->disableTimer();
    response_timeout_.reset();
  }
}

Upstream::ResourcePriority Filter::finalPriority() {
  // Virtual cluster priority trumps route priority if the route has a virtual cluster.
  if (request_vcluster_) {
    return request_vcluster_->priority();
  } else {
    return route_entry_->priority();
  }
}

// If shadowing was selected for this request, clones the (buffered) request —
// headers, body, trailers — and fires it at the shadow cluster.
void Filter::maybeDoShadowing() {
  if (!do_shadowing_) {
    return;
  }

  ASSERT(!route_entry_->shadowPolicy().cluster().empty());
  Http::MessagePtr request(new Http::RequestMessageImpl(
      Http::HeaderMapPtr{new Http::HeaderMapImpl(*downstream_headers_)}));
  if (callbacks_->decodingBuffer()) {
    request->body(Buffer::InstancePtr{new Buffer::OwnedImpl(*callbacks_->decodingBuffer())});
  }
  if (downstream_trailers_) {
    request->trailers(Http::HeaderMapPtr{new Http::HeaderMapImpl(*downstream_trailers_)});
  }

  config_.shadowWriter().shadow(route_entry_->shadowPolicy().cluster(), std::move(request),
                                timeout_.global_timeout_);
}

// Called when the full downstream request has been received: triggers
// shadowing and arms the per-try and global response timeout timers.
void Filter::onRequestComplete() {
  downstream_end_stream_ = true;
  downstream_request_complete_time_ = std::chrono::system_clock::now();

  // Possible that we got an immediate reset.
  if (upstream_request_) {
    // Even if we got an immediate reset, we could still shadow, but that is a riskier change and
    // seems unnecessary right now.
    maybeDoShadowing();

    upstream_request_->setupPerTryTimeout();
    if (timeout_.global_timeout_.count() > 0) {
      response_timeout_ = callbacks_->dispatcher().createTimer([this]() -> void {
        onResponseTimeout();
      });
      response_timeout_->enableTimer(timeout_.global_timeout_);
    }
  }
}

// Downstream reset: propagate the reset upstream and clean up.
void Filter::onResetStream() {
  if (upstream_request_) {
    upstream_request_->resetStream();
  }

  cleanup();
}

// Global (whole-request) timeout fired: charge timeout stats, reset any
// in-flight upstream request, and report a synthetic GlobalTimeout reset.
void Filter::onResponseTimeout() {
  stream_log_debug("upstream timeout", *callbacks_);
  config_.cm_.get(route_entry_->clusterName())->stats().upstream_rq_timeout_.inc();

  // It's possible to timeout during a retry backoff delay when we have no upstream request. In
  // this case we fake a reset since onUpstreamReset() doesn't care.
  if (upstream_request_) {
    if (upstream_request_->upstream_host_) {
      upstream_request_->upstream_host_->stats().rq_timeout_.inc();
    }

    upstream_request_->resetStream();
  }

  onUpstreamReset(UpstreamResetType::GlobalTimeout, Optional<Http::StreamResetReason>());
}

// Central handler for all upstream failures (stream reset, per-try timeout,
// global timeout): informs outlier detection, attempts a retry when allowed,
// otherwise replies 503/504 or resets the downstream stream if headers were
// already sent.
void Filter::onUpstreamReset(UpstreamResetType type,
                             const Optional<Http::StreamResetReason>& reset_reason) {
  ASSERT(type == UpstreamResetType::GlobalTimeout || upstream_request_);
  if (type == UpstreamResetType::Reset) {
    stream_log_debug("upstream reset", *callbacks_);
  }

  Upstream::HostDescriptionPtr upstream_host;
  if (upstream_request_) {
    upstream_host = upstream_request_->upstream_host_;
    if (upstream_host) {
      // Feed the outlier detector a synthetic status code for this failure.
      upstream_host->outlierDetector().putHttpResponseCode(
          enumToInt(type == UpstreamResetType::Reset ? Http::Code::ServiceUnavailable
                                                     : Http::Code::GatewayTimeout));
    }
  }

  // We don't retry on a global timeout or if we already started the response.
  if (type != UpstreamResetType::GlobalTimeout && !downstream_response_started_ && retry_state_ &&
      retry_state_->shouldRetry(nullptr, reset_reason, [this]() -> void { doRetry(); }) &&
      setupRetry(true)) {
    return;
  }

  // This will destroy any created retry timers.
  cleanup();

  // If we have never sent any response, send a 503. Otherwise just reset the ongoing response.
  if (downstream_response_started_) {
    callbacks_->resetStream();
  } else {
    Http::Code code;
    const char* body;
    if (type == UpstreamResetType::GlobalTimeout || type == UpstreamResetType::PerTryTimeout) {
      callbacks_->requestInfo().setResponseFlag(
          Http::AccessLog::ResponseFlag::UpstreamRequestTimeout);

      code = Http::Code::GatewayTimeout;
      body = "upstream request timeout";
    } else {
      Http::AccessLog::ResponseFlag response_flags =
          streamResetReasonToResponseFlag(reset_reason.value());
      callbacks_->requestInfo().setResponseFlag(response_flags);
      code = Http::Code::ServiceUnavailable;
      body = "upstream connect error or disconnect/reset before headers";
    }

    chargeUpstreamCode(code, upstream_host);
    Http::Utility::sendLocalReply(*callbacks_, code, body);
  }
}

// Maps a codec-level stream reset reason to the access-log response flag.
// Throws on an unknown enum value (all current values are covered above).
Http::AccessLog::ResponseFlag
Filter::streamResetReasonToResponseFlag(Http::StreamResetReason reset_reason) {
  switch (reset_reason) {
  case Http::StreamResetReason::ConnectionFailure:
    return Http::AccessLog::ResponseFlag::UpstreamConnectionFailure;
  case Http::StreamResetReason::ConnectionTermination:
    return Http::AccessLog::ResponseFlag::UpstreamConnectionTermination;
  case Http::StreamResetReason::LocalReset:
  case Http::StreamResetReason::LocalRefusedStreamReset:
    return Http::AccessLog::ResponseFlag::LocalReset;
  case Http::StreamResetReason::Overflow:
    return Http::AccessLog::ResponseFlag::UpstreamOverflow;
  case Http::StreamResetReason::RemoteReset:
  case Http::StreamResetReason::RemoteRefusedStreamReset:
    return Http::AccessLog::ResponseFlag::UpstreamRemoteReset;
  }

  throw std::invalid_argument("Unknown reset_reason");
}

// Upstream response headers arrived: feed outlier detection, possibly retry
// on a retriable status, otherwise stamp service time / canary state, charge
// stats, and forward the headers downstream.
void Filter::onUpstreamHeaders(Http::HeaderMapPtr&& headers, bool end_stream) {
  stream_log_debug("upstream headers complete: end_stream={}", *callbacks_, end_stream);
  ASSERT(!downstream_response_started_);

  upstream_request_->upstream_host_->outlierDetector().putHttpResponseCode(
      Http::Utility::getResponseStatus(*headers));

  if (retry_state_ &&
      retry_state_->shouldRetry(headers.get(), Optional<Http::StreamResetReason>(),
                                [this]() -> void { doRetry(); }) &&
      setupRetry(end_stream)) {
    Http::CodeUtility::chargeBasicResponseStat(
        cluster_->statsScope(), "retry.",
        static_cast<Http::Code>(Http::Utility::getResponseStatus(*headers)));
    return;
  } else {
    // Make sure any retry timers are destroyed since we may not call cleanup() if end_stream is
    // false.
    retry_state_.reset();
  }

  // Only send upstream service time if we received the complete request and this is not a
  // premature response.
  if (DateUtil::timePointValid(downstream_request_complete_time_)) {
    std::chrono::milliseconds ms = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::system_clock::now() - downstream_request_complete_time_);
    headers->insertEnvoyUpstreamServiceTime().value(ms.count());
  }

  upstream_request_->upstream_canary_ =
      (headers->EnvoyUpstreamCanary() && headers->EnvoyUpstreamCanary()->value() == "true") ||
      upstream_request_->upstream_host_->canary();
  chargeUpstreamCode(*headers, upstream_request_->upstream_host_);

  downstream_response_started_ = true;
  if (end_stream) {
    onUpstreamComplete();
  }

  callbacks_->encodeHeaders(std::move(headers), end_stream);
}

// Upstream body data: forward downstream (completing first if this is the end).
void Filter::onUpstreamData(Buffer::Instance& data, bool end_stream) {
  if (end_stream) {
    onUpstreamComplete();
  }

  callbacks_->encodeData(data, end_stream);
}

// Upstream trailers always terminate the response.
void Filter::onUpstreamTrailers(Http::HeaderMapPtr&& trailers) {
  onUpstreamComplete();
  callbacks_->encodeTrailers(std::move(trailers));
}

// Response fully received: reset a still-streaming upstream request if the
// downstream request never completed, record response-timing stats, clean up.
void Filter::onUpstreamComplete() {
  if (!downstream_end_stream_) {
    upstream_request_->resetStream();
  }

  if (config_.emit_dynamic_stats_ && !callbacks_->requestInfo().healthCheck() &&
      DateUtil::timePointValid(downstream_request_complete_time_)) {
    std::chrono::milliseconds response_time = std::chrono::duration_cast<std::chrono::milliseconds>(
        std::chrono::system_clock::now() - downstream_request_complete_time_);

    upstream_request_->upstream_host_->outlierDetector().putResponseTime(response_time);

    const Http::HeaderEntry* internal_request_header = downstream_headers_->EnvoyInternalRequest();
    bool internal_request = internal_request_header && internal_request_header->value() == "true";

    Http::CodeUtility::ResponseTimingInfo info{
        config_.global_store_, cluster_->statsScope(), EMPTY_STRING, response_time,
        upstream_request_->upstream_canary_, internal_request,
        route_entry_->virtualHost().name(),
        request_vcluster_ ? request_vcluster_->name() : EMPTY_STRING,
        config_.local_info_.zoneName(), upstreamZone(upstream_request_->upstream_host_)};

    Http::CodeUtility::chargeResponseTiming(info);

    if (!alt_stat_prefix_.empty()) {
      Http::CodeUtility::ResponseTimingInfo info{
          config_.global_store_, cluster_->statsScope(), alt_stat_prefix_, response_time,
          upstream_request_->upstream_canary_, internal_request, EMPTY_STRING, EMPTY_STRING,
          config_.local_info_.zoneName(), upstreamZone(upstream_request_->upstream_host_)};

      Http::CodeUtility::chargeResponseTiming(info);
    }
  }

  cleanup();
}

// Prepares for a retry: only possible once the downstream request is fully
// buffered. Tears down the current upstream request (the retry state owns the
// backoff timer that will call doRetry()).
bool Filter::setupRetry(bool end_stream) {
  // If we responded before the request was complete we don't bother doing a retry. This may not
  // catch certain cases where we are in full streaming mode and we have a connect timeout or an
  // overflow of some kind. However, in many cases deployments will use the buffer filter before
  // this filter which will make this a non-issue. The implementation of supporting retry in cases
  // where the request is not complete is more complicated so we will start with this for now.
  if (!downstream_end_stream_) {
    return false;
  }

  stream_log_debug("performing retry", *callbacks_);
  if (!end_stream) {
    upstream_request_->resetStream();
  }

  upstream_request_.reset();
  return true;
}

// Executes a retry attempt: grabs a fresh connection pool and replays the
// buffered request (headers, buffered body, trailers).
void Filter::doRetry() {
  Http::ConnectionPool::Instance* conn_pool =
      config_.cm_.httpConnPoolForCluster(route_entry_->clusterName(), finalPriority());
  if (!conn_pool) {
    sendNoHealthyUpstreamResponse();
    cleanup();
    return;
  }

  ASSERT(response_timeout_ || timeout_.global_timeout_.count() == 0);
  ASSERT(!upstream_request_);
  upstream_request_.reset(new UpstreamRequest(*this, *conn_pool));
  upstream_request_->encodeHeaders(!callbacks_->decodingBuffer() && !downstream_trailers_);

  // It's possible we got immediately reset.
  if (upstream_request_) {
    if (callbacks_->decodingBuffer()) {
      // If we are doing a retry we need to make a copy.
      Buffer::OwnedImpl copy(*callbacks_->decodingBuffer());
      upstream_request_->encodeData(copy, !downstream_trailers_);
    }

    if (downstream_trailers_) {
      upstream_request_->encodeTrailers(*downstream_trailers_);
    }

    upstream_request_->setupPerTryTimeout();
  }
}

Filter::UpstreamRequest::~UpstreamRequest() {
  if (per_try_timeout_) {
    // Allows for testing.
    per_try_timeout_->disableTimer();
  }
}

// --- Codec decoder callbacks: delegate straight to the parent filter. ---

void Filter::UpstreamRequest::decodeHeaders(Http::HeaderMapPtr&& headers, bool end_stream) {
  parent_.onUpstreamHeaders(std::move(headers), end_stream);
}

void Filter::UpstreamRequest::decodeData(Buffer::Instance& data, bool end_stream) {
  parent_.onUpstreamData(data, end_stream);
}

void Filter::UpstreamRequest::decodeTrailers(Http::HeaderMapPtr&& trailers) {
  parent_.onUpstreamTrailers(std::move(trailers));
}

// Requests a stream from the connection pool; actual header encoding happens
// in onPoolReady() once an upstream connection is available.
void Filter::UpstreamRequest::encodeHeaders(bool end_stream) {
  ASSERT(!encode_complete_);
  encode_complete_ = end_stream;

  // It's possible for a reset to happen inline within the newStream() call. In this case, we might
  // get deleted inline as well. Only write the returned handle out if it is not nullptr to deal
  // with this case.
  Http::ConnectionPool::Cancellable* handle = conn_pool_.newStream(*this, *this);
  if (handle) {
    conn_pool_stream_handle_ = handle;
  }
}

// Encodes body data, buffering locally while the pool has not yet produced an
// encoder (i.e. the upstream connection is still being established).
void Filter::UpstreamRequest::encodeData(Buffer::Instance& data, bool end_stream) {
  ASSERT(!encode_complete_);
  encode_complete_ = end_stream;

  if (!request_encoder_) {
    stream_log_trace("buffering {} bytes", *parent_.callbacks_, data.length());
    if (!buffered_request_body_) {
      buffered_request_body_.reset(new Buffer::OwnedImpl());
    }

    buffered_request_body_->move(data);
  } else {
    stream_log_trace("proxying {} bytes", *parent_.callbacks_, data.length());
    request_encoder_->encodeData(data, end_stream);
  }
}

// Encodes trailers, or just records the fact when no encoder exists yet
// (onPoolReady() replays them from parent_.downstream_trailers_).
void Filter::UpstreamRequest::encodeTrailers(const Http::HeaderMap& trailers) {
  ASSERT(!encode_complete_);
  encode_complete_ = true;
  encode_trailers_ = true;

  if (!request_encoder_) {
    stream_log_trace("buffering trailers", *parent_.callbacks_);
  } else {
    stream_log_trace("proxying trailers", *parent_.callbacks_);
    request_encoder_->encodeTrailers(trailers);
  }
}

// Stream reset from the codec. If we are inside encodeHeaders() the reset is
// deferred (see onPoolReady) to avoid re-entrant destruction.
void Filter::UpstreamRequest::onResetStream(Http::StreamResetReason reason) {
  request_encoder_ = nullptr;
  if (!calling_encode_headers_) {
    parent_.onUpstreamReset(UpstreamResetType::Reset, Optional<Http::StreamResetReason>(reason));
  } else {
    deferred_reset_reason_ = reason;
  }
}

// Cancels a pending pool request or resets an established stream; a request
// is only ever in one of the two states (asserted).
void Filter::UpstreamRequest::resetStream() {
  if (conn_pool_stream_handle_) {
    stream_log_debug("cancelling pool request", *parent_.callbacks_);
    ASSERT(!request_encoder_);
    conn_pool_stream_handle_->cancel();
    conn_pool_stream_handle_ = nullptr;
  }

  if (request_encoder_) {
    stream_log_debug("resetting pool request", *parent_.callbacks_);
    request_encoder_->getStream().removeCallbacks(*this);
    request_encoder_->getStream().resetStream(Http::StreamResetReason::LocalReset);
  }
}

// Arms the per-try timeout timer if one is configured.
void Filter::UpstreamRequest::setupPerTryTimeout() {
  ASSERT(!per_try_timeout_);
  if (parent_.timeout_.per_try_timeout_.count() > 0) {
    per_try_timeout_ = parent_.callbacks_->dispatcher().createTimer([this]() -> void {
      onPerTryTimeout();
    });
    per_try_timeout_->enableTimer(parent_.timeout_.per_try_timeout_);
  }
}

// Per-try timeout fired: charge stats, reset the try, and report it to the
// parent as a PerTryTimeout reset (which may trigger a retry).
void Filter::UpstreamRequest::onPerTryTimeout() {
  stream_log_debug("upstream per try timeout", *parent_.callbacks_);
  parent_.config_.cm_.get(parent_.route_entry_->clusterName())
      ->stats()
      .upstream_rq_per_try_timeout_.inc();
  upstream_host_->stats().rq_timeout_.inc();
  resetStream();
  parent_.onUpstreamReset(UpstreamResetType::PerTryTimeout,
                          Optional<Http::StreamResetReason>(Http::StreamResetReason::LocalReset));
}

// Connection pool could not produce a stream: translate the pool failure into
// a stream reset so the normal reset path handles it.
void Filter::UpstreamRequest::onPoolFailure(Http::ConnectionPool::PoolFailureReason reason,
                                            Upstream::HostDescriptionPtr host) {
  Http::StreamResetReason reset_reason = Http::StreamResetReason::ConnectionFailure;
  switch (reason) {
  case Http::ConnectionPool::PoolFailureReason::Overflow:
    reset_reason = Http::StreamResetReason::Overflow;
    break;
  case Http::ConnectionPool::PoolFailureReason::ConnectionFailure:
    reset_reason = Http::StreamResetReason::ConnectionFailure;
    break;
  }

  // Mimic an upstream reset.
  onUpstreamHostSelected(host);
  onResetStream(reset_reason);
}

// Pool produced an encoder: send the (possibly buffered) request. Header
// encoding is guarded by calling_encode_headers_ so an inline codec reset is
// deferred and handled after encodeHeaders() returns.
void Filter::UpstreamRequest::onPoolReady(Http::StreamEncoder& request_encoder,
                                          Upstream::HostDescriptionPtr host) {
  stream_log_debug("pool ready", *parent_.callbacks_);
  onUpstreamHostSelected(host);
  request_encoder.getStream().addCallbacks(*this);

  conn_pool_stream_handle_ = nullptr;
  request_encoder_ = &request_encoder;
  calling_encode_headers_ = true;
  request_encoder.encodeHeaders(*parent_.downstream_headers_,
                                !buffered_request_body_ && encode_complete_ && !encode_trailers_);
  calling_encode_headers_ = false;

  // It is possible to get reset in the middle of an encodeHeaders() call. This happens for example
  // in the http/2 codec if the frame cannot be encoded for some reason. This should never happen
  // but it's unclear if we have covered all cases so protect against it and test for it. One
  // specific example of a case where this happens is if we try to encode a total header size that
  // is too big in HTTP/2 (64K currently).
  if (deferred_reset_reason_.valid()) {
    onResetStream(deferred_reset_reason_.value());
  } else {
    if (buffered_request_body_) {
      request_encoder.encodeData(*buffered_request_body_, encode_complete_ && !encode_trailers_);
    }

    if (encode_trailers_) {
      request_encoder.encodeTrailers(*parent_.downstream_trailers_);
    }
  }
}

// Production retry-state factory (overridable in tests via the virtual
// createRetryState on Filter).
RetryStatePtr ProdFilter::createRetryState(const RetryPolicy& policy,
                                           Http::HeaderMap& request_headers,
                                           const Upstream::ClusterInfo& cluster,
                                           Runtime::Loader& runtime,
                                           Runtime::RandomGenerator& random,
                                           Event::Dispatcher& dispatcher,
                                           Upstream::ResourcePriority priority) {
  return RetryStateImpl::create(policy, request_headers, cluster, runtime, random, dispatcher,
                                priority);
}

} // Router
/************************************************************************
 * RobWorkStudio Version 0.1
 * Copyright (C) Robotics Group, Maersk Institute, University of Southern
 * Denmark.
 *
 * This Software is developed using the Qt Open Source Edition, and is
 * therefore only available under the GNU General Public License (GPL).
 *
 * RobWorkStudio can be used, modified and redistributed freely.
 * RobWorkStudio is distributed WITHOUT ANY WARRANTY; including the implied
 * warranty of merchantability, fitness for a particular purpose and
 * guarantee of future releases, maintenance and bug fixes. The authors
 * has no responsibility of continuous development, maintenance, support
 * and insurance of backwards capability in the future.
 *
 * Notice that RobWorkStudio relies on RobWork, which has a different
 * license. For more information goto your RobWork directory and read license.txt.
 ************************************************************************/

#include "SimUtilityPlugin.hpp"

//#include <rw/common/TimerUtil.hpp>
#include <RobWorkStudio.hpp>
#include <rw/core/Log.hpp>
#include <rw/core/Ptr.hpp>
#include <rwsim/dynamics/DynamicWorkCell.hpp>
#include <rwsimlibs/gui/GraspRestingPoseDialog.hpp>
#include <rwsimlibs/gui/GraspSelectionDialog.hpp>
#include <rwsimlibs/gui/RestingPoseDialog.hpp>
#include <rwsimlibs/gui/SupportPoseAnalyserDialog.hpp>

#include <QGridLayout>
#include <QGroupBox>
#include <QPushButton>
#include <QTimer>
#include <QVBoxLayout>
#include <boost/bind.hpp>
#include <sstream>

using rw::core::Log;
using rw::core::Ptr;
using rw::kinematics::State;
using rwsim::dynamics::DynamicWorkCell;
using namespace rwsim::drawable;
using rws::RobWorkStudioPlugin;

/**
 * Constructs the plugin GUI: one "Utilities" group box with a button per
 * simulation utility dialog. All dialogs are created lazily in btnPressed(),
 * so every dialog pointer must start out NULL.
 *
 * BUGFIX: the original init list left _graspSelectionDialog (and _timer)
 * uninitialized; btnPressed() tests `if (!_graspSelectionDialog)`, which
 * read an indeterminate pointer before the first workcell was closed.
 */
SimUtilityPlugin::SimUtilityPlugin () :
    RobWorkStudioPlugin ("SimUtilityPlugin", QIcon (":/SimulationIcon.png")),
    _restPoseDialog (NULL), _poseAnalyserDialog (NULL), _graspRestPoseDialog (NULL),
    _graspSelectionDialog (NULL), _timer (NULL)
{
    // Construct layout and widget
    QWidget* widg       = new QWidget (this);
    QVBoxLayout* toplay = new QVBoxLayout (widg);
    widg->setLayout (toplay);
    this->setWidget (widg);
    {
        int row        = 0;
        QGroupBox* box = new QGroupBox ("Utilities", this);
        toplay->addWidget (box);
        QGridLayout* lay = new QGridLayout (box);
        box->setLayout (lay);
        {
            QPushButton* button = new QPushButton ("Resting pose");
            connect (button, SIGNAL (clicked ()), this, SLOT (btnPressed ()));
            lay->addWidget (button, row++, 0);
            _restPoseBtn = button;
        }
        {
            QPushButton* button = new QPushButton ("Support pose");
            connect (button, SIGNAL (clicked ()), this, SLOT (btnPressed ()));
            lay->addWidget (button, row++, 0);
            _poseAnalyserBtn = button;
        }
        {
            QPushButton* button = new QPushButton ("Tool eval");
            connect (button, SIGNAL (clicked ()), this, SLOT (btnPressed ()));
            lay->addWidget (button, row++, 0);
            _toolEvalBtn = button;
        }
        {
            QPushButton* button = new QPushButton ("Grasp selection");
            connect (button, SIGNAL (clicked ()), this, SLOT (btnPressed ()));
            lay->addWidget (button, row++, 0);
            _graspSelectBtn = button;
        }
    }
    // Buttons stay disabled until a dynamic workcell has been loaded
    // (see genericEventListener).
    _restPoseBtn->setEnabled (false);
    _poseAnalyserBtn->setEnabled (false);
    _toolEvalBtn->setEnabled (false);
    _graspSelectBtn->setEnabled (false);
    toplay->addStretch (1);
}

SimUtilityPlugin::~SimUtilityPlugin () {}

/**
 * Common click handler for all utility buttons. Identifies the sender,
 * lazily creates the matching dialog (wired to forward state changes back
 * to RobWorkStudio) and brings it to the front.
 * Requires a dynamic workcell to be loaded; throws otherwise.
 */
void SimUtilityPlugin::btnPressed ()
{
    QObject* obj = sender ();
    if (obj == _poseAnalyserBtn) {
        if (_dwc == NULL)
            RW_THROW ("No dynamic workcell loaded!");
        if (!_poseAnalyserDialog) {
            State state = getRobWorkStudio ()->getState ();
            rw::core::Ptr< rw::proximity::CollisionDetector > colDect =
                getRobWorkStudio ()->getCollisionDetector ();
            _poseAnalyserDialog = new SupportPoseAnalyserDialog (
                state, _dwc.get (), colDect.get (), getRobWorkStudio (), NULL);
            connect (_poseAnalyserDialog, SIGNAL (stateChanged (const rw::kinematics::State&)),
                     this, SLOT (stateChangedEvent (const rw::kinematics::State&)));
        }
        _poseAnalyserDialog->show ();
        _poseAnalyserDialog->raise ();
        _poseAnalyserDialog->activateWindow ();
    }
    else if (obj == _restPoseBtn) {
        if (_dwc == NULL)
            RW_THROW ("No dynamic workcell loaded!");
        if (!_restPoseDialog) {
            State state = getRobWorkStudio ()->getState ();
            rw::core::Ptr< rw::proximity::CollisionDetector > colDect =
                getRobWorkStudio ()->getCollisionDetector ();
            _restPoseDialog = new RestingPoseDialog (state, _dwc.get (), colDect.get (), NULL);
            connect (_restPoseDialog, SIGNAL (stateChanged (const rw::kinematics::State&)),
                     this, SLOT (stateChangedEvent (const rw::kinematics::State&)));
        }
        _restPoseDialog->show ();
        _restPoseDialog->raise ();
        _restPoseDialog->activateWindow ();
    }
    else if (obj == _toolEvalBtn) {
        if (_dwc == NULL)
            RW_THROW ("No dynamic workcell loaded!");
        log ().info () << "creating grasp rest pose \n";
        if (!_graspRestPoseDialog) {
            State state = getRobWorkStudio ()->getState ();
            rw::core::Ptr< rw::proximity::CollisionDetector > colDect =
                getRobWorkStudio ()->getCollisionDetector ();
            _graspRestPoseDialog =
                new GraspRestingPoseDialog (state, _dwc.get (), colDect.get (), NULL);
            connect (_graspRestPoseDialog, SIGNAL (stateChanged (const rw::kinematics::State&)),
                     this, SLOT (stateChangedEvent (const rw::kinematics::State&)));
            connect (_graspRestPoseDialog, SIGNAL (restingPoseEvent (const RestingConfig&)),
                     this, SLOT (restConfigEvent (const RestingConfig&)));
        }
        log ().info () << "Showing dialog!\n";
        _graspRestPoseDialog->show ();
        _graspRestPoseDialog->raise ();
        _graspRestPoseDialog->activateWindow ();
    }
    else if (obj == _timer) {
        // Timer tick — no action implemented yet.
    }
    else if (obj == _graspSelectBtn) {
        if (_dwc == NULL)
            RW_THROW ("No dynamic workcell loaded!");
        if (!_graspSelectionDialog) {
            State state = getRobWorkStudio ()->getState ();
            rw::core::Ptr< rw::proximity::CollisionDetector > colDect =
                getRobWorkStudio ()->getCollisionDetector ();
            _graspSelectionDialog =
                new GraspSelectionDialog (state, _dwc.get (), colDect.get (), NULL);
            connect (_graspSelectionDialog, SIGNAL (stateChanged (const rw::kinematics::State&)),
                     this, SLOT (stateChangedEvent (const rw::kinematics::State&)));
        }
        _graspSelectionDialog->show ();
        _graspSelectionDialog->raise ();
        _graspSelectionDialog->activateWindow ();
    }
}

/**
 * Receives a resting configuration from the grasp dialog, applies its state
 * to the studio, repaints, and saves a screenshot named after the
 * configuration's description.
 */
void SimUtilityPlugin::restConfigEvent (const RestingConfig& restcfg)
{
    // std::cout << "rest config event\n";
    getRobWorkStudio ()->setState (restcfg._state);
    getRobWorkStudio ()->updateAndRepaint ();
    // Frame *world = _dwc->getWorkcell()->getWorldFrame();
    // std::string gqual =
    // world->getPropertyMap().get<std::string>("GraspQuality",std::string("NO")); long time =
    // TimerUtil::currentTimeMs()/1000;
    std::stringstream sstr;
    // std::string pathPre =
    // world->getPropertyMap().get<std::string>("PathPre",std::string("c:/tmp")); int s = time; sstr
    // << pathPre << "/RestImageSave_" << gqual << ".png";
    sstr << restcfg._desc << ".png";
    getRobWorkStudio ()->getView ()->saveBufferToFile (sstr.str ().c_str ());
    // std::cout << "rest config event saved" << std::endl;
}

/// Forwards a state change emitted by one of the dialogs to RobWorkStudio.
void SimUtilityPlugin::stateChangedEvent (const rw::kinematics::State& state)
{
    getRobWorkStudio ()->setState (state);
}

/// Pulls the current state out of the resting-pose dialog when it requests a view update.
void SimUtilityPlugin::updateViewEvent ()
{
    if (!_restPoseDialog)
        return;
    QObject* obj = sender ();
    if (obj == _restPoseDialog) {
        State state = _restPoseDialog->getState ();
        getRobWorkStudio ()->setState (state);
    }
}

void SimUtilityPlugin::open (rw::models::WorkCell* workcell) {}

/**
 * Listens for generic studio events. On "DynamicWorkCellLoaded" the dynamic
 * workcell is fetched from the property map, the utility buttons are enabled,
 * and — if command-line args (Arg1..Arg4) are present — a grasp resting-pose
 * batch run is configured and started automatically.
 */
void SimUtilityPlugin::genericEventListener (const std::string& event)
{
    if (event == "DynamicWorkCellLoaded") {
        // get the dynamic workcell from the propertymap
        RW_DEBUG ("Getting dynamic workcell from propertymap!");
        Ptr< DynamicWorkCell > dwc =
            getRobWorkStudio ()->getPropertyMap ().get< Ptr< DynamicWorkCell > > (
                "DynamicWorkcell", NULL);
        if (dwc == NULL) {
            std::cout << "Could not load dynamic workcell from propertymap!!" << std::endl;
            return;
        }
        _dwc = dwc;
        _restPoseBtn->setEnabled (true);
        _poseAnalyserBtn->setEnabled (true);
        _toolEvalBtn->setEnabled (true);
        _graspSelectBtn->setEnabled (true);

        if (getRobWorkStudio ()->getPropertyMap ().has ("Arg1")) {
            if (_dwc == NULL)
                RW_THROW ("No dynamic workcell loaded!");
            log ().info () << "creating grasp rest pose \n";
            if (!_graspRestPoseDialog) {
                State state = getRobWorkStudio ()->getState ();
                rw::core::Ptr< rw::proximity::CollisionDetector > colDect =
                    getRobWorkStudio ()->getCollisionDetector ();
                _graspRestPoseDialog =
                    new GraspRestingPoseDialog (state, _dwc.get (), colDect.get (), NULL);
                connect (_graspRestPoseDialog, SIGNAL (stateChanged (const rw::kinematics::State&)),
                         this, SLOT (stateChangedEvent (const rw::kinematics::State&)));
                connect (_graspRestPoseDialog, SIGNAL (restingPoseEvent (const RestingConfig&)),
                         this, SLOT (restConfigEvent (const RestingConfig&)));
            }
            log ().info () << "Showing dialog!\n";
            _graspRestPoseDialog->show ();
            _graspRestPoseDialog->raise ();
            _graspRestPoseDialog->activateWindow ();
            std::string saveDir =
                getRobWorkStudio ()->getPropertyMap ().get< std::string > ("Arg2");
            _graspRestPoseDialog->setSaveDir (saveDir);
            std::string preshape =
                getRobWorkStudio ()->getPropertyMap ().get< std::string > ("Arg3");
            _graspRestPoseDialog->setPreshapeStrategy (preshape);
            std::string id = getRobWorkStudio ()->getPropertyMap ().get< std::string > (
                "Arg4", std::string ("1"));
            _graspRestPoseDialog->setUniqueID (id);
            _graspRestPoseDialog->startAuto ();
        }
    }
}

/**
 * Disables the utility buttons and tears down the lazily created dialogs.
 *
 * BUGFIX: the original deleted _restPoseDialog/_poseAnalyserDialog without
 * resetting the pointers, so btnPressed() would dereference freed dialogs
 * after a new workcell was opened; it also leaked the two grasp dialogs
 * (created with a NULL parent, hence never reclaimed by Qt).
 */
void SimUtilityPlugin::close ()
{
    _restPoseBtn->setEnabled (false);
    _poseAnalyserBtn->setEnabled (false);
    _toolEvalBtn->setEnabled (false);
    _graspSelectBtn->setEnabled (false);
    _dwc = NULL;
    // delete on NULL is a no-op, so no guards are needed.
    delete _restPoseDialog;
    _restPoseDialog = NULL;
    delete _poseAnalyserDialog;
    _poseAnalyserDialog = NULL;
    delete _graspRestPoseDialog;
    _graspRestPoseDialog = NULL;
    delete _graspSelectionDialog;
    _graspSelectionDialog = NULL;
}

void SimUtilityPlugin::stateChangedHandler (RobWorkStudioPlugin* sender) {}

/// Registers the generic-event listener and prepares the (currently unused) single-shot timer.
void SimUtilityPlugin::initialize ()
{
    getRobWorkStudio ()->genericEvent ().add (
        boost::bind (&SimUtilityPlugin::genericEventListener, this, boost::arg< 1 > ()), this);
    Log::setLog (_log);
    _timer = new QTimer (NULL);
    _timer->setSingleShot (true);
    _timer->setInterval (4000);
    connect (_timer, SIGNAL (timeout ()), this, SLOT (btnPressed ()));
}
/*
 * $Id$
 *
 * Author: David Fournier
 * Copyright (c) 2008-2012 Regents of the University of California
 */
#include <admodel.h>

// Allocate a named 4-dimensional double array with plain integer bounds
// and immediately read its contents from the global data file.
// The temporary d4_array(*this) is a shape-sharing view, so operator>>
// fills this object's own storage.
void data_4array::allocate(int hhsl,int hhsu,int hsl,int hsu,
  int rmin,int rmax,int cmin,int cmax,const char * s)
{
  named_d4_array::allocate(hhsl,hhsu,hsl,hsu,rmin,rmax,cmin,cmax,s);
  *(ad_comm::global_datafile) >> d4_array(*this);
}

// Same as above, but with index_type bounds, which allow per-slice
// (ragged) dimension limits.
void data_4array::allocate(ad_integer hhsl,ad_integer hhsu,
  const index_type& hsl,
  const index_type& hsu,
  const index_type& rmin,
  const index_type& rmax,
  const index_type& cmin,
  const index_type& cmax,
  const char *s)
{
  named_d4_array::allocate(hhsl,hhsu,hsl,hsu,rmin,rmax,cmin,cmax,s);
  *(ad_comm::global_datafile) >> d4_array(*this);
}

// Integer-array counterpart: allocate a named 4-dimensional int array
// with plain integer bounds and read it from the global data file.
void data_4iarray::allocate(int hhsl,int hhsu,int hsl,int hsu,
  int rmin,int rmax,int cmin,int cmax,const char * s)
{
  named_i4_array::allocate(hhsl,hhsu,hsl,hsu,rmin,rmax,cmin,cmax,s);
  *(ad_comm::global_datafile) >> i4_array(*this);
}

// Integer-array counterpart with index_type (possibly ragged) bounds.
void data_4iarray::allocate(ad_integer hhsl,ad_integer hhsu,
  const index_type& hsl,
  const index_type& hsu,
  const index_type& rmin,
  const index_type& rmax,
  const index_type& cmin,
  const index_type& cmax,
  const char *s)
{
  named_i4_array::allocate(hhsl,hhsu,hsl,hsu,rmin,rmax,cmin,cmax,s);
  *(ad_comm::global_datafile) >> i4_array(*this);
}
<?hh // strict
/**
 * @copyright 2010-2015, The Titon Project
 * @license http://opensource.org/licenses/bsd-license.php
 * @link http://titon.io
 */

namespace Titon\Utility;

use Titon\Common\Macroable;
use Titon\Io\Reader;

// Shape of the configuration store: string keys mapped to arbitrary values.
type ConfigMap = Map<string, mixed>;

/**
 * Stores the current configuration options for the application.
 * Configuration can be loaded from multiple sources including environment, bootstraps and internal system classes.
 * Various readers can be used to import specific configuration files.
 *
 * @package Titon\Utility
 */
class Config {
    use Macroable;

    /**
     * Current loaded configuration.
     *
     * @var \Titon\Utility\ConfigMap
     */
    protected static ConfigMap $_config = Map {};

    /**
     * Add a value to a key. If the value is not a vector, make it one.
     *
     * The existing value is coerced to a Vector via Col::toVector before
     * the new value is appended, so scalar settings become lists on the
     * first add().
     *
     * @param string $key
     * @param mixed $value
     */
    public static function add(string $key, mixed $value): void {
        $data = Col::toVector(static::get($key, Vector {}));
        $data[] = $value;

        static::set($key, $data);
    }

    /**
     * Return all configuration.
     *
     * @return \Titon\Utility\ConfigMap
     */
    public static function all(): ConfigMap {
        return static::$_config;
    }

    /**
     * Get the currently defined encoding for the application.
     *
     * Falls back to 'UTF-8' when 'app.encoding' is unset or falsy
     * (e.g. an empty string).
     *
     * @return string
     */
    public static function encoding(): string {
        return (string) static::get('app.encoding') ?: 'UTF-8';
    }

    /**
     * Flush configuration by removing all settings.
     */
    public static function flush(): void {
        static::$_config->clear();
    }

    /**
     * Grab a value from the current configuration.
     *
     * Keys such as 'app.encoding' are resolved by Col::get — presumably
     * dot-notation paths into nested structures; verify against Col.
     *
     * @param string $key
     * @param mixed $default
     * @return mixed
     */
    public static function get(string $key, mixed $default = null): mixed {
        $value = Col::get(static::$_config, $key);

        if ($value === null) {
            return $default;
        }

        return $value;
    }

    /**
     * Checks to see if a key exists within the current configuration.
     *
     * @param string $key
     * @return bool
     */
    public static function has(string $key): bool {
        return Col::has(static::$_config, $key);
    }

    /**
     * Loads a user created file into the configuration class.
     * Uses the defined reader to parse the file.
     *
     * The parsed resource is stored wholesale under $key, replacing any
     * previous value for that key.
     *
     * @param string $key
     * @param \Titon\Io\Reader $reader
     */
    public static function load(string $key, Reader $reader): void {
        static::$_config[$key] = $reader->readResource();
    }

    /**
     * Grabs the defined project name.
     *
     * @return string
     */
    public static function name(): string {
        return (string) static::get('app.name', '');
    }

    /**
     * Remove a value from the config.
     *
     * @param string $key
     */
    public static function remove(string $key): void {
        Col::remove(static::$_config, $key);
    }

    /**
     * Get the currently defined salt for the application.
     *
     * @return string
     */
    public static function salt(): string {
        return (string) static::get('app.salt', '');
    }

    /**
     * Add values to the current loaded configuration.
     *
     * @param string $key
     * @param mixed $value
     */
    public static function set(string $key, mixed $value): void {
        Col::set(static::$_config, $key, $value);
    }

}
/*
 * Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

#include <aws/ssm/model/DescribePatchGroupsResult.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/UnreferencedParam.h>

#include <utility>

using namespace Aws::SSM::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
using namespace Aws;

// Default-constructed result holds no mappings and an empty next token.
DescribePatchGroupsResult::DescribePatchGroupsResult()
{
}

// Construct directly from a service response by delegating to operator=.
DescribePatchGroupsResult::DescribePatchGroupsResult(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
  *this = result;
}

// Deserialize the JSON payload: the optional "Mappings" array is appended
// element-by-element into m_mappings, and the optional "NextToken"
// pagination cursor is copied into m_nextToken. Absent fields are skipped.
DescribePatchGroupsResult& DescribePatchGroupsResult::operator =(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
  JsonView payloadView = result.GetPayload().View();

  if(payloadView.ValueExists("Mappings"))
  {
    Array<JsonView> mappingList = payloadView.GetArray("Mappings");
    for(unsigned idx = 0; idx < mappingList.GetLength(); ++idx)
    {
      m_mappings.push_back(mappingList[idx].AsObject());
    }
  }

  if(payloadView.ValueExists("NextToken"))
  {
    m_nextToken = payloadView.GetString("NextToken");
  }

  return *this;
}
#include "TimePeriod.h"

#include <Poco/DateTime.h>
#include <Poco/Timezone.h>

using namespace Poco;

// ------------------------------------------------------------------------
// Returns the length of the overlap between this period and aTp, or a
// zero Timespan when the two periods do not intersect.
Timespan TimePeriod::TimeWithinTimePeriod(const TimePeriod &aTp) const
{
    // Completely before the interval?
    if (mTEnd < aTp.mTStart)
        return 0;
    // Completely after the interval?
    if (mTStart > aTp.mTEnd)
        return 0;

    // Overlap runs from the later of the two starts to the earlier of
    // the two ends.
    Timestamp lTTheEnd;
    if (mTEnd < aTp.mTEnd)
        lTTheEnd = mTEnd;
    else
        lTTheEnd = aTp.mTEnd;

    Timestamp lTTheStart;
    if (mTStart > aTp.mTStart)
        lTTheStart = mTStart;
    else
        lTTheStart = aTp.mTStart;

    return lTTheEnd - lTTheStart;
}

// ------------------------------------------------------------------------
// Advance to the next calendar period: the current end timestamp is, by
// construction, the first instant of the following period.
AbsoluteTimePeriod& AbsoluteTimePeriod::operator++()
{
    SetToContain(mTEnd);
    return *this;
}

AbsoluteTimePeriod& AbsoluteTimePeriod::operator--()
{
    // Back up 1 second from start, into the previous period.
    mTStart -= (Timespan(0, 0, 0, 1, 0)).totalMicroseconds();
    SetToContain(mTStart);
    return *this;
}

// Construct a period of the given granularity (day/week/month/year) that
// contains the given timestamp.
AbsoluteTimePeriod::AbsoluteTimePeriod(TimePeriodType aTpt, Timestamp aTContained)
{
    mTpt = aTpt;
    SetToContain(aTContained);
}

// Recompute [mTStart, mTEnd) as the calendar period of type mTpt that
// contains aT. mTEnd is exclusive: it is the first instant of the next
// period.
void AbsoluteTimePeriod::SetToContain(const Timestamp &aT)
{
    DateTime lDt(aT);
    switch (mTpt)
    {
    case TptDay:
        // Midnight of aT's day, plus exactly 24 hours.
        mTStart = DateTime(lDt.year(), lDt.month(), lDt.day()).timestamp();
        mTEnd = mTStart + Timespan(1, 0, 0, 0, 0).totalMicroseconds();
        break;
    case TptWeek:
        {
            // Poco's dayOfWeek() is 0 == Sunday, so weeks start on Sunday.
            int lIDow = lDt.dayOfWeek();
            mTStart = DateTime(lDt.year(), lDt.month(), lDt.day()).timestamp();
            mTStart -= Timespan(lIDow, 0, 0, 0, 0).totalMicroseconds();
            mTEnd = mTStart + Timespan(7, 0, 0, 0, 0).totalMicroseconds();
            break;
        }
    case TptMonth:
        mTStart = DateTime(lDt.year(), lDt.month(), 1).timestamp();
        // December rolls over into January of the following year.
        if (lDt.month() == 12)
            mTEnd = DateTime(lDt.year() + 1, 1, 1).timestamp();
        else
            mTEnd = DateTime(lDt.year(), lDt.month() + 1, 1).timestamp();
        break;
    case TptYear:
        mTStart = DateTime(lDt.year(), 1, 1).timestamp();
        mTEnd = DateTime(lDt.year() + 1, 1, 1).timestamp();
        break;
    }
}

// ------------------------------------------------------------------------
// Shift a UTC timestamp into local wall-clock time by adding the timezone
// differential (tzd() is in seconds; Timestamp resolution is microseconds).
Timestamp ToLocalTime(const Timestamp &aT)
{
    Timestamp lT(aT);
    Timestamp::TimeDiff lTd(Poco::Timezone::tzd());
    lTd *= 1000000;
    lT += lTd;
    return lT;
}

// Inverse of ToLocalTime: convert local wall-clock time back to UTC.
Timestamp FromLocalTime(const Timestamp &aT)
{
    Timestamp lT(aT);
    Timestamp::TimeDiff lTd(Poco::Timezone::tzd());
    lTd *= 1000000;
    lT -= lTd;
    return lT;
}
#include "render.hpp"

/**
 * A method to get the pure color of a point, if an Object has been hit.
 * Shades the nearest intersection (ambient/diffuse/specular plus
 * reflection and refraction up to 2 bounces); returns the skybox color
 * when nothing is hit.
 * @param ray ray to trace
 * @param depth recursion depth of the ray (0 for primary rays)
 */
glm::vec3 Render::get_color (Ray const& ray, int depth) const {
  // Get Intersected Object here
  glm::vec3 hit_point;
  glm::vec3 hit_normal;
  float dist = INFINITY;
  std::shared_ptr<Shape> shape;
  composite_->get_intersected_shape (ray, shape, hit_point, hit_normal, dist);

  // shape has not been hit
  if (dist == INFINITY) {
    return skybox_.get_color (ray.direction);
  }

  // get attributes from material; "opacity" here is actually the
  // transparency (1 - material opacity), kept for compatibility.
  float glossy  = shape->get_material()->glossy;
  float opacity = 1 - shape->get_material()->opacity;

  // The color gets calculated according to the lecture
  glm::vec3 color_final =
      get_brightness_color (ray, hit_point, hit_normal, depth, shape);

  // BUGFIX: glm default construction leaves components uninitialized
  // (unless GLM_FORCE_CTOR_INIT is defined), and both vectors are
  // accumulated with += below — zero-initialize them explicitly.
  glm::vec3 reflection_color{0.0f, 0.0f, 0.0f};
  glm::vec3 refraction_color{0.0f, 0.0f, 0.0f};

  // calculates a maximum of n reflections because depth < n
  if (depth < 2) {
    // Color calculations reflection
    if (glossy > 0) {
      // Single sample per bounce (multi-sampling for rough surfaces is
      // disabled; see commented-out expression).
      int reflection_samples = 1;  // (roughness == 0.0f ? 1 : 1 * (depth == 0) + 1)
      for (int i = 0; i < reflection_samples; ++i) {
        // NOTE(review): depth is passed through unchanged here and
        // get_reflected_color() recurses into get_color() at the SAME
        // depth, so reflection recursion is only bounded by this
        // depth < 2 check being re-entered — two facing mirrors could
        // recurse without bound. Incrementing depth would break the
        // depth-parity inside/outside test in get_refracted_color(),
        // so this needs a dedicated bounce counter; left as-is.
        reflection_color +=
            get_reflected_color (ray, hit_point, hit_normal, depth, shape);
      }
      reflection_color *= glossy / (float) reflection_samples;
    }

    // Color calculations refractions
    if (opacity > 0) {
      int refraction_samples = 1;  // (roughness == 0.0f ? 1 : 1 * (depth == 0) + 1)
      for (int i = 0; i < refraction_samples; ++i) {
        refraction_color +=
            get_refracted_color (ray, hit_point, hit_normal, depth, shape);
      }
      refraction_color *= opacity / (float) refraction_samples;
    }
  }

  // Color mixing for different opacity and glossyness values
  color_final *= (1.0f - opacity);
  color_final *= (1.0f - glossy);
  color_final += reflection_color + refraction_color;
  return color_final / glm::length (glm::vec3{1 - opacity, opacity, glossy});
}

/**
 * Set the composite object for this Renderer.
 * All shapes inside the composite object will be drawn.
 * @param composite_1 object with the information to be rendered
 */
void Render::set_composite (std::shared_ptr<Composite> const& composite_1) {
  composite_ = composite_1;
}

/**
 * Set the lights for this Renderer.
 * @param lights_1 lights vector with the information to be rendered
 */
void Render::set_lights (std::vector<std::shared_ptr<Light>> const& lights_1) {
  lights_ = lights_1;
}

/**
 * Set the ambient light for this Renderer.
 * @param ambient ambient light color/intensity of the scene
 */
void Render::set_ambient_scene (glm::vec3 const& ambient) {
  ambient_scene_ = ambient;
}

/**
 * Phong-style brightness calculation with shadow rays.
 * For each light: if the surface faces the light and no occluder lies
 * between hit point and light, add distance-attenuated diffuse+specular.
 * @param ray ray which was tested
 * @param hit_point position of the hit
 * @param hit_normal surface normal at the hit
 * @param depth recursion depth of the ray
 * @param shape shape that was hit (to get the material)
 * @return accumulated color at the hit point
 */
glm::vec3 Render::get_brightness_color (Ray const& ray, glm::vec3 const& hit_point,
                                        glm::vec3 const& hit_normal, int depth,
                                        std::shared_ptr<Shape> const& shape) const {
  // ambient term is always applied
  glm::vec3 intensity_color = shape->get_material()->color_ambient * ambient_scene_;
  glm::vec3 specular{0.0f, 0.0f, 0.0f};
  glm::vec3 diffuse{0.0f, 0.0f, 0.0f};
  float light_distance;
  float angle;
  glm::vec3 hit_to_light;

  // BUGFIX: loop index was a signed int compared against size() (unsigned).
  for (std::size_t i = 0; i < lights_.size(); ++i) {
    hit_to_light   = (lights_[i]->position - hit_point);
    light_distance = glm::length (hit_to_light);
    hit_to_light   = glm::normalize (hit_to_light);
    angle          = glm::dot (hit_normal, hit_to_light);

    // normal facing in direction of light
    if (0 < angle && depth < 20) {
      // Specular:
      glm::vec3 mirror_to_light =
          glm::normalize (get_reflected_vec3 (-hit_to_light, hit_normal));
      float strength_specular = glm::dot (-ray.direction, mirror_to_light);
      strength_specular = (float) std::pow (
          strength_specular, shape->get_material()->reflective_exponent);
      specular = shape->get_material()->color_specular * strength_specular;

      // Diffuse:
      diffuse = shape->get_material()->color_diffuse * angle + specular;
      diffuse *= lights_[i]->brightness;

      // Shadow ray: offset the origin along the normal so we don't
      // immediately re-hit the shape we just left.
      glm::vec3 offset = glm::vec3{0.0f, 0.0f, 0.0f};
      glm::vec3 newpos = hit_point + hit_normal * 0.0001f;
      // for soft shadows: jitter the shadow-ray direction for soft lights
      if (lights_[i]->hardness < 1.0f) {
        offset = glm::normalize (glm::vec3{random_float (), random_float (), random_float ()}) *
                 (1.0f - lights_[i]->hardness);
      }
      // offset is now a vec3 with a random direction plus the (scaled) light direction
      offset += glm::normalize (hit_to_light) * 0.001f;

      glm::vec3 new_hit_point;
      glm::vec3 new_hit_normal;
      float new_dist = INFINITY;
      std::shared_ptr<Shape> new_shape;
      composite_->get_intersected_shape ({newpos, offset}, new_shape, new_hit_point,
                                         new_hit_normal, new_dist);

      // light is closer than anything hit -> the hit point is lit
      if (INFINITY == new_dist || light_distance < new_dist) {
        // inverse-square falloff
        intensity_color += diffuse / (light_distance * light_distance);
      }
    }
  }
  return intensity_color;
}

/**
 * Function to calculate the color of the refracted ray.
 * Uses the parity of depth to decide whether the ray is entering or
 * leaving the material (so it assumes air<->object transitions only:
 * wrong when the camera starts inside an object or shapes intersect).
 * Optionally traces one ray per RGB channel for chromatic aberration
 * (expensive; only at depth <= 1 and when aberration_strength > 0).
 * @param ray incoming ray
 * @param hit_point point where the old ray has intersected the shape
 * @param hit_normal normal at the intersected position
 * @param depth recursion depth
 * @param shape last intersected shape (material / refractive index)
 * @return the calculated color after all recursions came to an end
 */
glm::vec3 Render::get_refracted_color (Ray const& ray, glm::vec3 const& hit_point,
                                       glm::vec3 const& hit_normal, int depth,
                                       std::shared_ptr<Shape> const& shape) const {
  // BUGFIX: total_color was default-constructed (uninitialized in glm)
  // but only one channel may be written per loop iteration below.
  glm::vec3 total_color{0.0f, 0.0f, 0.0f};
  int max = 3;  // for red, green and blue (for chromatic aberration)
  int min = 0;  // start
  if (depth > 1 || shape->get_material()->aberration_strength <= 0) {
    min = 1;  // only one color value will be calculated
    max = 2;  // only one color value will be calculated
  }

  for (int i = min; i < max; ++i) {
    float aberration = (float) i - 1;  // scale of the aberration
    aberration *= -shape->get_material()->aberration_strength;

    glm::vec3 position;
    glm::vec3 refracted_vector;
    // Even depth: ray is in air entering the object; normal is correct as-is.
    if (depth % 2 == 0) {
      // small offset to not intersect the last shape
      position = hit_point - hit_normal * 0.0001f;
      // the 1 is the refractive index of air (could be any medium)
      refracted_vector = get_refracted_vec3 (
          ray.direction, hit_normal,
          (1 / (shape->get_material()->refractive_index + aberration)));
    }
    else {
      // Odd depth: leaving the object; flip the normal and invert the ratio.
      position = hit_point + hit_normal * 0.0001f;
      refracted_vector = get_refracted_vec3 (
          ray.direction, -hit_normal,
          ((shape->get_material()->refractive_index + aberration) / 1));
    }

    if (shape->get_material()->roughness != 0.0f) {
      // Adds a vector in a random direction with the length of the roughness
      refracted_vector +=
          glm::normalize (glm::vec3{random_float (), random_float (), random_float ()}) *
          shape->get_material()->roughness;
    }

    glm::vec3 color = get_color ({position, refracted_vector}, depth + 1);
    total_color[i] = color[i];
    if (depth > 1 || shape->get_material()->aberration_strength <= 0.0f) {
      // no chromatic aberration at all: take the full color
      total_color = color;
    }
  }
  return total_color;
}

/**
 * Function to calculate the color of the reflected ray.
 * @param ray will be reflected and sent away
 * @param hit_point point where the old ray has intersected the shape
 * @param hit_normal normal at the intersected position
 * @param depth recursion depth (passed through unchanged — see the
 *        NOTE(review) in get_color about recursion bounds)
 * @param shape last intersected shape (for material information)
 * @return the calculated color after all recursions came to an end
 */
glm::vec3 Render::get_reflected_color (Ray const& ray, glm::vec3 const& hit_point,
                                       glm::vec3 const& hit_normal, int depth,
                                       std::shared_ptr<Shape> const& shape) const {
  glm::vec3 reflected_vector = get_reflected_vec3 (ray.direction, hit_normal);
  // no roughness = no jitter; roughness scatters the reflection for a matte look
  if (shape->get_material()->roughness != 0.0f) {
    glm::vec3 offset = {0.0f, 0.0f, 0.0f};
    for (int i = 0; i < 3; ++i) {
      offset[i] = random_float ();
    }
    offset = glm::normalize (offset) * shape->get_material()->roughness;
    reflected_vector = reflected_vector + offset;
  }
  Ray reflection_ray{hit_point + hit_normal * 0.0001f, reflected_vector};
  return get_color (reflection_ray, depth);
}

/**
 * for roughness to get a value between -1 and 1
 * @return random float value in [-1, 1]
 */
float Render::random_float () const {
  return ((((float) rand ()) / (float) (RAND_MAX / 2.0f)) - 1.0f);
}

/**
 * Function to get a refracted Vector via Snell's law.
 * Falls back to reflection on total internal reflection (sin^2 > 1).
 * @param vector incident direction
 * @param normal surface normal (pointing against the incident ray)
 * @param n ratio n1/n2 of the refractive indices
 * @return new refracted (or reflected) vector
 */
glm::vec3 Render::get_refracted_vec3 (glm::vec3 const& vector, glm::vec3 const& normal,
                                      float n) const {
  float cos_i  = -glm::dot (normal, vector);
  float sin_t2 = (n * n * (1.0f - (float) (cos_i * cos_i)));
  if (sin_t2 > 1.0f)
    return get_reflected_vec3 (vector, normal);
  return vector * n + (normal * (float) (n * cos_i - sqrt (1.0f - sin_t2)));
}

/**
 * Function to get a reflected Vector off a normal.
 * @param vector input to be reflected
 * @param normal vector to be reflected off
 * @return reflected vector (not normalized)
 */
glm::vec3 Render::get_reflected_vec3 (glm::vec3 const& vector,
                                      glm::vec3 const& normal) const {
  return normal * (float) (2.0f * fmax (glm::dot (normal, -vector), 0.0f)) + vector;
}
#include "Camera3D.hpp"

// Builds a perspective projection with a 90-degree vertical FOV and
// near/far planes at 0.1/1000. The float cast keeps the aspect ratio
// from being computed with integer division.
void Camera3D::createProjectionMatrix(unsigned int width, unsigned int height){
	m_projectionMatrix = glm::perspective(glm::radians(90.0f), width/(float)height, 0.1f, 1000.0f);
}

const glm::mat4& Camera3D::getProjectionMatrix(){
	return m_projectionMatrix;
}

const glm::mat4& Camera3D::getViewMatrix(){
	return m_viewMatrix;
}

const glm::vec3& Camera3D::getPosition(){
	return m_position;
}

// BUGFIX: this accessor returned m_position instead of m_direction,
// so callers asking for the view direction got the camera position.
const glm::vec3& Camera3D::getDirection(){
	return m_direction;
}

// Recomputes the view matrix looking from m_position along m_direction,
// with world +Y as the up vector.
void Camera3D::updateViewMatrix(){
	m_viewMatrix = glm::lookAt(m_position, m_position + m_direction, glm::vec3(0, 1, 0));
}
/***************************************************************************
* Copyright (c) 2017, Sylvain Corlay and Johan Mabille                     *
*                                                                          *
* Distributed under the terms of the BSD 3-Clause License.                 *
*                                                                          *
* The full license is in the file LICENSE, distributed with this software. *
****************************************************************************/

#ifndef XWIDGETS_SELECTION_CONTAINER_HPP
#define XWIDGETS_SELECTION_CONTAINER_HPP

#include <string>
#include <vector>

#include "xtl/xoptional.hpp"

#include "xbox.hpp"
#include "xmaterialize.hpp"

namespace xw
{
    /************************************
     * xselection_container declaration *
     ************************************/

    // CRTP base for box widgets whose children are selectable by index
    // (e.g. tab and accordion containers). Adds a per-child title list
    // and the index of the currently selected child to the xbox state.
    template <class D>
    class xselection_container : public xbox<D>
    {
    public:

        using base_type = xbox<D>;
        using derived_type = D;

        using titles_type = std::vector<std::string>;

        // Serializes/deserializes the widget state (titles + selection)
        // on top of the xbox state.
        void serialize_state(xeus::xjson&, xeus::buffer_sequence&) const;
        void apply_patch(const xeus::xjson&, const xeus::buffer_sequence&);

        // Titles of the children, parallel to the children list.
        XPROPERTY(titles_type, derived_type, _titles);
        // Index of the selected child; nullable, defaults to 0.
        XPROPERTY(xtl::xoptional<int>, derived_type, selected_index, 0);

        // Sets the title of child i and synchronizes it to the frontend.
        void set_title(typename titles_type::size_type i, std::string title);

    protected:

        xselection_container();
        using base_type::base_type;

    private:

        void set_defaults();
    };

    /***************************************
     * xselection_container implementation *
     ***************************************/

    template <class D>
    inline void xselection_container<D>::serialize_state(xeus::xjson& state, xeus::buffer_sequence& buffers) const
    {
        base_type::serialize_state(state, buffers);

        set_patch_from_property(_titles, state, buffers);
        set_patch_from_property(selected_index, state, buffers);
    }

    template <class D>
    inline void xselection_container<D>::apply_patch(const xeus::xjson& patch, const xeus::buffer_sequence& buffers)
    {
        base_type::apply_patch(patch, buffers);

        set_property_from_patch(_titles, patch, buffers);
        set_property_from_patch(selected_index, patch, buffers);
    }

    template <class D>
    inline xselection_container<D>::xselection_container()
        : base_type()
    {
        set_defaults();
    }

    template <class D>
    inline void xselection_container<D>::set_defaults()
    {
    }

    template <class D>
    inline void xselection_container<D>::set_title(typename titles_type::size_type i, std::string title)
    {
        // Keep the title list sized to the children list. NOTE(review):
        // on a size mismatch this rebuilds the list, discarding any
        // previously set titles — confirm that is intended.
        if (_titles().size() != this->children().size())
        {
            _titles() = titles_type(this->children().size());
        }
        _titles()[i] = title;
        // Push only the titles property to the frontend.
        xeus::xjson state;
        xeus::buffer_sequence buffers;
        set_patch_from_property(_titles, state, buffers);
        this->send_patch(std::move(state), std::move(buffers));
    }
}

#endif
#include "xc7vx485t.h"

// Device model for the Xilinx Virtex-7 XC7VX485T (VC707 board).
// Registers the JTAG IDCODE/mask, the 6-bit IR length, and the full
// instruction-register opcode table for this part.
xc7vx485t::xc7vx485t() : fpga("XC7VX485T", "Xilinx Virtex-7 XC7VX485T (VC707)", 0x03687093, 0x0FFFFFFF, 6)//, 0x9, 0x02)
{
	// Standard IEEE 1149.1 boundary-scan instructions
	ircodes["IDCODE"] = 0x09;
	ircodes["BYPASS"] = 0x3F;
	ircodes["EXTEST"] = 0x26;
	ircodes["SAMPLE"] = 0x01;
	ircodes["PRELOAD"] = 0x01;
	ircodes["USERCODE"] = 0x08;
	ircodes["HIGHZ"] = 0x0A;
	ircodes["EXTEST_PULSE"] = 0x3C;
	ircodes["EXTEST_TRAIN"] = 0x3D;
	// In-system configuration (ISC) instructions
	ircodes["ISC_ENABLE"] = 0x10;
	ircodes["ISC_PROGRAM"] = 0x11;
	ircodes["ISC_NOOP"] = 0x14;
	ircodes["XSC_READ_RSVD"] = 0x15;
	ircodes["ISC_DISABLE"] = 0x16;
	ircodes["XSC_PROGRAM_KEY"] = 0x12;
	ircodes["XSC_DNA"] = 0x17;
	// Configuration access and control
	ircodes["CFG_OUT"] = 0x04;
	ircodes["CFG_IN"] = 0x05;
	ircodes["JPROGRAM"] = 0x0B;
	ircodes["JSTART"] = 0x0C;
	ircodes["JSHUTDOWN"] = 0x0D;
	// eFUSE access
	ircodes["FUSE_CTS"] = 0x30;
	ircodes["FUSE_KEY"] = 0x31;
	ircodes["FUSE_DNA"] = 0x32;
	ircodes["FUSE_USER"] = 0x33;
	ircodes["FUSE_CNTL"] = 0x34;
	// User-defined registers (e.g. for BSCAN primitives)
	ircodes["USER1"] = 0x02;
	ircodes["USER2"] = 0x03;
	ircodes["USER3"] = 0x22;
	ircodes["USER4"] = 0x23;
	// System monitor / miscellaneous
	ircodes["XADC_DRP"] = 0x37;
	ircodes["INTEST_RSVD"] = 0x07;
}

xc7vx485t::~xc7vx485t()
{
}
// Copyright (c) 2011-2016 The Bitcoin Core developers
// Copyright (c) 2017 The Nyc3 Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#if defined(HAVE_CONFIG_H)
#include "config/nyc3-config.h"
#endif

#include "rpcconsole.h"
#include "ui_debugwindow.h"

#include "bantablemodel.h"
#include "clientmodel.h"
#include "guiutil.h"
#include "platformstyle.h"
#include "chainparams.h"
#include "netbase.h"
#include "rpc/server.h"
#include "rpc/client.h"
#include "util.h"

#include <openssl/crypto.h>

#include <univalue.h>

#ifdef ENABLE_WALLET
#include <db_cxx.h>
#include <wallet/wallet.h>
#endif

#include <QDesktopWidget>
#include <QKeyEvent>
#include <QMenu>
#include <QMessageBox>
#include <QScrollBar>
#include <QSettings>
#include <QSignalMapper>
#include <QThread>
#include <QTime>
#include <QTimer>
#include <QStringList>

#if QT_VERSION < 0x050000
#include <QUrl>
#endif

// TODO: add a scrollback limit, as there is currently none
// TODO: make it possible to filter out categories (esp debug messages when implemented)
// TODO: receive errors and debug messages through ClientModel

// Maximum number of commands kept in the console history
// (enforced in on_lineEdit_returnPressed).
const int CONSOLE_HISTORY = 50;
// Initial time span of the network traffic graph, in minutes.
const int INITIAL_TRAFFIC_GRAPH_MINS = 30;
// Allowed console font size range in points: width() is the minimum,
// height() is the maximum (checked in setFontSize).
const QSize FONT_RANGE(4, 40);
// QSettings key under which the console font size is persisted.
const char fontSizeSettingsKey[] = "consoleFontSize";

// Maps a message-category CSS class (see categoryClass) to the icon
// resource rendered next to console messages; nullptr-terminated,
// consumed by RPCConsole::clear() when registering image resources.
const struct {
    const char *url;
    const char *source;
} ICON_MAPPING[] = {
    {"cmd-request", ":/icons/tx_input"},
    {"cmd-reply", ":/icons/tx_output"},
    {"cmd-error", ":/icons/tx_output"},
    {"misc", ":/icons/tx_inout"},
    {nullptr, nullptr}
};

namespace {

// don't add private key handling cmd's to the history
// (matched case-insensitively in RPCParseCommandLine; matching commands
// and their arguments are redacted from the stored/filtered command line)
const QStringList historyFilter = QStringList()
    << "importprivkey"
    << "importmulti"
    << "signmessagewithprivkey"
    << "signrawtransaction"
    << "walletpassphrase"
    << "walletpassphrasechange"
    << "encryptwallet";

}

/* Object for executing console RPC commands in a separate thread.
*/
class RPCExecutor : public QObject
{
    Q_OBJECT

public Q_SLOTS:
    // Parses and executes a console command line; emits reply() with the
    // result or an error description.
    void request(const QString &command);

Q_SIGNALS:
    // category is one of RPCConsole::CMD_REPLY / CMD_ERROR.
    void reply(int category, const QString &command);
};

/** Class for handling RPC timers
 * (used for e.g. re-locking the wallet after a timeout)
 */
class QtRPCTimerBase: public QObject, public RPCTimerBase
{
    Q_OBJECT
public:
    // Single-shot timer: invokes _func once, millis milliseconds from now.
    QtRPCTimerBase(std::function<void(void)>& _func, int64_t millis):
        func(_func)
    {
        timer.setSingleShot(true);
        connect(&timer, SIGNAL(timeout()), this, SLOT(timeout()));
        timer.start(millis);
    }
    ~QtRPCTimerBase() {}
private Q_SLOTS:
    void timeout() { func(); }
private:
    QTimer timer;
    std::function<void(void)> func;
};

// Factory producing Qt-based RPC timers; registered with the RPC layer
// via RPCSetTimerInterfaceIfUnset in the RPCConsole constructor.
class QtRPCTimerInterface: public RPCTimerInterface
{
public:
    ~QtRPCTimerInterface() {}
    const char *Name() { return "Qt"; }
    RPCTimerBase* NewTimer(std::function<void(void)>& func, int64_t millis)
    {
        return new QtRPCTimerBase(func, millis);
    }
};

// moc-generated code for the Q_OBJECT classes declared above in this .cpp.
#include "rpcconsole.moc"

/**
 * Split shell command line into a list of arguments and optionally execute the command(s).
 * Aims to emulate \c bash and friends.
* * - Command nesting is possible with parenthesis; for example: validateaddress(getnewaddress()) * - Arguments are delimited with whitespace or comma * - Extra whitespace at the beginning and end and between arguments will be ignored * - Text can be "double" or 'single' quoted * - The backslash \c \ is used as escape character * - Outside quotes, any character can be escaped * - Within double quotes, only escape \c " and backslashes before a \c " or another backslash * - Within single quotes, no escaping is possible and no special interpretation takes place * * @param[out] result stringified Result from the executed command(chain) * @param[in] strCommand Command line to split * @param[in] fExecute set true if you want the command to be executed * @param[out] pstrFilteredOut Command line, filtered to remove any sensitive data */ bool RPCConsole::RPCParseCommandLine(std::string &strResult, const std::string &strCommand, const bool fExecute, std::string * const pstrFilteredOut) { std::vector< std::vector<std::string> > stack; stack.push_back(std::vector<std::string>()); enum CmdParseState { STATE_EATING_SPACES, STATE_EATING_SPACES_IN_ARG, STATE_EATING_SPACES_IN_BRACKETS, STATE_ARGUMENT, STATE_SINGLEQUOTED, STATE_DOUBLEQUOTED, STATE_ESCAPE_OUTER, STATE_ESCAPE_DOUBLEQUOTED, STATE_COMMAND_EXECUTED, STATE_COMMAND_EXECUTED_INNER } state = STATE_EATING_SPACES; std::string curarg; UniValue lastResult; unsigned nDepthInsideSensitive = 0; size_t filter_begin_pos = 0, chpos; std::vector<std::pair<size_t, size_t>> filter_ranges; auto add_to_current_stack = [&](const std::string& strArg) { if (stack.back().empty() && (!nDepthInsideSensitive) && historyFilter.contains(QString::fromStdString(strArg), Qt::CaseInsensitive)) { nDepthInsideSensitive = 1; filter_begin_pos = chpos; } // Make sure stack is not empty before adding something if (stack.empty()) { stack.push_back(std::vector<std::string>()); } stack.back().push_back(strArg); }; auto close_out_params = [&]() { if 
(nDepthInsideSensitive) { if (!--nDepthInsideSensitive) { assert(filter_begin_pos); filter_ranges.push_back(std::make_pair(filter_begin_pos, chpos)); filter_begin_pos = 0; } } stack.pop_back(); }; std::string strCommandTerminated = strCommand; if (strCommandTerminated.back() != '\n') strCommandTerminated += "\n"; for (chpos = 0; chpos < strCommandTerminated.size(); ++chpos) { char ch = strCommandTerminated[chpos]; switch(state) { case STATE_COMMAND_EXECUTED_INNER: case STATE_COMMAND_EXECUTED: { bool breakParsing = true; switch(ch) { case '[': curarg.clear(); state = STATE_COMMAND_EXECUTED_INNER; break; default: if (state == STATE_COMMAND_EXECUTED_INNER) { if (ch != ']') { // append char to the current argument (which is also used for the query command) curarg += ch; break; } if (curarg.size() && fExecute) { // if we have a value query, query arrays with index and objects with a string key UniValue subelement; if (lastResult.isArray()) { for(char argch: curarg) if (!std::isdigit(argch)) throw std::runtime_error("Invalid result query"); subelement = lastResult[atoi(curarg.c_str())]; } else if (lastResult.isObject()) subelement = find_value(lastResult, curarg); else throw std::runtime_error("Invalid result query"); //no array or object: abort lastResult = subelement; } state = STATE_COMMAND_EXECUTED; break; } // don't break parsing when the char is required for the next argument breakParsing = false; // pop the stack and return the result to the current command arguments close_out_params(); // don't stringify the json in case of a string to avoid doublequotes if (lastResult.isStr()) curarg = lastResult.get_str(); else curarg = lastResult.write(2); // if we have a non empty result, use it as stack argument otherwise as general result if (curarg.size()) { if (stack.size()) add_to_current_stack(curarg); else strResult = curarg; } curarg.clear(); // assume eating space state state = STATE_EATING_SPACES; } if (breakParsing) break; } case STATE_ARGUMENT: // In or after 
argument case STATE_EATING_SPACES_IN_ARG: case STATE_EATING_SPACES_IN_BRACKETS: case STATE_EATING_SPACES: // Handle runs of whitespace switch(ch) { case '"': state = STATE_DOUBLEQUOTED; break; case '\'': state = STATE_SINGLEQUOTED; break; case '\\': state = STATE_ESCAPE_OUTER; break; case '(': case ')': case '\n': if (state == STATE_EATING_SPACES_IN_ARG) throw std::runtime_error("Invalid Syntax"); if (state == STATE_ARGUMENT) { if (ch == '(' && stack.size() && stack.back().size() > 0) { if (nDepthInsideSensitive) { ++nDepthInsideSensitive; } stack.push_back(std::vector<std::string>()); } // don't allow commands after executed commands on baselevel if (!stack.size()) throw std::runtime_error("Invalid Syntax"); add_to_current_stack(curarg); curarg.clear(); state = STATE_EATING_SPACES_IN_BRACKETS; } if ((ch == ')' || ch == '\n') && stack.size() > 0) { if (fExecute) { // Convert argument list to JSON objects in method-dependent way, // and pass it along with the method name to the dispatcher. 
JSONRPCRequest req; req.params = RPCConvertValues(stack.back()[0], std::vector<std::string>(stack.back().begin() + 1, stack.back().end())); req.strMethod = stack.back()[0]; #ifdef ENABLE_WALLET // TODO: Move this logic to WalletModel if (!vpwallets.empty()) { // in Qt, use always the wallet with index 0 when running with multiple wallets QByteArray encodedName = QUrl::toPercentEncoding(QString::fromStdString(vpwallets[0]->GetName())); req.URI = "/wallet/"+std::string(encodedName.constData(), encodedName.length()); } #endif lastResult = tableRPC.execute(req); } state = STATE_COMMAND_EXECUTED; curarg.clear(); } break; case ' ': case ',': case '\t': if(state == STATE_EATING_SPACES_IN_ARG && curarg.empty() && ch == ',') throw std::runtime_error("Invalid Syntax"); else if(state == STATE_ARGUMENT) // Space ends argument { add_to_current_stack(curarg); curarg.clear(); } if ((state == STATE_EATING_SPACES_IN_BRACKETS || state == STATE_ARGUMENT) && ch == ',') { state = STATE_EATING_SPACES_IN_ARG; break; } state = STATE_EATING_SPACES; break; default: curarg += ch; state = STATE_ARGUMENT; } break; case STATE_SINGLEQUOTED: // Single-quoted string switch(ch) { case '\'': state = STATE_ARGUMENT; break; default: curarg += ch; } break; case STATE_DOUBLEQUOTED: // Double-quoted string switch(ch) { case '"': state = STATE_ARGUMENT; break; case '\\': state = STATE_ESCAPE_DOUBLEQUOTED; break; default: curarg += ch; } break; case STATE_ESCAPE_OUTER: // '\' outside quotes curarg += ch; state = STATE_ARGUMENT; break; case STATE_ESCAPE_DOUBLEQUOTED: // '\' in double-quoted text if(ch != '"' && ch != '\\') curarg += '\\'; // keep '\' for everything but the quote and '\' itself curarg += ch; state = STATE_DOUBLEQUOTED; break; } } if (pstrFilteredOut) { if (STATE_COMMAND_EXECUTED == state) { assert(!stack.empty()); close_out_params(); } *pstrFilteredOut = strCommand; for (auto i = filter_ranges.rbegin(); i != filter_ranges.rend(); ++i) { pstrFilteredOut->replace(i->first, i->second - 
i->first, "(…)"); } } switch(state) // final state { case STATE_COMMAND_EXECUTED: if (lastResult.isStr()) strResult = lastResult.get_str(); else strResult = lastResult.write(2); case STATE_ARGUMENT: case STATE_EATING_SPACES: return true; default: // ERROR to end in one of the other states return false; } } void RPCExecutor::request(const QString &command) { try { std::string result; std::string executableCommand = command.toStdString() + "\n"; if(!RPCConsole::RPCExecuteCommandLine(result, executableCommand)) { Q_EMIT reply(RPCConsole::CMD_ERROR, QString("Parse error: unbalanced ' or \"")); return; } Q_EMIT reply(RPCConsole::CMD_REPLY, QString::fromStdString(result)); } catch (UniValue& objError) { try // Nice formatting for standard-format error { int code = find_value(objError, "code").get_int(); std::string message = find_value(objError, "message").get_str(); Q_EMIT reply(RPCConsole::CMD_ERROR, QString::fromStdString(message) + " (code " + QString::number(code) + ")"); } catch (const std::runtime_error&) // raised when converting to invalid type, i.e. 
missing code or message { // Show raw JSON object Q_EMIT reply(RPCConsole::CMD_ERROR, QString::fromStdString(objError.write())); } } catch (const std::exception& e) { Q_EMIT reply(RPCConsole::CMD_ERROR, QString("Error: ") + QString::fromStdString(e.what())); } } RPCConsole::RPCConsole(const PlatformStyle *_platformStyle, QWidget *parent) : QWidget(parent), ui(new Ui::RPCConsole), clientModel(0), historyPtr(0), platformStyle(_platformStyle), peersTableContextMenu(0), banTableContextMenu(0), consoleFontSize(0) { ui->setupUi(this); QSettings settings; if (!restoreGeometry(settings.value("RPCConsoleWindowGeometry").toByteArray())) { // Restore failed (perhaps missing setting), center the window move(QApplication::desktop()->availableGeometry().center() - frameGeometry().center()); } ui->openDebugLogfileButton->setToolTip(ui->openDebugLogfileButton->toolTip().arg(tr(PACKAGE_NAME))); if (platformStyle->getImagesOnButtons()) { ui->openDebugLogfileButton->setIcon(platformStyle->SingleColorIcon(":/icons/export")); } ui->clearButton->setIcon(platformStyle->SingleColorIcon(":/icons/remove")); ui->fontBiggerButton->setIcon(platformStyle->SingleColorIcon(":/icons/fontbigger")); ui->fontSmallerButton->setIcon(platformStyle->SingleColorIcon(":/icons/fontsmaller")); // Install event filter for up and down arrow ui->lineEdit->installEventFilter(this); ui->messagesWidget->installEventFilter(this); connect(ui->clearButton, SIGNAL(clicked()), this, SLOT(clear())); connect(ui->fontBiggerButton, SIGNAL(clicked()), this, SLOT(fontBigger())); connect(ui->fontSmallerButton, SIGNAL(clicked()), this, SLOT(fontSmaller())); connect(ui->btnClearTrafficGraph, SIGNAL(clicked()), ui->trafficGraph, SLOT(clear())); // set library version labels #ifdef ENABLE_WALLET ui->berkeleyDBVersion->setText(DbEnv::version(0, 0, 0)); #else ui->label_berkeleyDBVersion->hide(); ui->berkeleyDBVersion->hide(); #endif // Register RPC timer interface rpcTimerInterface = new QtRPCTimerInterface(); // avoid accidentally 
overwriting an existing, non QTThread // based timer interface RPCSetTimerInterfaceIfUnset(rpcTimerInterface); setTrafficGraphRange(INITIAL_TRAFFIC_GRAPH_MINS); ui->detailWidget->hide(); ui->peerHeading->setText(tr("Select a peer to view detailed information.")); consoleFontSize = settings.value(fontSizeSettingsKey, QFontInfo(QFont()).pointSize()).toInt(); clear(); } RPCConsole::~RPCConsole() { QSettings settings; settings.setValue("RPCConsoleWindowGeometry", saveGeometry()); RPCUnsetTimerInterface(rpcTimerInterface); delete rpcTimerInterface; delete ui; } bool RPCConsole::eventFilter(QObject* obj, QEvent *event) { if(event->type() == QEvent::KeyPress) // Special key handling { QKeyEvent *keyevt = static_cast<QKeyEvent*>(event); int key = keyevt->key(); Qt::KeyboardModifiers mod = keyevt->modifiers(); switch(key) { case Qt::Key_Up: if(obj == ui->lineEdit) { browseHistory(-1); return true; } break; case Qt::Key_Down: if(obj == ui->lineEdit) { browseHistory(1); return true; } break; case Qt::Key_PageUp: /* pass paging keys to messages widget */ case Qt::Key_PageDown: if(obj == ui->lineEdit) { QApplication::postEvent(ui->messagesWidget, new QKeyEvent(*keyevt)); return true; } break; case Qt::Key_Return: case Qt::Key_Enter: // forward these events to lineEdit if(obj == autoCompleter->popup()) { QApplication::postEvent(ui->lineEdit, new QKeyEvent(*keyevt)); return true; } break; default: // Typing in messages widget brings focus to line edit, and redirects key there // Exclude most combinations and keys that emit no text, except paste shortcuts if(obj == ui->messagesWidget && ( (!mod && !keyevt->text().isEmpty() && key != Qt::Key_Tab) || ((mod & Qt::ControlModifier) && key == Qt::Key_V) || ((mod & Qt::ShiftModifier) && key == Qt::Key_Insert))) { ui->lineEdit->setFocus(); QApplication::postEvent(ui->lineEdit, new QKeyEvent(*keyevt)); return true; } } } return QWidget::eventFilter(obj, event); } void RPCConsole::setClientModel(ClientModel *model) { clientModel = model; 
ui->trafficGraph->setClientModel(model); if (model && clientModel->getPeerTableModel() && clientModel->getBanTableModel()) { // Keep up to date with client setNumConnections(model->getNumConnections()); connect(model, SIGNAL(numConnectionsChanged(int)), this, SLOT(setNumConnections(int))); setNumBlocks(model->getNumBlocks(), model->getLastBlockDate(), model->getVerificationProgress(nullptr), false); connect(model, SIGNAL(numBlocksChanged(int,QDateTime,double,bool)), this, SLOT(setNumBlocks(int,QDateTime,double,bool))); updateNetworkState(); connect(model, SIGNAL(networkActiveChanged(bool)), this, SLOT(setNetworkActive(bool))); updateTrafficStats(model->getTotalBytesRecv(), model->getTotalBytesSent()); connect(model, SIGNAL(bytesChanged(quint64,quint64)), this, SLOT(updateTrafficStats(quint64, quint64))); connect(model, SIGNAL(mempoolSizeChanged(long,size_t)), this, SLOT(setMempoolSize(long,size_t))); // set up peer table ui->peerWidget->setModel(model->getPeerTableModel()); ui->peerWidget->verticalHeader()->hide(); ui->peerWidget->setEditTriggers(QAbstractItemView::NoEditTriggers); ui->peerWidget->setSelectionBehavior(QAbstractItemView::SelectRows); ui->peerWidget->setSelectionMode(QAbstractItemView::ExtendedSelection); ui->peerWidget->setContextMenuPolicy(Qt::CustomContextMenu); ui->peerWidget->setColumnWidth(PeerTableModel::Address, ADDRESS_COLUMN_WIDTH); ui->peerWidget->setColumnWidth(PeerTableModel::Subversion, SUBVERSION_COLUMN_WIDTH); ui->peerWidget->setColumnWidth(PeerTableModel::Ping, PING_COLUMN_WIDTH); ui->peerWidget->horizontalHeader()->setStretchLastSection(true); // create peer table context menu actions QAction* disconnectAction = new QAction(tr("&Disconnect"), this); QAction* banAction1h = new QAction(tr("Ban for") + " " + tr("1 &hour"), this); QAction* banAction24h = new QAction(tr("Ban for") + " " + tr("1 &day"), this); QAction* banAction7d = new QAction(tr("Ban for") + " " + tr("1 &week"), this); QAction* banAction365d = new QAction(tr("Ban for") 
+ " " + tr("1 &year"), this); // create peer table context menu peersTableContextMenu = new QMenu(this); peersTableContextMenu->addAction(disconnectAction); peersTableContextMenu->addAction(banAction1h); peersTableContextMenu->addAction(banAction24h); peersTableContextMenu->addAction(banAction7d); peersTableContextMenu->addAction(banAction365d); // Add a signal mapping to allow dynamic context menu arguments. // We need to use int (instead of int64_t), because signal mapper only supports // int or objects, which is okay because max bantime (1 year) is < int_max. QSignalMapper* signalMapper = new QSignalMapper(this); signalMapper->setMapping(banAction1h, 60*60); signalMapper->setMapping(banAction24h, 60*60*24); signalMapper->setMapping(banAction7d, 60*60*24*7); signalMapper->setMapping(banAction365d, 60*60*24*365); connect(banAction1h, SIGNAL(triggered()), signalMapper, SLOT(map())); connect(banAction24h, SIGNAL(triggered()), signalMapper, SLOT(map())); connect(banAction7d, SIGNAL(triggered()), signalMapper, SLOT(map())); connect(banAction365d, SIGNAL(triggered()), signalMapper, SLOT(map())); connect(signalMapper, SIGNAL(mapped(int)), this, SLOT(banSelectedNode(int))); // peer table context menu signals connect(ui->peerWidget, SIGNAL(customContextMenuRequested(const QPoint&)), this, SLOT(showPeersTableContextMenu(const QPoint&))); connect(disconnectAction, SIGNAL(triggered()), this, SLOT(disconnectSelectedNode())); // peer table signal handling - update peer details when selecting new node connect(ui->peerWidget->selectionModel(), SIGNAL(selectionChanged(const QItemSelection &, const QItemSelection &)), this, SLOT(peerSelected(const QItemSelection &, const QItemSelection &))); // peer table signal handling - update peer details when new nodes are added to the model connect(model->getPeerTableModel(), SIGNAL(layoutChanged()), this, SLOT(peerLayoutChanged())); // peer table signal handling - cache selected node ids connect(model->getPeerTableModel(), 
SIGNAL(layoutAboutToBeChanged()), this, SLOT(peerLayoutAboutToChange())); // set up ban table ui->banlistWidget->setModel(model->getBanTableModel()); ui->banlistWidget->verticalHeader()->hide(); ui->banlistWidget->setEditTriggers(QAbstractItemView::NoEditTriggers); ui->banlistWidget->setSelectionBehavior(QAbstractItemView::SelectRows); ui->banlistWidget->setSelectionMode(QAbstractItemView::SingleSelection); ui->banlistWidget->setContextMenuPolicy(Qt::CustomContextMenu); ui->banlistWidget->setColumnWidth(BanTableModel::Address, BANSUBNET_COLUMN_WIDTH); ui->banlistWidget->setColumnWidth(BanTableModel::Bantime, BANTIME_COLUMN_WIDTH); ui->banlistWidget->horizontalHeader()->setStretchLastSection(true); // create ban table context menu action QAction* unbanAction = new QAction(tr("&Unban"), this); // create ban table context menu banTableContextMenu = new QMenu(this); banTableContextMenu->addAction(unbanAction); // ban table context menu signals connect(ui->banlistWidget, SIGNAL(customContextMenuRequested(const QPoint&)), this, SLOT(showBanTableContextMenu(const QPoint&))); connect(unbanAction, SIGNAL(triggered()), this, SLOT(unbanSelectedNode())); // ban table signal handling - clear peer details when clicking a peer in the ban table connect(ui->banlistWidget, SIGNAL(clicked(const QModelIndex&)), this, SLOT(clearSelectedNode())); // ban table signal handling - ensure ban table is shown or hidden (if empty) connect(model->getBanTableModel(), SIGNAL(layoutChanged()), this, SLOT(showOrHideBanTableIfRequired())); showOrHideBanTableIfRequired(); // Provide initial values ui->clientVersion->setText(model->formatFullVersion()); ui->clientUserAgent->setText(model->formatSubVersion()); ui->dataDir->setText(model->dataDir()); ui->startupTime->setText(model->formatClientStartupTime()); ui->networkName->setText(QString::fromStdString(Params().NetworkIDString())); //Setup autocomplete and attach it QStringList wordList; std::vector<std::string> commandList = tableRPC.listCommands(); 
for (size_t i = 0; i < commandList.size(); ++i) { wordList << commandList[i].c_str(); wordList << ("help " + commandList[i]).c_str(); } wordList.sort(); autoCompleter = new QCompleter(wordList, this); autoCompleter->setModelSorting(QCompleter::CaseSensitivelySortedModel); ui->lineEdit->setCompleter(autoCompleter); autoCompleter->popup()->installEventFilter(this); // Start thread to execute RPC commands. startExecutor(); } if (!model) { // Client model is being set to 0, this means shutdown() is about to be called. // Make sure we clean up the executor thread Q_EMIT stopExecutor(); thread.wait(); } } static QString categoryClass(int category) { switch(category) { case RPCConsole::CMD_REQUEST: return "cmd-request"; break; case RPCConsole::CMD_REPLY: return "cmd-reply"; break; case RPCConsole::CMD_ERROR: return "cmd-error"; break; default: return "misc"; } } void RPCConsole::fontBigger() { setFontSize(consoleFontSize+1); } void RPCConsole::fontSmaller() { setFontSize(consoleFontSize-1); } void RPCConsole::setFontSize(int newSize) { QSettings settings; //don't allow an insane font size if (newSize < FONT_RANGE.width() || newSize > FONT_RANGE.height()) return; // temp. 
store the console content QString str = ui->messagesWidget->toHtml(); // replace font tags size in current content str.replace(QString("font-size:%1pt").arg(consoleFontSize), QString("font-size:%1pt").arg(newSize)); // store the new font size consoleFontSize = newSize; settings.setValue(fontSizeSettingsKey, consoleFontSize); // clear console (reset icon sizes, default stylesheet) and re-add the content float oldPosFactor = 1.0 / ui->messagesWidget->verticalScrollBar()->maximum() * ui->messagesWidget->verticalScrollBar()->value(); clear(false); ui->messagesWidget->setHtml(str); ui->messagesWidget->verticalScrollBar()->setValue(oldPosFactor * ui->messagesWidget->verticalScrollBar()->maximum()); } void RPCConsole::clear(bool clearHistory) { ui->messagesWidget->clear(); if(clearHistory) { history.clear(); historyPtr = 0; } ui->lineEdit->clear(); ui->lineEdit->setFocus(); // Add smoothly scaled icon images. // (when using width/height on an img, Qt uses nearest instead of linear interpolation) for(int i=0; ICON_MAPPING[i].url; ++i) { ui->messagesWidget->document()->addResource( QTextDocument::ImageResource, QUrl(ICON_MAPPING[i].url), platformStyle->SingleColorImage(ICON_MAPPING[i].source).scaled(QSize(consoleFontSize*2, consoleFontSize*2), Qt::IgnoreAspectRatio, Qt::SmoothTransformation)); } // Set default style sheet QFontInfo fixedFontInfo(GUIUtil::fixedPitchFont()); ui->messagesWidget->document()->setDefaultStyleSheet( QString( "table { }" "td.time { color: #808080; font-size: %2; padding-top: 3px; } " "td.message { font-family: %1; font-size: %2; white-space:pre-wrap; } " "td.cmd-request { color: #006060; } " "td.cmd-error { color: red; } " ".secwarning { color: red; }" "b { color: #006060; } " ).arg(fixedFontInfo.family(), QString("%1pt").arg(consoleFontSize)) ); #ifdef Q_OS_MAC QString clsKey = "(⌘)-L"; #else QString clsKey = "Ctrl-L"; #endif message(CMD_REPLY, (tr("Welcome to the %1 RPC console.").arg(tr(PACKAGE_NAME)) + "<br>" + tr("Use up and down arrows to 
navigate history, and %1 to clear screen.").arg("<b>"+clsKey+"</b>") + "<br>" + tr("Type <b>help</b> for an overview of available commands.")) + "<br><span class=\"secwarning\">" + tr("WARNING: Scammers have been active, telling users to type commands here, stealing their wallet contents. Do not use this console without fully understanding the ramifications of a command.") + "</span>", true); } void RPCConsole::keyPressEvent(QKeyEvent *event) { if(windowType() != Qt::Widget && event->key() == Qt::Key_Escape) { close(); } } void RPCConsole::message(int category, const QString &message, bool html) { QTime time = QTime::currentTime(); QString timeString = time.toString(); QString out; out += "<table><tr><td class=\"time\" width=\"65\">" + timeString + "</td>"; out += "<td class=\"icon\" width=\"32\"><img src=\"" + categoryClass(category) + "\"></td>"; out += "<td class=\"message " + categoryClass(category) + "\" valign=\"middle\">"; if(html) out += message; else out += GUIUtil::HtmlEscape(message, false); out += "</td></tr></table>"; ui->messagesWidget->append(out); } void RPCConsole::updateNetworkState() { QString connections = QString::number(clientModel->getNumConnections()) + " ("; connections += tr("In:") + " " + QString::number(clientModel->getNumConnections(CONNECTIONS_IN)) + " / "; connections += tr("Out:") + " " + QString::number(clientModel->getNumConnections(CONNECTIONS_OUT)) + ")"; if(!clientModel->getNetworkActive()) { connections += " (" + tr("Network activity disabled") + ")"; } ui->numberOfConnections->setText(connections); } void RPCConsole::setNumConnections(int count) { if (!clientModel) return; updateNetworkState(); } void RPCConsole::setNetworkActive(bool networkActive) { updateNetworkState(); } void RPCConsole::setNumBlocks(int count, const QDateTime& blockDate, double nVerificationProgress, bool headers) { if (!headers) { ui->numberOfBlocks->setText(QString::number(count)); ui->lastBlockTime->setText(blockDate.toString()); } } void 
RPCConsole::setMempoolSize(long numberOfTxs, size_t dynUsage) { ui->mempoolNumberTxs->setText(QString::number(numberOfTxs)); if (dynUsage < 1000000) ui->mempoolSize->setText(QString::number(dynUsage/1000.0, 'f', 2) + " KB"); else ui->mempoolSize->setText(QString::number(dynUsage/1000000.0, 'f', 2) + " MB"); } void RPCConsole::on_lineEdit_returnPressed() { QString cmd = ui->lineEdit->text(); if(!cmd.isEmpty()) { std::string strFilteredCmd; try { std::string dummy; if (!RPCParseCommandLine(dummy, cmd.toStdString(), false, &strFilteredCmd)) { // Failed to parse command, so we cannot even filter it for the history throw std::runtime_error("Invalid command line"); } } catch (const std::exception& e) { QMessageBox::critical(this, "Error", QString("Error: ") + QString::fromStdString(e.what())); return; } ui->lineEdit->clear(); cmdBeforeBrowsing = QString(); message(CMD_REQUEST, QString::fromStdString(strFilteredCmd)); Q_EMIT cmdRequest(cmd); cmd = QString::fromStdString(strFilteredCmd); // Remove command, if already in history history.removeOne(cmd); // Append command to history history.append(cmd); // Enforce maximum history size while(history.size() > CONSOLE_HISTORY) history.removeFirst(); // Set pointer to end of history historyPtr = history.size(); // Scroll console view to end scrollToEnd(); } } void RPCConsole::browseHistory(int offset) { // store current text when start browsing through the history if (historyPtr == history.size()) { cmdBeforeBrowsing = ui->lineEdit->text(); } historyPtr += offset; if(historyPtr < 0) historyPtr = 0; if(historyPtr > history.size()) historyPtr = history.size(); QString cmd; if(historyPtr < history.size()) cmd = history.at(historyPtr); else if (!cmdBeforeBrowsing.isNull()) { cmd = cmdBeforeBrowsing; } ui->lineEdit->setText(cmd); } void RPCConsole::startExecutor() { RPCExecutor *executor = new RPCExecutor(); executor->moveToThread(&thread); // Replies from executor object must go to this object connect(executor, 
SIGNAL(reply(int,QString)), this, SLOT(message(int,QString))); // Requests from this object must go to executor connect(this, SIGNAL(cmdRequest(QString)), executor, SLOT(request(QString))); // On stopExecutor signal // - quit the Qt event loop in the execution thread connect(this, SIGNAL(stopExecutor()), &thread, SLOT(quit())); // - queue executor for deletion (in execution thread) connect(&thread, SIGNAL(finished()), executor, SLOT(deleteLater()), Qt::DirectConnection); // Default implementation of QThread::run() simply spins up an event loop in the thread, // which is what we want. thread.start(); } void RPCConsole::on_tabWidget_currentChanged(int index) { if (ui->tabWidget->widget(index) == ui->tab_console) ui->lineEdit->setFocus(); else if (ui->tabWidget->widget(index) != ui->tab_peers) clearSelectedNode(); } void RPCConsole::on_openDebugLogfileButton_clicked() { GUIUtil::openDebugLogfile(); } void RPCConsole::scrollToEnd() { QScrollBar *scrollbar = ui->messagesWidget->verticalScrollBar(); scrollbar->setValue(scrollbar->maximum()); } void RPCConsole::on_sldGraphRange_valueChanged(int value) { const int multiplier = 5; // each position on the slider represents 5 min int mins = value * multiplier; setTrafficGraphRange(mins); } void RPCConsole::setTrafficGraphRange(int mins) { ui->trafficGraph->setGraphRangeMins(mins); ui->lblGraphRange->setText(GUIUtil::formatDurationStr(mins * 60)); } void RPCConsole::updateTrafficStats(quint64 totalBytesIn, quint64 totalBytesOut) { ui->lblBytesIn->setText(GUIUtil::formatBytes(totalBytesIn)); ui->lblBytesOut->setText(GUIUtil::formatBytes(totalBytesOut)); } void RPCConsole::peerSelected(const QItemSelection &selected, const QItemSelection &deselected) { Q_UNUSED(deselected); if (!clientModel || !clientModel->getPeerTableModel() || selected.indexes().isEmpty()) return; const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(selected.indexes().first().row()); if (stats) updateNodeDetail(stats); } void 
// NOTE(review): the return type of this first definition lies before the
// visible chunk; the definition is reproduced from its visible start onward.
// Cache the node ids of all currently selected peers so the selection can be
// restored after the peer table model's layout changes.
RPCConsole::peerLayoutAboutToChange() {
    QModelIndexList selected = ui->peerWidget->selectionModel()->selectedIndexes();
    cachedNodeids.clear();
    for(int i = 0; i < selected.size(); i++)
    {
        const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(selected.at(i).row());
        cachedNodeids.append(stats->nodeStats.nodeid);
    }
}

// Re-locate (or drop) the detail-pane peer after the table's layout changed:
// the cached node may have moved rows or disconnected entirely.
void RPCConsole::peerLayoutChanged()
{
    if (!clientModel || !clientModel->getPeerTableModel())
        return;

    const CNodeCombinedStats *stats = nullptr;
    bool fUnselect = false;
    bool fReselect = false;

    if (cachedNodeids.empty()) // no node selected yet
        return;

    // find the currently selected row
    int selectedRow = -1;
    QModelIndexList selectedModelIndex = ui->peerWidget->selectionModel()->selectedIndexes();
    if (!selectedModelIndex.isEmpty()) {
        selectedRow = selectedModelIndex.first().row();
    }

    // check if our detail node has a row in the table (it may not necessarily
    // be at selectedRow since its position can change after a layout change)
    int detailNodeRow = clientModel->getPeerTableModel()->getRowByNodeId(cachedNodeids.first());

    if (detailNodeRow < 0)
    {
        // detail node disappeared from table (node disconnected)
        fUnselect = true;
    }
    else
    {
        if (detailNodeRow != selectedRow)
        {
            // detail node moved position
            fUnselect = true;
            fReselect = true;
        }

        // get fresh stats on the detail node.
        stats = clientModel->getPeerTableModel()->getNodeStats(detailNodeRow);
    }

    if (fUnselect && selectedRow >= 0) {
        clearSelectedNode();
    }

    if (fReselect)
    {
        // Re-select every cached node at its (possibly new) row.
        for(int i = 0; i < cachedNodeids.size(); i++)
        {
            ui->peerWidget->selectRow(clientModel->getPeerTableModel()->getRowByNodeId(cachedNodeids.at(i)));
        }
    }

    if (stats)
        updateNodeDetail(stats);
}

// Fill the peer-detail pane from the given combined node statistics.
void RPCConsole::updateNodeDetail(const CNodeCombinedStats *stats)
{
    // update the detail ui with latest node information
    QString peerAddrDetails(QString::fromStdString(stats->nodeStats.addrName) + " ");
    peerAddrDetails += tr("(node id: %1)").arg(QString::number(stats->nodeStats.nodeid));
    if (!stats->nodeStats.addrLocal.empty())
        peerAddrDetails += "<br />" + tr("via %1").arg(QString::fromStdString(stats->nodeStats.addrLocal));
    ui->peerHeading->setText(peerAddrDetails);
    ui->peerServices->setText(GUIUtil::formatServicesStr(stats->nodeStats.nServices));
    // nLastSend/nLastRecv of 0 mean "never happened yet".
    ui->peerLastSend->setText(stats->nodeStats.nLastSend ? GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nLastSend) : tr("never"));
    ui->peerLastRecv->setText(stats->nodeStats.nLastRecv ? GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nLastRecv) : tr("never"));
    ui->peerBytesSent->setText(GUIUtil::formatBytes(stats->nodeStats.nSendBytes));
    ui->peerBytesRecv->setText(GUIUtil::formatBytes(stats->nodeStats.nRecvBytes));
    ui->peerConnTime->setText(GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nTimeConnected));
    ui->peerPingTime->setText(GUIUtil::formatPingTime(stats->nodeStats.dPingTime));
    ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStats.dPingWait));
    ui->peerMinPing->setText(GUIUtil::formatPingTime(stats->nodeStats.dMinPing));
    ui->timeoffset->setText(GUIUtil::formatTimeOffset(stats->nodeStats.nTimeOffset));
    ui->peerVersion->setText(QString("%1").arg(QString::number(stats->nodeStats.nVersion)));
    ui->peerSubversion->setText(QString::fromStdString(stats->nodeStats.cleanSubVer));
    ui->peerDirection->setText(stats->nodeStats.fInbound ? tr("Inbound") : tr("Outbound"));
    ui->peerHeight->setText(QString("%1").arg(QString::number(stats->nodeStats.nStartingHeight)));
    ui->peerWhitelisted->setText(stats->nodeStats.fWhitelisted ? tr("Yes") : tr("No"));

    // This check fails for example if the lock was busy and
    // nodeStateStats couldn't be fetched.
    if (stats->fNodeStateStatsAvailable) {
        // Ban score is init to 0
        ui->peerBanScore->setText(QString("%1").arg(stats->nodeStateStats.nMisbehavior));
        // Sync height is init to -1
        if (stats->nodeStateStats.nSyncHeight > -1)
            ui->peerSyncHeight->setText(QString("%1").arg(stats->nodeStateStats.nSyncHeight));
        else
            ui->peerSyncHeight->setText(tr("Unknown"));
        // Common height is init to -1
        if (stats->nodeStateStats.nCommonHeight > -1)
            ui->peerCommonHeight->setText(QString("%1").arg(stats->nodeStateStats.nCommonHeight));
        else
            ui->peerCommonHeight->setText(tr("Unknown"));
    }
    // NOTE(review): when fNodeStateStatsAvailable is false the state fields
    // above keep whatever they showed before — presumably acceptable for a
    // transient lock failure; confirm against the refresh cadence.

    ui->detailWidget->show();
}

void RPCConsole::resizeEvent(QResizeEvent *event)
{
    QWidget::resizeEvent(event);
}

// Start the peer table auto-refresh only while the dialog is visible.
void RPCConsole::showEvent(QShowEvent *event)
{
    QWidget::showEvent(event);

    if (!clientModel || !clientModel->getPeerTableModel())
        return;

    // start PeerTableModel auto refresh
    clientModel->getPeerTableModel()->startAutoRefresh();
}

void RPCConsole::hideEvent(QHideEvent *event)
{
    QWidget::hideEvent(event);

    if (!clientModel || !clientModel->getPeerTableModel())
        return;

    // stop PeerTableModel auto refresh
    clientModel->getPeerTableModel()->stopAutoRefresh();
}

// Pop up the peers context menu at the cursor for a valid row.
void RPCConsole::showPeersTableContextMenu(const QPoint& point)
{
    QModelIndex index = ui->peerWidget->indexAt(point);
    if (index.isValid())
        peersTableContextMenu->exec(QCursor::pos());
}

// Pop up the ban-list context menu at the cursor for a valid row.
void RPCConsole::showBanTableContextMenu(const QPoint& point)
{
    QModelIndex index = ui->banlistWidget->indexAt(point);
    if (index.isValid())
        banTableContextMenu->exec(QCursor::pos());
}

// Disconnect every currently selected peer.
void RPCConsole::disconnectSelectedNode()
{
    if(!g_connman)
        return;

    // Get selected peer addresses
    QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->peerWidget, PeerTableModel::NetNodeId);
    for(int i = 0; i < nodes.count(); i++)
    {
        // Get currently selected peer address
        NodeId id = nodes.at(i).data().toLongLong();
        // Find the node, disconnect it and clear the selected node
        if(g_connman->DisconnectNode(id))
            clearSelectedNode();
    }
}

// Ban every currently selected peer for `bantime` seconds.
void RPCConsole::banSelectedNode(int bantime)
{
    if (!clientModel || !g_connman)
        return;

    // Get selected peer addresses
    QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->peerWidget, PeerTableModel::NetNodeId);
    for(int i = 0; i < nodes.count(); i++)
    {
        // Get currently selected peer address
        NodeId id = nodes.at(i).data().toLongLong();

        // Get currently selected peer address
        int detailNodeRow = clientModel->getPeerTableModel()->getRowByNodeId(id);
        if(detailNodeRow < 0)
            return;

        // Find possible nodes, ban it and clear the selected node
        const CNodeCombinedStats *stats = clientModel->getPeerTableModel()->getNodeStats(detailNodeRow);
        if(stats) {
            g_connman->Ban(stats->nodeStats.addr, BanReasonManuallyAdded, bantime);
        }
    }
    clearSelectedNode();
    clientModel->getBanTableModel()->refresh();
}

// Lift the ban for every selected entry of the ban table.
void RPCConsole::unbanSelectedNode()
{
    if (!clientModel)
        return;

    // Get selected ban addresses
    QList<QModelIndex> nodes = GUIUtil::getEntryData(ui->banlistWidget, BanTableModel::Address);
    for(int i = 0; i < nodes.count(); i++)
    {
        // Get currently selected ban address
        QString strNode = nodes.at(i).data().toString();
        CSubNet possibleSubnet;

        LookupSubNet(strNode.toStdString().c_str(), possibleSubnet);
        if (possibleSubnet.IsValid() && g_connman)
        {
            g_connman->Unban(possibleSubnet);
            clientModel->getBanTableModel()->refresh();
        }
    }
}

// Clear the peer selection, the cached ids, and hide the detail pane.
void RPCConsole::clearSelectedNode()
{
    ui->peerWidget->selectionModel()->clearSelection();
    cachedNodeids.clear();
    ui->detailWidget->hide();
    ui->peerHeading->setText(tr("Select a peer to view detailed information."));
}

// Show the ban table only when the model says it has entries to display.
void RPCConsole::showOrHideBanTableIfRequired()
{
    if (!clientModel)
        return;

    bool visible = clientModel->getBanTableModel()->shouldShow();
    ui->banlistWidget->setVisible(visible);
    ui->banHeading->setVisible(visible);
}

void RPCConsole::setTabFocus(enum TabTypes tabType)
{
    ui->tabWidget->setCurrentIndex(tabType);
}
/* This file is part of mediaserver. A webrtc sfu server.
 * Copyright (C) 2018 Arvind Umrao <akumrao@yahoo.com> & Herman Umrao<hermanumrao@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include "base/platform.h"
#include "base/error.h"
#include "uv.h"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <sstream>
#include <stdexcept>
#include <string>

#ifdef base_WIN
#include <windows.h>
#include <winsock2.h>
#else
#include <unistd.h>
#endif

// Buffer size for the libuv path queries below.
#define PATHMAX 1024

namespace base {

// Returns the absolute path of the running executable.
// @throws std::runtime_error if libuv cannot resolve the path.
std::string getExePath()
{
    char buf[PATHMAX];
    size_t size = PATHMAX;
    if (uv_exepath(buf, &size) != 0)
        throw std::runtime_error(
            "System error: Cannot resolve executable path");
    // uv_exepath reports the actual length in `size`.
    return std::string(buf, size);
}

// Returns the current working directory.
// @throws std::runtime_error if libuv cannot resolve it.
std::string getCwd()
{
    char buf[PATHMAX];
    size_t size = PATHMAX;
    if (uv_cwd(buf, &size) != 0)
        throw std::runtime_error(
            "System error: Cannot resolve working directory");
    return std::string(buf);
}

// Free system memory in bytes, as reported by libuv.
uint64_t getFreeMemory()
{
    return uv_get_free_memory();
}

// Total system memory in bytes, as reported by libuv.
uint64_t getTotalMemory()
{
    return uv_get_total_memory();
}

// Number of logical CPU cores, or 0 if the query fails.
int numCpuCores()
{
    uv_cpu_info_t* info;
    // FIX: cpu_count was previously left uninitialized and the return value
    // of uv_cpu_info() was ignored; on failure this read an indeterminate
    // count and freed a dangling pointer.
    int cpu_count = 0;
    if (uv_cpu_info(&info, &cpu_count) == 0)
        uv_free_cpu_info(info, cpu_count);
    return cpu_count;
}

// Suspend the calling thread for `ms` milliseconds (cross-platform via libuv).
void sleep(int ms)
{
    uv_sleep(ms);
}

// Block until the user presses enter (console helper).
void pause()
{
    std::puts("Press enter to continue...");
    std::getchar();
}

// Returns the local host name, or an empty string if the query fails.
std::string getHostname()
{
    char name[256];
    name[0] = '\0';
    // FIX: gethostname() does not guarantee null termination when the name
    // is truncated, and leaves the buffer unspecified on error; reserve the
    // last byte for the terminator and reset on failure.
    if (gethostname(name, sizeof(name) - 1) != 0)
        name[0] = '\0';
    name[sizeof(name) - 1] = '\0';
    return name;
}

// Returns the value of environment variable `name`, or `defaultValue`
// when it is unset.
std::string getEnv(const std::string& name, const std::string& defaultValue)
{
    const char* value = getenv(name.c_str());
    if (value)
        return value;
    return defaultValue;
}

// True when environment variable `name` is set to "1" or "true".
bool getEnvBool(const std::string& name)
{
    const char* value = getenv(name.c_str());
    return value && (
        strcmp(value, "1") == 0 ||
        strcmp(value, "true") == 0);
}

} // namespace base

/// @\}
// Optional embedded-Python support: needed so exceptions raised inside
// Python layers can be printed via PyErr_Print() in main().
#ifdef WITH_PYTHON_LAYER
#include "boost/python.hpp"
namespace bp = boost::python;
#endif

#include <glog/logging.h>

#include <cstring>
#include <map>
#include <string>
#include <vector>

#include "boost/algorithm/string.hpp"
#include "caffe/caffe.hpp"
#include "caffe/util/signal_handler.h"

using caffe::Blob;
using caffe::Caffe;
using caffe::Net;
using caffe::Layer;
using caffe::Solver;
using caffe::shared_ptr;
using caffe::string;
using caffe::Timer;
using caffe::vector;
using std::ostringstream;

// Command-line flags (gflags); the description strings double as --help text.
DEFINE_string(gpu, "",
    "Optional; run in GPU mode on given device IDs separated by ','."
    "Use '-gpu all' to run on all available GPUs. The effective training "
    "batch size is multiplied by the number of devices.");
DEFINE_string(solver, "",
    "The solver definition protocol buffer text file.");
DEFINE_string(model, "",
    "The model definition protocol buffer text file..");
DEFINE_string(snapshot, "",
    "Optional; the snapshot solver state to resume training.");
DEFINE_string(weights, "",
    "Optional; the pretrained weights to initialize finetuning, "
    "separated by ','. Cannot be set simultaneously with snapshot.");
DEFINE_int32(iterations, 50,
    "The number of iterations to run.");
DEFINE_string(sigint_effect, "stop",
    "Optional; action to take when a SIGINT signal is received: "
    "snapshot, stop or none.");
DEFINE_string(sighup_effect, "snapshot",
    "Optional; action to take when a SIGHUP signal is received: "
    "snapshot, stop or none.");

// A simple registry for caffe commands.
// Map from command name ("train", "test", ...) to its implementation.
typedef int (*BrewFunction)();
typedef std::map<caffe::string, BrewFunction> BrewMap;
BrewMap g_brew_map;

// Registers `func` in g_brew_map at static-initialization time via a
// file-local registrar object.
#define RegisterBrewFunction(func) \
namespace { \
class __Registerer_##func { \
 public: /* NOLINT */ \
  __Registerer_##func() { \
    g_brew_map[#func] = &func; \
  } \
}; \
__Registerer_##func g_registerer_##func; \
}

// Looks up a registered command; logs the available commands and aborts
// (LOG(FATAL)) on an unknown name.
static BrewFunction GetBrewFunction(const caffe::string& name) {
  if (g_brew_map.count(name)) {
    return g_brew_map[name];
  } else {
    LOG(ERROR) << "Available caffe actions:";
    for (BrewMap::iterator it = g_brew_map.begin();
         it != g_brew_map.end(); ++it) {
      LOG(ERROR) << "\t" << it->first;
    }
    LOG(FATAL) << "Unknown action: " << name;
    return NULL;  // not reachable, just to suppress old compiler warnings.
  }
}

// Parse GPU ids or use all available devices
static void get_gpus(vector<int>* gpus) {
  if (FLAGS_gpu == "all") {
    int count = 0;
#ifndef CPU_ONLY
    CUDA_CHECK(cudaGetDeviceCount(&count));
#else
    NO_GPU;
#endif
    for (int i = 0; i < count; ++i) {
      gpus->push_back(i);
    }
  } else if (FLAGS_gpu.size()) {
    // Comma-separated explicit device list, e.g. "-gpu 0,1".
    vector<string> strings;
    boost::split(strings, FLAGS_gpu, boost::is_any_of(","));
    for (int i = 0; i < strings.size(); ++i) {
      gpus->push_back(boost::lexical_cast<int>(strings[i]));
    }
  } else {
    CHECK_EQ(gpus->size(), 0);
  }
}

// caffe commands to call by
//     caffe <command> <args>
//
// To add a command, define a function "int command()" and register it with
// RegisterBrewFunction(action);

// Device Query: show diagnostic information for a GPU device.
int device_query() {
  LOG(INFO) << "Querying GPUs " << FLAGS_gpu;
  vector<int> gpus;
  get_gpus(&gpus);
  for (int i = 0; i < gpus.size(); ++i) {
    caffe::Caffe::SetDevice(gpus[i]);
    caffe::Caffe::DeviceQuery();
  }
  return 0;
}
RegisterBrewFunction(device_query);

// Load the weights from the specified caffemodel(s) into the train and
// test nets.
void CopyLayers(caffe::Solver<float>* solver, const std::string& model_list) { std::vector<std::string> model_names; boost::split(model_names, model_list, boost::is_any_of(",") ); for (int i = 0; i < model_names.size(); ++i) { LOG(INFO) << "Finetuning from " << model_names[i]; solver->net()->CopyTrainedLayersFrom(model_names[i]); for (int j = 0; j < solver->test_nets().size(); ++j) { solver->test_nets()[j]->CopyTrainedLayersFrom(model_names[i]); } } } // Translate the signal effect the user specified on the command-line to the // corresponding enumeration. caffe::SolverAction::Enum GetRequestedAction( const std::string& flag_value) { if (flag_value == "stop") { return caffe::SolverAction::STOP; } if (flag_value == "snapshot") { return caffe::SolverAction::SNAPSHOT; } if (flag_value == "none") { return caffe::SolverAction::NONE; } LOG(FATAL) << "Invalid signal effect \""<< flag_value << "\" was specified"; } // Train / Finetune a model. int train() { CHECK_GT(FLAGS_solver.size(), 0) << "Need a solver definition to train."; CHECK(!FLAGS_snapshot.size() || !FLAGS_weights.size()) << "Give a snapshot to resume training or weights to finetune " "but not both."; caffe::SolverParameter solver_param; caffe::ReadProtoFromTextFileOrDie(FLAGS_solver, &solver_param); // If the gpus flag is not provided, allow the mode and device to be set // in the solver prototxt. if (FLAGS_gpu.size() == 0 && solver_param.solver_mode() == caffe::SolverParameter_SolverMode_GPU) { if (solver_param.has_device_id()) { FLAGS_gpu = "" + boost::lexical_cast<string>(solver_param.device_id()); } else { // Set default GPU if unspecified FLAGS_gpu = "" + boost::lexical_cast<string>(0); } } vector<int> gpus; get_gpus(&gpus); if (gpus.size() == 0) { Caffe::set_mode(Caffe::CPU); } else { ostringstream s; for (int i = 0; i < gpus.size(); ++i) { s << (i ? 
", " : "") << gpus[i]; } LOG(INFO) << "Using GPUs " << s.str(); solver_param.set_device_id(gpus[0]); Caffe::SetDevice(gpus[0]); Caffe::set_mode(Caffe::GPU); Caffe::set_solver_count(gpus.size()); } caffe::SignalHandler signal_handler( GetRequestedAction(FLAGS_sigint_effect), GetRequestedAction(FLAGS_sighup_effect)); shared_ptr<caffe::Solver<float> > solver(caffe::GetSolver<float>(solver_param)); solver->SetActionFunction(signal_handler.GetActionFunction()); if (FLAGS_snapshot.size()) { LOG(INFO) << "Resuming from " << FLAGS_snapshot; solver->Restore(FLAGS_snapshot.c_str()); } else if (FLAGS_weights.size()) { CopyLayers(solver.get(), FLAGS_weights); } if (gpus.size() > 1) { caffe::P2PSync<float> sync(solver, NULL, solver->param()); sync.run(gpus); } else { LOG(INFO) << "Starting Optimization"; solver->Solve(); } LOG(INFO) << "Optimization Done."; return 0; } RegisterBrewFunction(train); // Test: score a model. int test() { CHECK_GT(FLAGS_model.size(), 0) << "Need a model definition to score."; CHECK_GT(FLAGS_weights.size(), 0) << "Need model weights to score."; // Set device id and mode vector<int> gpus; get_gpus(&gpus); if (gpus.size() != 0) { LOG(INFO) << "Use GPU with device ID " << gpus[0]; Caffe::SetDevice(gpus[0]); Caffe::set_mode(Caffe::GPU); } else { LOG(INFO) << "Use CPU."; Caffe::set_mode(Caffe::CPU); } // Instantiate the caffe net. 
Net<float> caffe_net(FLAGS_model, caffe::TEST); caffe_net.CopyTrainedLayersFrom(FLAGS_weights); LOG(INFO) << "Running for " << FLAGS_iterations << " iterations."; vector<Blob<float>* > bottom_vec; vector<int> test_score_output_id; vector<float> test_score; float loss = 0; for (int i = 0; i < FLAGS_iterations; ++i) { float iter_loss; const vector<Blob<float>*>& result = caffe_net.Forward(bottom_vec, &iter_loss); loss += iter_loss; int idx = 0; for (int j = 0; j < result.size(); ++j) { const float* result_vec = result[j]->cpu_data(); for (int k = 0; k < result[j]->count(); ++k, ++idx) { const float score = result_vec[k]; if (i == 0) { test_score.push_back(score); test_score_output_id.push_back(j); } else { test_score[idx] += score; } const std::string& output_name = caffe_net.blob_names()[ caffe_net.output_blob_indices()[j]]; LOG(INFO) << "Batch " << i << ", " << output_name << " = " << score; } } } loss /= FLAGS_iterations; LOG(INFO) << "Loss: " << loss; for (int i = 0; i < test_score.size(); ++i) { const std::string& output_name = caffe_net.blob_names()[ caffe_net.output_blob_indices()[test_score_output_id[i]]]; const float loss_weight = caffe_net.blob_loss_weights()[ caffe_net.output_blob_indices()[test_score_output_id[i]]]; std::ostringstream loss_msg_stream; const float mean_score = test_score[i] / FLAGS_iterations; if (loss_weight) { loss_msg_stream << " (* " << loss_weight << " = " << loss_weight * mean_score << " loss)"; } LOG(INFO) << output_name << " = " << mean_score << loss_msg_stream.str(); } return 0; } RegisterBrewFunction(test); // Time: benchmark the execution time of a model. int time() { CHECK_GT(FLAGS_model.size(), 0) << "Need a model definition to time."; // Set device id and mode vector<int> gpus; get_gpus(&gpus); if (gpus.size() != 0) { LOG(INFO) << "Use GPU with device ID " << gpus[0]; Caffe::SetDevice(gpus[0]); Caffe::set_mode(Caffe::GPU); } else { LOG(INFO) << "Use CPU."; Caffe::set_mode(Caffe::CPU); } // Instantiate the caffe net. 
Net<float> caffe_net(FLAGS_model, caffe::TRAIN); // Do a clean forward and backward pass, so that memory allocation are done // and future iterations will be more stable. LOG(INFO) << "Performing Forward"; // Note that for the speed benchmark, we will assume that the network does // not take any input blobs. float initial_loss; caffe_net.Forward(vector<Blob<float>*>(), &initial_loss); LOG(INFO) << "Initial loss: " << initial_loss; LOG(INFO) << "Performing Backward"; caffe_net.Backward(); const vector<shared_ptr<Layer<float> > >& layers = caffe_net.layers(); const vector<vector<Blob<float>*> >& bottom_vecs = caffe_net.bottom_vecs(); const vector<vector<Blob<float>*> >& top_vecs = caffe_net.top_vecs(); const vector<vector<bool> >& bottom_need_backward = caffe_net.bottom_need_backward(); LOG(INFO) << "*** Benchmark begins ***"; LOG(INFO) << "Testing for " << FLAGS_iterations << " iterations."; Timer total_timer; total_timer.Start(); Timer forward_timer; Timer backward_timer; Timer timer; std::vector<double> forward_time_per_layer(layers.size(), 0.0); std::vector<double> backward_time_per_layer(layers.size(), 0.0); double forward_time = 0.0; double backward_time = 0.0; for (int j = 0; j < FLAGS_iterations; ++j) { Timer iter_timer; iter_timer.Start(); forward_timer.Start(); for (int i = 0; i < layers.size(); ++i) { timer.Start(); layers[i]->Forward(bottom_vecs[i], top_vecs[i]); forward_time_per_layer[i] += timer.MicroSeconds(); } forward_time += forward_timer.MicroSeconds(); backward_timer.Start(); for (int i = layers.size() - 1; i >= 0; --i) { timer.Start(); layers[i]->Backward(top_vecs[i], bottom_need_backward[i], bottom_vecs[i]); backward_time_per_layer[i] += timer.MicroSeconds(); } backward_time += backward_timer.MicroSeconds(); LOG(INFO) << "Iteration: " << j + 1 << " forward-backward time: " << iter_timer.MilliSeconds() << " ms."; } LOG(INFO) << "Average time per layer: "; for (int i = 0; i < layers.size(); ++i) { const caffe::string& layername = 
layers[i]->layer_param().name(); LOG(INFO) << std::setfill(' ') << std::setw(10) << layername << "\tforward: " << forward_time_per_layer[i] / 1000 / FLAGS_iterations << " ms."; LOG(INFO) << std::setfill(' ') << std::setw(10) << layername << "\tbackward: " << backward_time_per_layer[i] / 1000 / FLAGS_iterations << " ms."; } total_timer.Stop(); LOG(INFO) << "Average Forward pass: " << forward_time / 1000 / FLAGS_iterations << " ms."; LOG(INFO) << "Average Backward pass: " << backward_time / 1000 / FLAGS_iterations << " ms."; LOG(INFO) << "Average Forward-Backward: " << total_timer.MilliSeconds() / FLAGS_iterations << " ms."; LOG(INFO) << "Total Time: " << total_timer.MilliSeconds() << " ms."; LOG(INFO) << "*** Benchmark ends ***"; return 0; } RegisterBrewFunction(time); int main(int argc, char** argv) { // Print output to stderr (while still logging). FLAGS_alsologtostderr = 1; // Usage message. gflags::SetUsageMessage("command line brew\n" "usage: caffe <command> <args>\n\n" "commands:\n" " train train or finetune a model\n" " test score a model\n" " device_query show GPU diagnostic information\n" " time benchmark model execution time"); // Run tool or show usage. caffe::GlobalInit(&argc, &argv); if (argc == 2) { #ifdef WITH_PYTHON_LAYER try { #endif return GetBrewFunction(caffe::string(argv[1]))(); #ifdef WITH_PYTHON_LAYER } catch (bp::error_already_set) { PyErr_Print(); return 1; } #endif } else { gflags::ShowUsageWithFlagsRestrict(argv[0], "tools/caffe"); } }
// // MIT License // // Copyright (c) 2017-2018 Thibault Martinez and Simon Ninon // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. // // #include <gtest/gtest.h> #include <json.hpp> #include <iota/api/responses/get_inclusion_states.hpp> TEST(GetInclusionStatesResponse, CtorShouldInitFields) { const IOTA::API::Responses::GetInclusionStates res{ std::vector<bool>({ true, false, true }) }; EXPECT_EQ(res.getStates(), std::vector<bool>({ true, false, true })); EXPECT_EQ(res.getDuration(), 0); } TEST(GetInclusionStatesResponse, DeserializeShouldSetFields) { IOTA::API::Responses::GetInclusionStates res; json data; std::vector<bool> states; states.push_back(true); data["states"] = states; res.deserialize(data); EXPECT_EQ(res.getStates(), states); }
// Copyright (c) 2012-2015 The Bitcoin Core developers // Copyright (c) 2020-2022 The Cosanta Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include "consensus/merkle.h" #include "merkleblock.h" #include "serialize.h" #include "streams.h" #include "uint256.h" #include "arith_uint256.h" #include "version.h" #include "test/test_cosanta.h" #include <vector> #include <boost/test/unit_test.hpp> class CPartialMerkleTreeTester : public CPartialMerkleTree { public: // flip one bit in one of the hashes - this should break the authentication void Damage() { unsigned int n = InsecureRandRange(vHash.size()); int bit = InsecureRandBits(8); *(vHash[n].begin() + (bit>>3)) ^= 1<<(bit&7); } }; BOOST_FIXTURE_TEST_SUITE(pmt_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(pmt_test1) { SeedInsecureRand(false); static const unsigned int nTxCounts[] = {1, 4, 7, 17, 56, 100, 127, 256, 312, 513, 1000, 4095}; for (int i = 0; i < 12; i++) { unsigned int nTx = nTxCounts[i]; // build a block with some dummy transactions CBlock block; for (unsigned int j=0; j<nTx; j++) { CMutableTransaction tx; tx.nLockTime = j; // actual transaction data doesn't matter; just make the nLockTime's unique block.vtx.push_back(MakeTransactionRef(std::move(tx))); } // calculate actual merkle root and height uint256 merkleRoot1 = BlockMerkleRoot(block); std::vector<uint256> vTxid(nTx, uint256()); for (unsigned int j=0; j<nTx; j++) vTxid[j] = block.vtx[j]->GetHash(); int nHeight = 1, nTx_ = nTx; while (nTx_ > 1) { nTx_ = (nTx_+1)/2; nHeight++; } // check with random subsets with inclusion chances 1, 1/2, 1/4, ..., 1/128 for (int att = 1; att < 15; att++) { // build random subset of txid's std::vector<bool> vMatch(nTx, false); std::vector<uint256> vMatchTxid1; for (unsigned int j=0; j<nTx; j++) { bool fInclude = InsecureRandBits(att / 2) == 0; vMatch[j] = fInclude; if (fInclude) vMatchTxid1.push_back(vTxid[j]); } 
// build the partial merkle tree CPartialMerkleTree pmt1(vTxid, vMatch); // serialize CDataStream ss(SER_NETWORK, PROTOCOL_VERSION); ss << pmt1; // verify CPartialMerkleTree's size guarantees unsigned int n = std::min<unsigned int>(nTx, 1 + vMatchTxid1.size()*nHeight); BOOST_CHECK(ss.size() <= 10 + (258*n+7)/8); // deserialize into a tester copy CPartialMerkleTreeTester pmt2; ss >> pmt2; // extract merkle root and matched txids from copy std::vector<uint256> vMatchTxid2; std::vector<unsigned int> vIndex; uint256 merkleRoot2 = pmt2.ExtractMatches(vMatchTxid2, vIndex); // check that it has the same merkle root as the original, and a valid one BOOST_CHECK(merkleRoot1 == merkleRoot2); BOOST_CHECK(!merkleRoot2.IsNull()); // check that it contains the matched transactions (in the same order!) BOOST_CHECK(vMatchTxid1 == vMatchTxid2); // check that random bit flips break the authentication for (int j=0; j<4; j++) { CPartialMerkleTreeTester pmt3(pmt2); pmt3.Damage(); std::vector<uint256> vMatchTxid3; uint256 merkleRoot3 = pmt3.ExtractMatches(vMatchTxid3, vIndex); BOOST_CHECK(merkleRoot3 != merkleRoot1); } } } } BOOST_AUTO_TEST_CASE(pmt_malleability) { std::vector<uint256> vTxid = { ArithToUint256(1), ArithToUint256(2), ArithToUint256(3), ArithToUint256(4), ArithToUint256(5), ArithToUint256(6), ArithToUint256(7), ArithToUint256(8), ArithToUint256(9), ArithToUint256(10), ArithToUint256(9), ArithToUint256(10), }; std::vector<bool> vMatch = {false, false, false, false, false, false, false, false, false, true, true, false}; CPartialMerkleTree tree(vTxid, vMatch); std::vector<unsigned int> vIndex; BOOST_CHECK(tree.ExtractMatches(vTxid, vIndex).IsNull()); } BOOST_AUTO_TEST_SUITE_END()
// Copyright Carl Philipp Reh 2006 - 2019. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <sge/opengl/context/base.hpp> #include <sge/opengl/context/id.hpp> #include <sge/opengl/context/make_id.hpp> #include <sge/opengl/state/ffp/sampler/default_context.hpp> #include <sge/opengl/state/ffp/sampler/make_actors.hpp> #include <sge/opengl/state/ffp/sampler/object.hpp> #include <sge/renderer/state/ffp/sampler/default.hpp> #include <sge/renderer/state/ffp/sampler/parameters.hpp> #include <fcppt/make_unique_ptr.hpp> #include <fcppt/preprocessor/disable_clang_warning.hpp> #include <fcppt/preprocessor/pop_warning.hpp> #include <fcppt/preprocessor/push_warning.hpp> sge::opengl::state::ffp::sampler::default_context::default_context() : sge::opengl::context::base(), default_state_(fcppt::make_unique_ptr<sge::opengl::state::ffp::sampler::object>( sge::opengl::state::ffp::sampler::make_actors( sge::renderer::state::ffp::sampler::default_()))) { } sge::opengl::state::ffp::sampler::default_context::~default_context() = default; sge::opengl::state::ffp::sampler::object const & sge::opengl::state::ffp::sampler::default_context::default_state() const { return *default_state_; } FCPPT_PP_PUSH_WARNING FCPPT_PP_DISABLE_CLANG_WARNING(-Wglobal-constructors) sge::opengl::context::id const sge::opengl::state::ffp::sampler::default_context::static_id(sge::opengl::context::make_id()); FCPPT_PP_POP_WARNING
/* TEMPLATE GENERATED TESTCASE FILE
Filename: CWE122_Heap_Based_Buffer_Overflow__cpp_CWE129_fgets_67a.cpp
Label Definition File: CWE122_Heap_Based_Buffer_Overflow__cpp_CWE129.label.xml
Template File: sources-sinks-67a.tmpl.cpp
*/
/*
 * @description
 * CWE: 122 Heap Based Buffer Overflow
 * BadSource: fgets Read data from the console using fgets()
 * GoodSource: Larger than zero but less than 10
 * Sinks:
 *    GoodSink: Ensure the array index is valid
 *    BadSink : Improperly check the array index by not checking the upper bound
 * Flow Variant: 67 Data flow: data passed in a struct from one function to another in different source files
 *
 * */

/* NOTE(review): Juliet/CWE benchmark fixture — the "POTENTIAL FLAW" paths
 * below are intentional and must be preserved verbatim for analysis-tool
 * testing; do not "fix" them. */

#include "std_testcase.h"

#define CHAR_ARRAY_SIZE (3 * sizeof(data) + 2)

namespace CWE122_Heap_Based_Buffer_Overflow__cpp_CWE129_fgets_67
{

typedef struct _structType
{
    int structFirst;
} structType;

#ifndef OMITBAD

/* bad function declaration */
void badSink(structType myStruct);

void bad()
{
    int data;
    structType myStruct;
    /* Initialize data */
    data = -1;
    {
        char inputBuffer[CHAR_ARRAY_SIZE] = "";
        /* POTENTIAL FLAW: Read data from the console using fgets() */
        if (fgets(inputBuffer, CHAR_ARRAY_SIZE, stdin) != NULL)
        {
            /* Convert to int */
            data = atoi(inputBuffer);
        }
        else
        {
            printLine("fgets() failed.");
        }
    }
    myStruct.structFirst = data;
    badSink(myStruct);
}

#endif /* OMITBAD */

#ifndef OMITGOOD

/* goodG2B uses the GoodSource with the BadSink */
void goodG2BSink(structType myStruct);

static void goodG2B()
{
    int data;
    structType myStruct;
    /* Initialize data */
    data = -1;
    /* FIX: Use a value greater than 0, but less than 10 to avoid attempting to
     * access an index of the array in the sink that is out-of-bounds */
    data = 7;
    myStruct.structFirst = data;
    goodG2BSink(myStruct);
}

/* goodB2G uses the BadSource with the GoodSink */
void goodB2GSink(structType myStruct);

static void goodB2G()
{
    int data;
    structType myStruct;
    /* Initialize data */
    data = -1;
    {
        char inputBuffer[CHAR_ARRAY_SIZE] = "";
        /* POTENTIAL FLAW: Read data from the console using fgets() */
        if (fgets(inputBuffer, CHAR_ARRAY_SIZE, stdin) != NULL)
        {
            /* Convert to int */
            data = atoi(inputBuffer);
        }
        else
        {
            printLine("fgets() failed.");
        }
    }
    myStruct.structFirst = data;
    goodB2GSink(myStruct);
}

void good()
{
    goodG2B();
    goodB2G();
}

#endif /* OMITGOOD */

} /* close namespace */

/* Below is the main(). It is only used when building this testcase on
   its own for testing or for building a binary to use in testing binary
   analysis tools. It is not used when compiling all the testcases as one
   application, which is how source code analysis tools are tested. */

#ifdef INCLUDEMAIN

using namespace CWE122_Heap_Based_Buffer_Overflow__cpp_CWE129_fgets_67; /* so that we can use good and bad easily */

int main(int argc, char * argv[])
{
    /* seed randomness */
    srand( (unsigned)time(NULL) );
#ifndef OMITGOOD
    printLine("Calling good()...");
    good();
    printLine("Finished good()");
#endif /* OMITGOOD */
#ifndef OMITBAD
    printLine("Calling bad()...");
    bad();
    printLine("Finished bad()");
#endif /* OMITBAD */
    return 0;
}

#endif
/****************************************************************************** * * Project: GDAL Gridding API. * Purpose: Implementation of GDAL scattered data gridder. * Author: Even Rouault, <even dot rouault at spatialys.com> * ****************************************************************************** * Copyright (c) 2013, Even Rouault <even dot rouault at spatialys.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
 ****************************************************************************/

#include "gdalgrid.h"
#include "gdalgrid_priv.h"

#ifdef HAVE_AVX_AT_COMPILE_TIME

#include <immintrin.h>

CPL_CVSID("$Id$")

/************************************************************************/
/*       GDALGridInverseDistanceToAPower2NoSmoothingNoSearchAVX()       */
/************************************************************************/

// Broadcast a single scalar float into all 8 lanes of a YMM register.
#define GDAL_mm256_load1_ps(x) _mm256_set_ps(x, x, x, x, x, x, x, x)

/**
 * Compute one grid node with the inverse-distance-to-a-power algorithm,
 * specialized to power == 2, no smoothing and no search radius, using
 * AVX (8 floats per YMM register, processed 16 or 8 points per
 * iteration depending on the target).
 *
 * The double-precision point arrays are deliberately unused: the
 * single-precision copies pafX/pafY/pafZ from hExtraParamsIn are read
 * instead so that vector loads are done in float.  _mm256_load_ps is an
 * *aligned* load, so those arrays must be 32-byte aligned — assumed
 * guaranteed by whoever filled GDALGridExtraParameters (TODO confirm).
 *
 * Note: _mm256_rcp_ps is only an approximate reciprocal, so results can
 * differ slightly from an exact-division scalar implementation.
 *
 * @param poOptions GDALGridInverseDistanceToAPowerOptions*; only
 *        dfNoDataValue is read (when the accumulated denominator is 0).
 * @param nPoints number of input points.
 * @param unused_padfX unused (see above).
 * @param unused_padfY unused (see above).
 * @param unused_padfZ unused (see above).
 * @param dfXPoint X coordinate of the grid node to compute.
 * @param dfYPoint Y coordinate of the grid node to compute.
 * @param pdfValue out: interpolated value for the node.
 * @param hExtraParamsIn GDALGridExtraParameters* carrying pafX/pafY/pafZ.
 * @return CE_None in all cases.
 */
CPLErr
GDALGridInverseDistanceToAPower2NoSmoothingNoSearchAVX(
                        const void *poOptions,
                        GUInt32 nPoints,
                        CPL_UNUSED const double *unused_padfX,
                        CPL_UNUSED const double *unused_padfY,
                        CPL_UNUSED const double *unused_padfZ,
                        double dfXPoint, double dfYPoint,
                        double *pdfValue,
                        void* hExtraParamsIn )
{
    size_t i = 0;
    GDALGridExtraParameters* psExtraParams =
        static_cast<GDALGridExtraParameters*>(hExtraParamsIn);
    const float* pafX = psExtraParams->pafX;
    const float* pafY = psExtraParams->pafY;
    const float* pafZ = psExtraParams->pafZ;

    // Threshold under which a sample point is considered to coincide
    // with the grid node (its Z is then used directly).
    const float fEpsilon = 0.0000000000001f;
    const float fXPoint = static_cast<float>(dfXPoint);
    const float fYPoint = static_cast<float>(dfYPoint);
    const __m256 ymm_small = GDAL_mm256_load1_ps(fEpsilon);
    const __m256 ymm_x = GDAL_mm256_load1_ps(fXPoint);
    const __m256 ymm_y = GDAL_mm256_load1_ps(fYPoint);
    // Running sums of invr2 * Z and invr2 across all lanes.
    __m256 ymm_nominator = _mm256_setzero_ps();
    __m256 ymm_denominator = _mm256_setzero_ps();
    // Per-lane bitmask of points closer than fEpsilon; non-zero aborts
    // the vector loop.
    int mask = 0;

#undef LOOP_SIZE
#if defined(__x86_64) || defined(_M_X64)
    /* This would also work in 32bit mode, but there are only 8 XMM registers */
    /* whereas we have 16 for 64bit */
#define LOOP_SIZE 16
    size_t nPointsRound = (nPoints / LOOP_SIZE) * LOOP_SIZE;
    for( i = 0; i < nPointsRound; i += LOOP_SIZE )
    {
        // Two interleaved 8-wide streams per iteration (suffix _8 is
        // the second batch of 8 points).
        __m256 ymm_rx = _mm256_sub_ps(_mm256_load_ps(pafX + i), ymm_x);            /* rx = pafX[i] - fXPoint */
        __m256 ymm_rx_8 = _mm256_sub_ps(_mm256_load_ps(pafX + i + 8), ymm_x);
        __m256 ymm_ry = _mm256_sub_ps(_mm256_load_ps(pafY + i), ymm_y);            /* ry = pafY[i] - fYPoint */
        __m256 ymm_ry_8 = _mm256_sub_ps(_mm256_load_ps(pafY + i + 8), ymm_y);

        __m256 ymm_r2 = _mm256_add_ps(_mm256_mul_ps(ymm_rx, ymm_rx),               /* r2 = rx * rx + ry * ry */
                                      _mm256_mul_ps(ymm_ry, ymm_ry));
        __m256 ymm_r2_8 = _mm256_add_ps(_mm256_mul_ps(ymm_rx_8, ymm_rx_8),
                                        _mm256_mul_ps(ymm_ry_8, ymm_ry_8));

        __m256 ymm_invr2 = _mm256_rcp_ps(ymm_r2);                                  /* invr2 = 1.0f / r2 */
        __m256 ymm_invr2_8 = _mm256_rcp_ps(ymm_r2_8);

        ymm_nominator = _mm256_add_ps(ymm_nominator,                               /* nominator += invr2 * pafZ[i] */
                            _mm256_mul_ps(ymm_invr2, _mm256_load_ps(pafZ + i)));
        ymm_nominator = _mm256_add_ps(ymm_nominator,
                            _mm256_mul_ps(ymm_invr2_8, _mm256_load_ps(pafZ + i + 8)));
        ymm_denominator = _mm256_add_ps(ymm_denominator, ymm_invr2);               /* denominator += invr2 */
        ymm_denominator = _mm256_add_ps(ymm_denominator, ymm_invr2_8);

        // Bits 0..7 cover the first batch, bits 8..15 the second.
        mask = _mm256_movemask_ps(_mm256_cmp_ps(ymm_r2, ymm_small, _CMP_LT_OS)) |  /* if( r2 < fEpsilon) */
              (_mm256_movemask_ps(_mm256_cmp_ps(ymm_r2_8, ymm_small, _CMP_LT_OS)) << 8);
        if( mask )
            break;
    }
#else
#define LOOP_SIZE 8
    size_t nPointsRound = (nPoints / LOOP_SIZE) * LOOP_SIZE;
    for( i = 0; i < nPointsRound; i += LOOP_SIZE )
    {
        // Single 8-wide stream; const_cast works around a load
        // prototype that does not take a const pointer here.
        __m256 ymm_rx = _mm256_sub_ps(_mm256_load_ps(const_cast<float*>(pafX) + i), ymm_x);  /* rx = pafX[i] - fXPoint */
        __m256 ymm_ry = _mm256_sub_ps(_mm256_load_ps(const_cast<float*>(pafY) + i), ymm_y);  /* ry = pafY[i] - fYPoint */

        __m256 ymm_r2 = _mm256_add_ps(_mm256_mul_ps(ymm_rx, ymm_rx),                         /* r2 = rx * rx + ry * ry */
                                      _mm256_mul_ps(ymm_ry, ymm_ry));

        __m256 ymm_invr2 = _mm256_rcp_ps(ymm_r2);                                            /* invr2 = 1.0f / r2 */

        ymm_nominator = _mm256_add_ps(ymm_nominator,                                         /* nominator += invr2 * pafZ[i] */
                            _mm256_mul_ps(ymm_invr2, _mm256_load_ps(const_cast<float*>(pafZ) + i)));
        ymm_denominator = _mm256_add_ps(ymm_denominator, ymm_invr2);                         /* denominator += invr2 */

        mask = _mm256_movemask_ps(_mm256_cmp_ps(ymm_r2, ymm_small, _CMP_LT_OS));             /* if( r2 < fEpsilon) */
        if( mask )
            break;
    }
#endif

    // Find which i triggered r2 < fEpsilon.
    if( mask )
    {
        for( int j = 0; j < LOOP_SIZE; j++ )
        {
            if( mask & (1 << j) )
            {
                // Point coincides with the node: use its Z directly to
                // avoid the singularity of 1/r2.
                (*pdfValue) = (pafZ)[i + j];

                // GCC and MSVC need explicit zeroing.
#if !defined(__clang__)
                _mm256_zeroupper();
#endif
                return CE_None;
            }
        }
    }
#undef LOOP_SIZE

    // Get back nominator and denominator values for YMM registers.
    float afNominator[8];
    float afDenominator[8];
    _mm256_storeu_ps(afNominator, ymm_nominator);
    _mm256_storeu_ps(afDenominator, ymm_denominator);

    // MSVC doesn't emit AVX afterwards but may use SSE, so clear
    // upper bits.  Other compilers will continue using AVX for the
    // below floating points operations.
#if defined(_MSC_FULL_VER)
    _mm256_zeroupper();
#endif

    // Horizontal reduction of the 8 lanes into scalars.
    float fNominator = afNominator[0] + afNominator[1] +
                       afNominator[2] + afNominator[3] +
                       afNominator[4] + afNominator[5] +
                       afNominator[6] + afNominator[7];
    float fDenominator = afDenominator[0] + afDenominator[1] +
                         afDenominator[2] + afDenominator[3] +
                         afDenominator[4] + afDenominator[5] +
                         afDenominator[6] + afDenominator[7];

    // Do the few remaining loop iterations.
    for( ; i < nPoints; i++ )
    {
        const float fRX = pafX[i] - fXPoint;
        const float fRY = pafY[i] - fYPoint;
        const float fR2 = fRX * fRX + fRY * fRY;

        // If the test point is close to the grid node, use the point
        // value directly as a node value to avoid singularity.
        if( fR2 < 0.0000000000001 )
        {
            break;
        }
        else
        {
            const float fInvR2 = 1.0f / fR2;
            fNominator += fInvR2 * pafZ[i];
            fDenominator += fInvR2;
        }
    }

    if( i != nPoints )
    {
        // Scalar tail hit a coincident point: use its value directly.
        (*pdfValue) = pafZ[i];
    }
    else if( fDenominator == 0.0 )
    {
        // No usable point at all: emit the configured nodata value.
        (*pdfValue) =
            static_cast<const GDALGridInverseDistanceToAPowerOptions*>(poOptions)->dfNoDataValue;
    }
    else
        (*pdfValue) = fNominator / fDenominator;

    // GCC needs explicit zeroing.
#if defined(__GNUC__) && !defined(__clang__)
    _mm256_zeroupper();
#endif
    return CE_None;
}

#endif /* HAVE_AVX_AT_COMPILE_TIME */
// RUN: rm -f %t.ll // RUN: %atjitc -O2 %s -o %t // RUN: %t %t.ll > %t.out // RUN: %FileCheck %s < %t.out // RUN: %FileCheck --check-prefix=CHECK-IR %s < %t.ll #include <easy/jit.h> #include <easy/options.h> #include <functional> #include <cstdio> // only one function // reading from a global variable // CHECK-IR-NOT: = tail call using namespace std::placeholders; struct Foo { virtual int doit() { return 1; } virtual ~Foo() = default; }; struct Bar : Foo { int doit() override { return 2; } }; int doit(Foo* f) { return f->doit(); } int main(int argc, char** argv) { Foo* f = nullptr; if(argc == 1) f = new Foo(); else f = new Bar(); easy::FunctionWrapper<int(void)> easy_doit = easy::jit(doit, f, easy::options::dump_ir(argv[1])); // CHECK: doit() is 2 printf("doit() is %d\n", easy_doit()); delete f; return 0; }
#include "caffe2/mobile/contrib/arm-compute/test/gl_model_test.h"

namespace caffe2 {

// NOTE: This benchmark test is disabled.  The model's final softmax op
// failed the comparison because of a dimension mismatch; other models
// are unlikely to hit that case, and the softmax implementation is
// believed to be correct, so the test is kept here commented out.
// TEST(OPENGLModelTest, SqueezenetV11) {
//   std::string parent_path = "/data/local/tmp/";
//   benchmarkModel(parent_path + "squeezenet_init.pb", parent_path + "squeezenet_predict.pb", "data", {1, 3, 224, 224}, "squeezenet_v11");
// }

} // namespace caffe2
#include "stdafx.h"
#include "OutStreamWrapper.h"

namespace SevenZip {
namespace intl {

// Adapts a COM IStream to the 7-Zip ISequentialOutStream / IOutStream
// interfaces so the 7-Zip engine can write archive data to any IStream.
OutStreamWrapper::OutStreamWrapper( const CComPtr< IStream >& baseStream )
	: m_refCount( 0 )
	, m_baseStream( baseStream )
{
}

OutStreamWrapper::~OutStreamWrapper()
{
}

// Standard COM QueryInterface: exposes IUnknown, ISequentialOutStream
// and IOutStream; any other IID yields E_NOINTERFACE.
HRESULT STDMETHODCALLTYPE OutStreamWrapper::QueryInterface( REFIID iid, void** ppvObject )
{
	if ( iid == __uuidof( IUnknown ) )
	{
		*ppvObject = static_cast< IUnknown* >( this );
		AddRef();
		return S_OK;
	}

	if ( iid == IID_ISequentialOutStream )
	{
		*ppvObject = static_cast< ISequentialOutStream* >( this );
		AddRef();
		return S_OK;
	}

	if ( iid == IID_IOutStream )
	{
		*ppvObject = static_cast< IOutStream* >( this );
		AddRef();
		return S_OK;
	}

	return E_NOINTERFACE;
}

// Thread-safe reference counting via the Interlocked* primitives.
ULONG STDMETHODCALLTYPE OutStreamWrapper::AddRef()
{
	return static_cast< ULONG >( InterlockedIncrement( &m_refCount ) );
}

// Destroys the wrapper when the last reference is released (the
// standard COM delete-this pattern; instances must be heap-allocated).
ULONG STDMETHODCALLTYPE OutStreamWrapper::Release()
{
	ULONG res = static_cast< ULONG >( InterlockedDecrement( &m_refCount ) );
	if ( res == 0 )
	{
		delete this;
	}
	return res;
}

// ISequentialOutStream::Write — forwards to IStream::Write and reports
// back how many bytes were actually written (processedSize may be NULL).
STDMETHODIMP OutStreamWrapper::Write( const void* data, UInt32 size, UInt32* processedSize )
{
	ULONG written = 0;
	HRESULT hr = m_baseStream->Write( data, size, &written );
	if ( processedSize != NULL )
	{
		*processedSize = written;
	}
	return hr;
}

// IOutStream::Seek — repacks the 64-bit offset into the LARGE_INTEGER
// that IStream::Seek expects.  seekOrigin is passed through unchanged;
// assumes the 7-Zip origin values line up with the STREAM_SEEK_*
// constants — TODO confirm.
STDMETHODIMP OutStreamWrapper::Seek( Int64 offset, UInt32 seekOrigin, UInt64* newPosition )
{
	LARGE_INTEGER move;
	ULARGE_INTEGER newPos;

	move.QuadPart = offset;

	HRESULT hr = m_baseStream->Seek( move, seekOrigin, &newPos );
	if ( newPosition != NULL )
	{
		*newPosition = newPos.QuadPart;
	}
	return hr;
}

// IOutStream::SetSize — forwards to IStream::SetSize.
STDMETHODIMP OutStreamWrapper::SetSize( UInt64 newSize )
{
	ULARGE_INTEGER size;
	size.QuadPart = newSize;
	return m_baseStream->SetSize( size );
}

}
}
#include "misc.h"

#include <fstream>

#include <unistd.h>

#include "3rd-party/catch.hpp"

// Verifies that an article file saved by newsboat has the expected
// layout: four header lines, a single-space separator line, the
// description, and a trailing empty line.
void TestHelpers::assert_article_file_content(const std::string& path,
	const std::string& title,
	const std::string& author,
	const std::string& date,
	const std::string& url,
	const std::string& description)
{
	std::ifstream article(path);
	std::string current_line;

	// Reads the next line from the file and checks it matches exactly.
	const auto require_next_line = [&](const std::string& expected) {
		REQUIRE(std::getline(article, current_line));
		REQUIRE(current_line == expected);
	};

	require_next_line("Title: " + title);
	require_next_line("Author: " + author);
	require_next_line("Date: " + date);
	require_next_line("Link: " + url);
	require_next_line(" ");
	require_next_line(description);
	require_next_line("");
}

// Byte-for-byte copy of `source` into `destination`; fails the test if
// either stream cannot be opened.
void TestHelpers::copy_file(const std::string& source,
	const std::string& destination)
{
	std::ifstream input(source, std::ios::binary);
	std::ofstream output(destination, std::ios::binary);

	REQUIRE(input.is_open());
	REQUIRE(output.is_open());

	output << input.rdbuf();
}

// Returns the file's lines (newlines stripped).  Note that a file
// ending in a newline yields a final empty string, matching the
// original eof-driven loop.
std::vector<std::string> TestHelpers::file_contents(const std::string& filepath)
{
	std::vector<std::string> result;

	std::ifstream input(filepath);
	while (input.is_open() && !input.eof()) {
		std::string current;
		std::getline(input, current);
		result.push_back(std::move(current));
	}

	return result;
}

// True if `input` begins with `prefix` (an empty prefix always matches).
bool TestHelpers::starts_with(const std::string& input,
	const std::string& prefix)
{
	return input.compare(0, prefix.size(), prefix) == 0;
}

// True if a filesystem entry exists at `filepath`.
bool TestHelpers::file_exists(const std::string& filepath)
{
	return 0 == access(filepath.c_str(), F_OK);
}
/* Copyright 2017 - 2021 R. Thomas * Copyright 2017 - 2021 Quarkslab * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef LIEF_PE_ADVAPI32_DLL_LOOKUP_H_ #define LIEF_PE_ADVAPI32_DLL_LOOKUP_H_ #include <map> namespace LIEF { namespace PE { static const std::map<uint32_t, const char*> advapi32_dll_lookup { { 0x0002, "A_SHAFinal" }, { 0x0003, "A_SHAInit" }, { 0x0004, "A_SHAUpdate" }, { 0x0005, "AbortSystemShutdownA" }, { 0x0006, "AbortSystemShutdownW" }, { 0x0007, "AccessCheck" }, { 0x0008, "AccessCheckAndAuditAlarmA" }, { 0x0009, "AccessCheckAndAuditAlarmW" }, { 0x000a, "AccessCheckByType" }, { 0x000b, "AccessCheckByTypeAndAuditAlarmA" }, { 0x000c, "AccessCheckByTypeAndAuditAlarmW" }, { 0x000d, "AccessCheckByTypeResultList" }, { 0x000e, "AccessCheckByTypeResultListAndAuditAlarmA" }, { 0x000f, "AccessCheckByTypeResultListAndAuditAlarmByHandleA" }, { 0x0010, "AccessCheckByTypeResultListAndAuditAlarmByHandleW" }, { 0x0011, "AccessCheckByTypeResultListAndAuditAlarmW" }, { 0x0012, "AddAccessAllowedAce" }, { 0x0013, "AddAccessAllowedAceEx" }, { 0x0014, "AddAccessAllowedObjectAce" }, { 0x0015, "AddAccessDeniedAce" }, { 0x0016, "AddAccessDeniedAceEx" }, { 0x0017, "AddAccessDeniedObjectAce" }, { 0x0018, "AddAce" }, { 0x0019, "AddAuditAccessAce" }, { 0x001a, "AddAuditAccessAceEx" }, { 0x001b, "AddAuditAccessObjectAce" }, { 0x001c, "AddUsersToEncryptedFile" }, { 0x001d, "AdjustTokenGroups" }, { 0x001e, "AdjustTokenPrivileges" }, { 0x001f, "AllocateAndInitializeSid" }, { 0x0020, 
"AllocateLocallyUniqueId" }, { 0x0021, "AreAllAccessesGranted" }, { 0x0022, "AreAnyAccessesGranted" }, { 0x0023, "BackupEventLogA" }, { 0x0024, "BackupEventLogW" }, { 0x0025, "BuildExplicitAccessWithNameA" }, { 0x0026, "BuildExplicitAccessWithNameW" }, { 0x0027, "BuildImpersonateExplicitAccessWithNameA" }, { 0x0028, "BuildImpersonateExplicitAccessWithNameW" }, { 0x0029, "BuildImpersonateTrusteeA" }, { 0x002a, "BuildImpersonateTrusteeW" }, { 0x002b, "BuildSecurityDescriptorA" }, { 0x002c, "BuildSecurityDescriptorW" }, { 0x002d, "BuildTrusteeWithNameA" }, { 0x002e, "BuildTrusteeWithNameW" }, { 0x002f, "BuildTrusteeWithObjectsAndNameA" }, { 0x0030, "BuildTrusteeWithObjectsAndNameW" }, { 0x0031, "BuildTrusteeWithObjectsAndSidA" }, { 0x0032, "BuildTrusteeWithObjectsAndSidW" }, { 0x0033, "BuildTrusteeWithSidA" }, { 0x0034, "BuildTrusteeWithSidW" }, { 0x0035, "CancelOverlappedAccess" }, { 0x0036, "ChangeServiceConfig2A" }, { 0x0037, "ChangeServiceConfig2W" }, { 0x0038, "ChangeServiceConfigA" }, { 0x0039, "ChangeServiceConfigW" }, { 0x003a, "CheckTokenMembership" }, { 0x003b, "ClearEventLogA" }, { 0x003c, "ClearEventLogW" }, { 0x003d, "CloseCodeAuthzLevel" }, { 0x003e, "CloseEncryptedFileRaw" }, { 0x003f, "CloseEventLog" }, { 0x0040, "CloseServiceHandle" }, { 0x0041, "CloseTrace" }, { 0x0042, "CommandLineFromMsiDescriptor" }, { 0x0043, "ComputeAccessTokenFromCodeAuthzLevel" }, { 0x0044, "ControlService" }, { 0x0045, "ControlTraceA" }, { 0x0046, "ControlTraceW" }, { 0x0047, "ConvertAccessToSecurityDescriptorA" }, { 0x0048, "ConvertAccessToSecurityDescriptorW" }, { 0x0049, "ConvertSDToStringSDRootDomainA" }, { 0x004a, "ConvertSDToStringSDRootDomainW" }, { 0x004b, "ConvertSecurityDescriptorToAccessA" }, { 0x004c, "ConvertSecurityDescriptorToAccessNamedA" }, { 0x004d, "ConvertSecurityDescriptorToAccessNamedW" }, { 0x004e, "ConvertSecurityDescriptorToAccessW" }, { 0x004f, "ConvertSecurityDescriptorToStringSecurityDescriptorA" }, { 0x0050, 
"ConvertSecurityDescriptorToStringSecurityDescriptorW" }, { 0x0051, "ConvertSidToStringSidA" }, { 0x0052, "ConvertSidToStringSidW" }, { 0x0053, "ConvertStringSDToSDDomainA" }, { 0x0054, "ConvertStringSDToSDDomainW" }, { 0x0055, "ConvertStringSDToSDRootDomainA" }, { 0x0056, "ConvertStringSDToSDRootDomainW" }, { 0x0057, "ConvertStringSecurityDescriptorToSecurityDescriptorA" }, { 0x0058, "ConvertStringSecurityDescriptorToSecurityDescriptorW" }, { 0x0059, "ConvertStringSidToSidA" }, { 0x005a, "ConvertStringSidToSidW" }, { 0x005b, "ConvertToAutoInheritPrivateObjectSecurity" }, { 0x005c, "CopySid" }, { 0x005d, "CreateCodeAuthzLevel" }, { 0x005e, "CreatePrivateObjectSecurity" }, { 0x005f, "CreatePrivateObjectSecurityEx" }, { 0x0060, "CreatePrivateObjectSecurityWithMultipleInheritance" }, { 0x0061, "CreateProcessAsUserA" }, { 0x0062, "CreateProcessAsUserSecure" }, { 0x0063, "CreateProcessAsUserW" }, { 0x0064, "CreateProcessWithLogonW" }, { 0x0065, "CreateRestrictedToken" }, { 0x0066, "CreateServiceA" }, { 0x0067, "CreateServiceW" }, { 0x0068, "CreateTraceInstanceId" }, { 0x0069, "CreateWellKnownSid" }, { 0x006a, "CredDeleteA" }, { 0x006b, "CredDeleteW" }, { 0x006c, "CredEnumerateA" }, { 0x006d, "CredEnumerateW" }, { 0x006e, "CredFree" }, { 0x006f, "CredGetSessionTypes" }, { 0x0070, "CredGetTargetInfoA" }, { 0x0071, "CredGetTargetInfoW" }, { 0x0072, "CredIsMarshaledCredentialA" }, { 0x0073, "CredIsMarshaledCredentialW" }, { 0x0074, "CredMarshalCredentialA" }, { 0x0075, "CredMarshalCredentialW" }, { 0x0076, "CredProfileLoaded" }, { 0x0077, "CredReadA" }, { 0x0078, "CredReadDomainCredentialsA" }, { 0x0079, "CredReadDomainCredentialsW" }, { 0x007a, "CredReadW" }, { 0x007b, "CredRenameA" }, { 0x007c, "CredRenameW" }, { 0x007d, "CredUnmarshalCredentialA" }, { 0x007e, "CredUnmarshalCredentialW" }, { 0x007f, "CredWriteA" }, { 0x0080, "CredWriteDomainCredentialsA" }, { 0x0081, "CredWriteDomainCredentialsW" }, { 0x0082, "CredWriteW" }, { 0x0083, "CredpConvertCredential" }, { 0x0084, 
"CredpConvertTargetInfo" }, { 0x0085, "CredpDecodeCredential" }, { 0x0086, "CredpEncodeCredential" }, { 0x0087, "CryptAcquireContextA" }, { 0x0088, "CryptAcquireContextW" }, { 0x0089, "CryptContextAddRef" }, { 0x008a, "CryptCreateHash" }, { 0x008b, "CryptDecrypt" }, { 0x008c, "CryptDeriveKey" }, { 0x008d, "CryptDestroyHash" }, { 0x008e, "CryptDestroyKey" }, { 0x008f, "CryptDuplicateHash" }, { 0x0090, "CryptDuplicateKey" }, { 0x0091, "CryptEncrypt" }, { 0x0092, "CryptEnumProviderTypesA" }, { 0x0093, "CryptEnumProviderTypesW" }, { 0x0094, "CryptEnumProvidersA" }, { 0x0095, "CryptEnumProvidersW" }, { 0x0096, "CryptExportKey" }, { 0x0097, "CryptGenKey" }, { 0x0098, "CryptGenRandom" }, { 0x0099, "CryptGetDefaultProviderA" }, { 0x009a, "CryptGetDefaultProviderW" }, { 0x009b, "CryptGetHashParam" }, { 0x009c, "CryptGetKeyParam" }, { 0x009d, "CryptGetProvParam" }, { 0x009e, "CryptGetUserKey" }, { 0x009f, "CryptHashData" }, { 0x00a0, "CryptHashSessionKey" }, { 0x00a1, "CryptImportKey" }, { 0x00a2, "CryptReleaseContext" }, { 0x00a3, "CryptSetHashParam" }, { 0x00a4, "CryptSetKeyParam" }, { 0x00a5, "CryptSetProvParam" }, { 0x00a6, "CryptSetProviderA" }, { 0x00a7, "CryptSetProviderExA" }, { 0x00a8, "CryptSetProviderExW" }, { 0x00a9, "CryptSetProviderW" }, { 0x00aa, "CryptSignHashA" }, { 0x00ab, "CryptSignHashW" }, { 0x00ac, "CryptVerifySignatureA" }, { 0x00ad, "CryptVerifySignatureW" }, { 0x00ae, "DecryptFileA" }, { 0x00af, "DecryptFileW" }, { 0x00b0, "DeleteAce" }, { 0x00b1, "DeleteService" }, { 0x00b2, "DeregisterEventSource" }, { 0x00b3, "DestroyPrivateObjectSecurity" }, { 0x00b4, "DuplicateEncryptionInfoFile" }, { 0x00b5, "DuplicateToken" }, { 0x00b6, "DuplicateTokenEx" }, { 0x00b7, "ElfBackupEventLogFileA" }, { 0x00b8, "ElfBackupEventLogFileW" }, { 0x00b9, "ElfChangeNotify" }, { 0x00ba, "ElfClearEventLogFileA" }, { 0x00bb, "ElfClearEventLogFileW" }, { 0x00bc, "ElfCloseEventLog" }, { 0x00bd, "ElfDeregisterEventSource" }, { 0x00be, "ElfFlushEventLog" }, { 0x00bf, 
"ElfNumberOfRecords" }, { 0x00c0, "ElfOldestRecord" }, { 0x00c1, "ElfOpenBackupEventLogA" }, { 0x00c2, "ElfOpenBackupEventLogW" }, { 0x00c3, "ElfOpenEventLogA" }, { 0x00c4, "ElfOpenEventLogW" }, { 0x00c5, "ElfReadEventLogA" }, { 0x00c6, "ElfReadEventLogW" }, { 0x00c7, "ElfRegisterEventSourceA" }, { 0x00c8, "ElfRegisterEventSourceW" }, { 0x00c9, "ElfReportEventA" }, { 0x00ca, "ElfReportEventW" }, { 0x00cb, "EnableTrace" }, { 0x00cc, "EncryptFileA" }, { 0x00cd, "EncryptFileW" }, { 0x00ce, "EncryptedFileKeyInfo" }, { 0x00cf, "EncryptionDisable" }, { 0x00d0, "EnumDependentServicesA" }, { 0x00d1, "EnumDependentServicesW" }, { 0x00d2, "EnumServiceGroupW" }, { 0x00d3, "EnumServicesStatusA" }, { 0x00d4, "EnumServicesStatusExA" }, { 0x00d5, "EnumServicesStatusExW" }, { 0x00d6, "EnumServicesStatusW" }, { 0x00d7, "EnumerateTraceGuids" }, { 0x00d8, "EqualDomainSid" }, { 0x00d9, "EqualPrefixSid" }, { 0x00da, "EqualSid" }, { 0x00db, "FileEncryptionStatusA" }, { 0x00dc, "FileEncryptionStatusW" }, { 0x00dd, "FindFirstFreeAce" }, { 0x00de, "FlushTraceA" }, { 0x00df, "FlushTraceW" }, { 0x00e0, "FreeEncryptedFileKeyInfo" }, { 0x00e1, "FreeEncryptionCertificateHashList" }, { 0x00e2, "FreeInheritedFromArray" }, { 0x00e3, "FreeSid" }, { 0x00e4, "GetAccessPermissionsForObjectA" }, { 0x00e5, "GetAccessPermissionsForObjectW" }, { 0x00e6, "GetAce" }, { 0x00e7, "GetAclInformation" }, { 0x00e8, "GetAuditedPermissionsFromAclA" }, { 0x00e9, "GetAuditedPermissionsFromAclW" }, { 0x00ea, "GetCurrentHwProfileA" }, { 0x00eb, "GetCurrentHwProfileW" }, { 0x00ec, "GetEffectiveRightsFromAclA" }, { 0x00ed, "GetEffectiveRightsFromAclW" }, { 0x00ee, "GetEventLogInformation" }, { 0x00ef, "GetExplicitEntriesFromAclA" }, { 0x00f0, "GetExplicitEntriesFromAclW" }, { 0x00f1, "GetFileSecurityA" }, { 0x00f2, "GetFileSecurityW" }, { 0x00f3, "GetInformationCodeAuthzLevelW" }, { 0x00f4, "GetInformationCodeAuthzPolicyW" }, { 0x00f5, "GetInheritanceSourceA" }, { 0x00f6, "GetInheritanceSourceW" }, { 0x00f7, 
"GetKernelObjectSecurity" }, { 0x00f8, "GetLengthSid" }, { 0x00f9, "GetLocalManagedApplicationData" }, { 0x00fa, "GetLocalManagedApplications" }, { 0x00fb, "GetManagedApplicationCategories" }, { 0x00fc, "GetManagedApplications" }, { 0x00fd, "GetMultipleTrusteeA" }, { 0x00fe, "GetMultipleTrusteeOperationA" }, { 0x00ff, "GetMultipleTrusteeOperationW" }, { 0x0100, "GetMultipleTrusteeW" }, { 0x0101, "GetNamedSecurityInfoA" }, { 0x0102, "GetNamedSecurityInfoExA" }, { 0x0103, "GetNamedSecurityInfoExW" }, { 0x0104, "GetNamedSecurityInfoW" }, { 0x0105, "GetNumberOfEventLogRecords" }, { 0x0106, "GetOldestEventLogRecord" }, { 0x0107, "GetOverlappedAccessResults" }, { 0x0108, "GetPrivateObjectSecurity" }, { 0x0109, "GetSecurityDescriptorControl" }, { 0x010a, "GetSecurityDescriptorDacl" }, { 0x010b, "GetSecurityDescriptorGroup" }, { 0x010c, "GetSecurityDescriptorLength" }, { 0x010d, "GetSecurityDescriptorOwner" }, { 0x010e, "GetSecurityDescriptorRMControl" }, { 0x010f, "GetSecurityDescriptorSacl" }, { 0x0110, "GetSecurityInfo" }, { 0x0111, "GetSecurityInfoExA" }, { 0x0112, "GetSecurityInfoExW" }, { 0x0113, "GetServiceDisplayNameA" }, { 0x0114, "GetServiceDisplayNameW" }, { 0x0115, "GetServiceKeyNameA" }, { 0x0116, "GetServiceKeyNameW" }, { 0x0117, "GetSidIdentifierAuthority" }, { 0x0118, "GetSidLengthRequired" }, { 0x0119, "GetSidSubAuthority" }, { 0x011a, "GetSidSubAuthorityCount" }, { 0x011b, "GetTokenInformation" }, { 0x011c, "GetTraceEnableFlags" }, { 0x011d, "GetTraceEnableLevel" }, { 0x011e, "GetTraceLoggerHandle" }, { 0x011f, "GetTrusteeFormA" }, { 0x0120, "GetTrusteeFormW" }, { 0x0121, "GetTrusteeNameA" }, { 0x0122, "GetTrusteeNameW" }, { 0x0123, "GetTrusteeTypeA" }, { 0x0124, "GetTrusteeTypeW" }, { 0x0125, "GetUserNameA" }, { 0x0126, "GetUserNameW" }, { 0x0127, "GetWindowsAccountDomainSid" }, { 0x0001, "I_ScGetCurrentGroupStateW" }, { 0x0128, "I_ScIsSecurityProcess" }, { 0x0129, "I_ScPnPGetServiceName" }, { 0x012a, "I_ScSendTSMessage" }, { 0x012b, 
"I_ScSetServiceBitsA" }, { 0x012c, "I_ScSetServiceBitsW" }, { 0x012d, "IdentifyCodeAuthzLevelW" }, { 0x012e, "ImpersonateAnonymousToken" }, { 0x012f, "ImpersonateLoggedOnUser" }, { 0x0130, "ImpersonateNamedPipeClient" }, { 0x0131, "ImpersonateSelf" }, { 0x0132, "InitializeAcl" }, { 0x0133, "InitializeSecurityDescriptor" }, { 0x0134, "InitializeSid" }, { 0x0135, "InitiateSystemShutdownA" }, { 0x0136, "InitiateSystemShutdownExA" }, { 0x0137, "InitiateSystemShutdownExW" }, { 0x0138, "InitiateSystemShutdownW" }, { 0x0139, "InstallApplication" }, { 0x013a, "IsTextUnicode" }, { 0x013b, "IsTokenRestricted" }, { 0x013c, "IsTokenUntrusted" }, { 0x013d, "IsValidAcl" }, { 0x013e, "IsValidSecurityDescriptor" }, { 0x013f, "IsValidSid" }, { 0x0140, "IsWellKnownSid" }, { 0x0141, "LockServiceDatabase" }, { 0x0142, "LogonUserA" }, { 0x0143, "LogonUserExA" }, { 0x0144, "LogonUserExW" }, { 0x0145, "LogonUserW" }, { 0x0146, "LookupAccountNameA" }, { 0x0147, "LookupAccountNameW" }, { 0x0148, "LookupAccountSidA" }, { 0x0149, "LookupAccountSidW" }, { 0x014a, "LookupPrivilegeDisplayNameA" }, { 0x014b, "LookupPrivilegeDisplayNameW" }, { 0x014c, "LookupPrivilegeNameA" }, { 0x014d, "LookupPrivilegeNameW" }, { 0x014e, "LookupPrivilegeValueA" }, { 0x014f, "LookupPrivilegeValueW" }, { 0x0150, "LookupSecurityDescriptorPartsA" }, { 0x0151, "LookupSecurityDescriptorPartsW" }, { 0x0152, "LsaAddAccountRights" }, { 0x0153, "LsaAddPrivilegesToAccount" }, { 0x0154, "LsaClearAuditLog" }, { 0x0155, "LsaClose" }, { 0x0156, "LsaCreateAccount" }, { 0x0157, "LsaCreateSecret" }, { 0x0158, "LsaCreateTrustedDomain" }, { 0x0159, "LsaCreateTrustedDomainEx" }, { 0x015a, "LsaDelete" }, { 0x015b, "LsaDeleteTrustedDomain" }, { 0x015c, "LsaEnumerateAccountRights" }, { 0x015d, "LsaEnumerateAccounts" }, { 0x015e, "LsaEnumerateAccountsWithUserRight" }, { 0x015f, "LsaEnumeratePrivileges" }, { 0x0160, "LsaEnumeratePrivilegesOfAccount" }, { 0x0161, "LsaEnumerateTrustedDomains" }, { 0x0162, "LsaEnumerateTrustedDomainsEx" }, 
{ 0x0163, "LsaFreeMemory" }, { 0x0164, "LsaGetQuotasForAccount" }, { 0x0165, "LsaGetRemoteUserName" }, { 0x0166, "LsaGetSystemAccessAccount" }, { 0x0167, "LsaGetUserName" }, { 0x0168, "LsaICLookupNames" }, { 0x0169, "LsaICLookupNamesWithCreds" }, { 0x016a, "LsaICLookupSids" }, { 0x016b, "LsaICLookupSidsWithCreds" }, { 0x016d, "LsaLookupNames" }, { 0x016c, "LsaLookupNames2" }, { 0x016e, "LsaLookupPrivilegeDisplayName" }, { 0x016f, "LsaLookupPrivilegeName" }, { 0x0170, "LsaLookupPrivilegeValue" }, { 0x0171, "LsaLookupSids" }, { 0x0172, "LsaNtStatusToWinError" }, { 0x0173, "LsaOpenAccount" }, { 0x0174, "LsaOpenPolicy" }, { 0x0175, "LsaOpenPolicySce" }, { 0x0176, "LsaOpenSecret" }, { 0x0177, "LsaOpenTrustedDomain" }, { 0x0178, "LsaOpenTrustedDomainByName" }, { 0x0179, "LsaQueryDomainInformationPolicy" }, { 0x017a, "LsaQueryForestTrustInformation" }, { 0x017b, "LsaQueryInfoTrustedDomain" }, { 0x017c, "LsaQueryInformationPolicy" }, { 0x017d, "LsaQuerySecret" }, { 0x017e, "LsaQuerySecurityObject" }, { 0x017f, "LsaQueryTrustedDomainInfo" }, { 0x0180, "LsaQueryTrustedDomainInfoByName" }, { 0x0181, "LsaRemoveAccountRights" }, { 0x0182, "LsaRemovePrivilegesFromAccount" }, { 0x0183, "LsaRetrievePrivateData" }, { 0x0184, "LsaSetDomainInformationPolicy" }, { 0x0185, "LsaSetForestTrustInformation" }, { 0x0186, "LsaSetInformationPolicy" }, { 0x0187, "LsaSetInformationTrustedDomain" }, { 0x0188, "LsaSetQuotasForAccount" }, { 0x0189, "LsaSetSecret" }, { 0x018a, "LsaSetSecurityObject" }, { 0x018b, "LsaSetSystemAccessAccount" }, { 0x018c, "LsaSetTrustedDomainInfoByName" }, { 0x018d, "LsaSetTrustedDomainInformation" }, { 0x018e, "LsaStorePrivateData" }, { 0x018f, "MD4Final" }, { 0x0190, "MD4Init" }, { 0x0191, "MD4Update" }, { 0x0192, "MD5Final" }, { 0x0193, "MD5Init" }, { 0x0194, "MD5Update" }, { 0x0196, "MSChapSrvChangePassword" }, { 0x0195, "MSChapSrvChangePassword2" }, { 0x0198, "MakeAbsoluteSD" }, { 0x0197, "MakeAbsoluteSD2" }, { 0x0199, "MakeSelfRelativeSD" }, { 0x019a, 
"MapGenericMask" }, { 0x019b, "NotifyBootConfigStatus" }, { 0x019c, "NotifyChangeEventLog" }, { 0x019d, "ObjectCloseAuditAlarmA" }, { 0x019e, "ObjectCloseAuditAlarmW" }, { 0x019f, "ObjectDeleteAuditAlarmA" }, { 0x01a0, "ObjectDeleteAuditAlarmW" }, { 0x01a1, "ObjectOpenAuditAlarmA" }, { 0x01a2, "ObjectOpenAuditAlarmW" }, { 0x01a3, "ObjectPrivilegeAuditAlarmA" }, { 0x01a4, "ObjectPrivilegeAuditAlarmW" }, { 0x01a5, "OpenBackupEventLogA" }, { 0x01a6, "OpenBackupEventLogW" }, { 0x01a7, "OpenEncryptedFileRawA" }, { 0x01a8, "OpenEncryptedFileRawW" }, { 0x01a9, "OpenEventLogA" }, { 0x01aa, "OpenEventLogW" }, { 0x01ab, "OpenProcessToken" }, { 0x01ac, "OpenSCManagerA" }, { 0x01ad, "OpenSCManagerW" }, { 0x01ae, "OpenServiceA" }, { 0x01af, "OpenServiceW" }, { 0x01b0, "OpenThreadToken" }, { 0x01b1, "OpenTraceA" }, { 0x01b2, "OpenTraceW" }, { 0x01b3, "PrivilegeCheck" }, { 0x01b4, "PrivilegedServiceAuditAlarmA" }, { 0x01b5, "PrivilegedServiceAuditAlarmW" }, { 0x01b6, "ProcessIdleTasks" }, { 0x01b7, "ProcessTrace" }, { 0x01b8, "QueryAllTracesA" }, { 0x01b9, "QueryAllTracesW" }, { 0x01ba, "QueryRecoveryAgentsOnEncryptedFile" }, { 0x01bb, "QueryServiceConfig2A" }, { 0x01bc, "QueryServiceConfig2W" }, { 0x01bd, "QueryServiceConfigA" }, { 0x01be, "QueryServiceConfigW" }, { 0x01bf, "QueryServiceLockStatusA" }, { 0x01c0, "QueryServiceLockStatusW" }, { 0x01c1, "QueryServiceObjectSecurity" }, { 0x01c2, "QueryServiceStatus" }, { 0x01c3, "QueryServiceStatusEx" }, { 0x01c4, "QueryTraceA" }, { 0x01c5, "QueryTraceW" }, { 0x01c6, "QueryUsersOnEncryptedFile" }, { 0x01c7, "QueryWindows31FilesMigration" }, { 0x01c8, "ReadEncryptedFileRaw" }, { 0x01c9, "ReadEventLogA" }, { 0x01ca, "ReadEventLogW" }, { 0x01cb, "RegCloseKey" }, { 0x01cc, "RegConnectRegistryA" }, { 0x01cd, "RegConnectRegistryW" }, { 0x01ce, "RegCreateKeyA" }, { 0x01cf, "RegCreateKeyExA" }, { 0x01d0, "RegCreateKeyExW" }, { 0x01d1, "RegCreateKeyW" }, { 0x01d2, "RegDeleteKeyA" }, { 0x01d3, "RegDeleteKeyW" }, { 0x01d4, "RegDeleteValueA" }, 
{ 0x01d5, "RegDeleteValueW" }, { 0x01d6, "RegDisablePredefinedCache" }, { 0x01d7, "RegEnumKeyA" }, { 0x01d8, "RegEnumKeyExA" }, { 0x01d9, "RegEnumKeyExW" }, { 0x01da, "RegEnumKeyW" }, { 0x01db, "RegEnumValueA" }, { 0x01dc, "RegEnumValueW" }, { 0x01dd, "RegFlushKey" }, { 0x01de, "RegGetKeySecurity" }, { 0x01df, "RegLoadKeyA" }, { 0x01e0, "RegLoadKeyW" }, { 0x01e1, "RegNotifyChangeKeyValue" }, { 0x01e2, "RegOpenCurrentUser" }, { 0x01e3, "RegOpenKeyA" }, { 0x01e4, "RegOpenKeyExA" }, { 0x01e5, "RegOpenKeyExW" }, { 0x01e6, "RegOpenKeyW" }, { 0x01e7, "RegOpenUserClassesRoot" }, { 0x01e8, "RegOverridePredefKey" }, { 0x01e9, "RegQueryInfoKeyA" }, { 0x01ea, "RegQueryInfoKeyW" }, { 0x01eb, "RegQueryMultipleValuesA" }, { 0x01ec, "RegQueryMultipleValuesW" }, { 0x01ed, "RegQueryValueA" }, { 0x01ee, "RegQueryValueExA" }, { 0x01ef, "RegQueryValueExW" }, { 0x01f0, "RegQueryValueW" }, { 0x01f1, "RegReplaceKeyA" }, { 0x01f2, "RegReplaceKeyW" }, { 0x01f3, "RegRestoreKeyA" }, { 0x01f4, "RegRestoreKeyW" }, { 0x01f5, "RegSaveKeyA" }, { 0x01f6, "RegSaveKeyExA" }, { 0x01f7, "RegSaveKeyExW" }, { 0x01f8, "RegSaveKeyW" }, { 0x01f9, "RegSetKeySecurity" }, { 0x01fa, "RegSetValueA" }, { 0x01fb, "RegSetValueExA" }, { 0x01fc, "RegSetValueExW" }, { 0x01fd, "RegSetValueW" }, { 0x01fe, "RegUnLoadKeyA" }, { 0x01ff, "RegUnLoadKeyW" }, { 0x0200, "RegisterEventSourceA" }, { 0x0201, "RegisterEventSourceW" }, { 0x0202, "RegisterIdleTask" }, { 0x0203, "RegisterServiceCtrlHandlerA" }, { 0x0204, "RegisterServiceCtrlHandlerExA" }, { 0x0205, "RegisterServiceCtrlHandlerExW" }, { 0x0206, "RegisterServiceCtrlHandlerW" }, { 0x0207, "RegisterTraceGuidsA" }, { 0x0208, "RegisterTraceGuidsW" }, { 0x0209, "RemoveTraceCallback" }, { 0x020a, "RemoveUsersFromEncryptedFile" }, { 0x020b, "ReportEventA" }, { 0x020c, "ReportEventW" }, { 0x020d, "RevertToSelf" }, { 0x020e, "SaferCloseLevel" }, { 0x020f, "SaferComputeTokenFromLevel" }, { 0x0210, "SaferCreateLevel" }, { 0x0211, "SaferGetLevelInformation" }, { 0x0212, 
"SaferGetPolicyInformation" }, { 0x0213, "SaferIdentifyLevel" }, { 0x0214, "SaferRecordEventLogEntry" }, { 0x0215, "SaferSetLevelInformation" }, { 0x0216, "SaferSetPolicyInformation" }, { 0x0217, "SaferiChangeRegistryScope" }, { 0x0218, "SaferiCompareTokenLevels" }, { 0x0219, "SaferiIsExecutableFileType" }, { 0x021a, "SaferiPopulateDefaultsInRegistry" }, { 0x021b, "SaferiRecordEventLogEntry" }, { 0x021c, "SaferiReplaceProcessThreadTokens" }, { 0x021d, "SaferiSearchMatchingHashRules" }, { 0x021e, "SetAclInformation" }, { 0x021f, "SetEntriesInAccessListA" }, { 0x0220, "SetEntriesInAccessListW" }, { 0x0221, "SetEntriesInAclA" }, { 0x0222, "SetEntriesInAclW" }, { 0x0223, "SetEntriesInAuditListA" }, { 0x0224, "SetEntriesInAuditListW" }, { 0x0225, "SetFileSecurityA" }, { 0x0226, "SetFileSecurityW" }, { 0x0227, "SetInformationCodeAuthzLevelW" }, { 0x0228, "SetInformationCodeAuthzPolicyW" }, { 0x0229, "SetKernelObjectSecurity" }, { 0x022a, "SetNamedSecurityInfoA" }, { 0x022b, "SetNamedSecurityInfoExA" }, { 0x022c, "SetNamedSecurityInfoExW" }, { 0x022d, "SetNamedSecurityInfoW" }, { 0x022e, "SetPrivateObjectSecurity" }, { 0x022f, "SetPrivateObjectSecurityEx" }, { 0x0230, "SetSecurityDescriptorControl" }, { 0x0231, "SetSecurityDescriptorDacl" }, { 0x0232, "SetSecurityDescriptorGroup" }, { 0x0233, "SetSecurityDescriptorOwner" }, { 0x0234, "SetSecurityDescriptorRMControl" }, { 0x0235, "SetSecurityDescriptorSacl" }, { 0x0236, "SetSecurityInfo" }, { 0x0237, "SetSecurityInfoExA" }, { 0x0238, "SetSecurityInfoExW" }, { 0x0239, "SetServiceBits" }, { 0x023a, "SetServiceObjectSecurity" }, { 0x023b, "SetServiceStatus" }, { 0x023c, "SetThreadToken" }, { 0x023d, "SetTokenInformation" }, { 0x023e, "SetTraceCallback" }, { 0x023f, "SetUserFileEncryptionKey" }, { 0x0240, "StartServiceA" }, { 0x0241, "StartServiceCtrlDispatcherA" }, { 0x0242, "StartServiceCtrlDispatcherW" }, { 0x0243, "StartServiceW" }, { 0x0244, "StartTraceA" }, { 0x0245, "StartTraceW" }, { 0x0246, "StopTraceA" }, { 0x0247, 
"StopTraceW" }, { 0x0248, "SynchronizeWindows31FilesAndWindowsNTRegistry" }, { 0x0249, "SystemFunction001" }, { 0x024a, "SystemFunction002" }, { 0x024b, "SystemFunction003" }, { 0x024c, "SystemFunction004" }, { 0x024d, "SystemFunction005" }, { 0x024e, "SystemFunction006" }, { 0x024f, "SystemFunction007" }, { 0x0250, "SystemFunction008" }, { 0x0251, "SystemFunction009" }, { 0x0252, "SystemFunction010" }, { 0x0253, "SystemFunction011" }, { 0x0254, "SystemFunction012" }, { 0x0255, "SystemFunction013" }, { 0x0256, "SystemFunction014" }, { 0x0257, "SystemFunction015" }, { 0x0258, "SystemFunction016" }, { 0x0259, "SystemFunction017" }, { 0x025a, "SystemFunction018" }, { 0x025b, "SystemFunction019" }, { 0x025c, "SystemFunction020" }, { 0x025d, "SystemFunction021" }, { 0x025e, "SystemFunction022" }, { 0x025f, "SystemFunction023" }, { 0x0260, "SystemFunction024" }, { 0x0261, "SystemFunction025" }, { 0x0262, "SystemFunction026" }, { 0x0263, "SystemFunction027" }, { 0x0264, "SystemFunction028" }, { 0x0265, "SystemFunction029" }, { 0x0266, "SystemFunction030" }, { 0x0267, "SystemFunction031" }, { 0x0268, "SystemFunction032" }, { 0x0269, "SystemFunction033" }, { 0x026a, "SystemFunction034" }, { 0x026b, "SystemFunction035" }, { 0x026c, "SystemFunction036" }, { 0x026d, "SystemFunction040" }, { 0x026e, "SystemFunction041" }, { 0x026f, "TraceEvent" }, { 0x0270, "TraceEventInstance" }, { 0x0271, "TraceMessage" }, { 0x0272, "TraceMessageVa" }, { 0x0273, "TreeResetNamedSecurityInfoA" }, { 0x0274, "TreeResetNamedSecurityInfoW" }, { 0x0275, "TrusteeAccessToObjectA" }, { 0x0276, "TrusteeAccessToObjectW" }, { 0x0277, "UninstallApplication" }, { 0x0278, "UnlockServiceDatabase" }, { 0x0279, "UnregisterIdleTask" }, { 0x027a, "UnregisterTraceGuids" }, { 0x027b, "UpdateTraceA" }, { 0x027c, "UpdateTraceW" }, { 0x027d, "WdmWmiServiceMain" }, { 0x027e, "WmiCloseBlock" }, { 0x027f, "WmiCloseTraceWithCursor" }, { 0x0280, "WmiConvertTimestamp" }, { 0x0281, "WmiDevInstToInstanceNameA" }, { 0x0282, 
"WmiDevInstToInstanceNameW" }, { 0x0283, "WmiEnumerateGuids" }, { 0x0284, "WmiExecuteMethodA" }, { 0x0285, "WmiExecuteMethodW" }, { 0x0286, "WmiFileHandleToInstanceNameA" }, { 0x0287, "WmiFileHandleToInstanceNameW" }, { 0x0288, "WmiFreeBuffer" }, { 0x0289, "WmiGetFirstTraceOffset" }, { 0x028a, "WmiGetNextEvent" }, { 0x028b, "WmiGetTraceHeader" }, { 0x028c, "WmiMofEnumerateResourcesA" }, { 0x028d, "WmiMofEnumerateResourcesW" }, { 0x028e, "WmiNotificationRegistrationA" }, { 0x028f, "WmiNotificationRegistrationW" }, { 0x0290, "WmiOpenBlock" }, { 0x0291, "WmiOpenTraceWithCursor" }, { 0x0292, "WmiParseTraceEvent" }, { 0x0293, "WmiQueryAllDataA" }, { 0x0294, "WmiQueryAllDataMultipleA" }, { 0x0295, "WmiQueryAllDataMultipleW" }, { 0x0296, "WmiQueryAllDataW" }, { 0x0297, "WmiQueryGuidInformation" }, { 0x0298, "WmiQuerySingleInstanceA" }, { 0x0299, "WmiQuerySingleInstanceMultipleA" }, { 0x029a, "WmiQuerySingleInstanceMultipleW" }, { 0x029b, "WmiQuerySingleInstanceW" }, { 0x029c, "WmiReceiveNotificationsA" }, { 0x029d, "WmiReceiveNotificationsW" }, { 0x029e, "WmiSetSingleInstanceA" }, { 0x029f, "WmiSetSingleInstanceW" }, { 0x02a0, "WmiSetSingleItemA" }, { 0x02a1, "WmiSetSingleItemW" }, { 0x02a2, "Wow64Win32ApiEntry" }, { 0x02a3, "WriteEncryptedFileRaw" }, }; } } #endif
// -*- C++ -*- /** * @file dds4ccm_vendor_t.cpp * @author Marcel Smit, Martin Corino * * @brief NDDS specific implementations * * @copyright Copyright (c) Remedy IT Expertise BV */ #include "dds4ccm/impl/dds4ccm_ndds_publisher_listener_t.h" #include "dds4ccm/logger/dds4ccm_log.h" namespace CIAOX11 { namespace DDS4CCM { namespace VendorUtils { template <typename EVT_STRATEGY> IDL::traits< ::DDS::PublisherListener>::ref_type create_publisher_listener (const EVT_STRATEGY &evs) { return CORBA::make_reference< CIAOX11::NDDS4CCM::NDDSPublisherListener_T<EVT_STRATEGY>> (evs); } template <typename EVT_STRATEGY> ::DDS::StatusKind get_publisher_listener_mask ( IDL::traits< CCM_DDS::ConnectorStatusListener>::ref_type error_listener) { return CIAOX11::NDDS4CCM::NDDSPublisherListener_T<EVT_STRATEGY>::get_mask (error_listener); } } } }
// Copyright 2021 The Tint Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "src/tint/resolver/resolver.h"
#include "src/tint/resolver/resolver_test_helper.h"

#include "gmock/gmock.h"

using namespace tint::number_suffixes;  // NOLINT

namespace tint::resolver {
namespace {

// Validation tests for 'var' and 'let' declarations. Each test builds a small
// AST (the equivalent WGSL is shown in comments), resolves it, and checks the
// expected diagnostic text (or that resolution succeeds).
struct ResolverVarLetValidationTest : public resolver::TestHelper, public testing::Test {};

TEST_F(ResolverVarLetValidationTest, LetNoInitializer) {
  // let a : i32;
  WrapInFunction(Let(Source{{12, 34}}, "a", ty.i32(), nullptr));

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(), "12:34 error: let declaration must have an initializer");
}

TEST_F(ResolverVarLetValidationTest, GlobalLetNoInitializer) {
  // let a : i32;
  GlobalConst(Source{{12, 34}}, "a", ty.i32(), nullptr);

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(), "12:34 error: let declaration must have an initializer");
}

TEST_F(ResolverVarLetValidationTest, VarNoInitializerNoType) {
  // var a;
  WrapInFunction(Var(Source{{12, 34}}, "a", nullptr));

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(),
            "12:34 error: function scope var declaration requires a type or "
            "initializer");
}

TEST_F(ResolverVarLetValidationTest, GlobalVarNoInitializerNoType) {
  // var a;
  Global(Source{{12, 34}}, "a", nullptr);

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(),
            "12:34 error: module scope var declaration requires a type and "
            "initializer");
}

TEST_F(ResolverVarLetValidationTest, VarTypeNotStorable) {
  // var i : i32;
  // var p : pointer<function, i32> = &i;
  auto* i = Var("i", ty.i32(), ast::StorageClass::kNone);
  auto* p = Var(Source{{56, 78}}, "a", ty.pointer<i32>(ast::StorageClass::kFunction),
                ast::StorageClass::kNone, AddressOf(Source{{12, 34}}, "i"));
  WrapInFunction(i, p);

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(),
            "56:78 error: ptr<function, i32, read_write> cannot be used as the "
            "type of a var");
}

TEST_F(ResolverVarLetValidationTest, LetTypeNotConstructible) {
  // @group(0) @binding(0) var t1 : texture_2d<f32>;
  // let t2 : t1;
  auto* t1 = Global("t1", ty.sampled_texture(ast::TextureDimension::k2d, ty.f32()),
                    GroupAndBinding(0, 0));
  auto* t2 = Let(Source{{56, 78}}, "t2", nullptr, Expr(t1));
  WrapInFunction(t2);

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(), "56:78 error: texture_2d<f32> cannot be used as the type of a let");
}

TEST_F(ResolverVarLetValidationTest, LetConstructorWrongType) {
  // let v : i32 = 2u
  WrapInFunction(Let(Source{{3, 3}}, "v", ty.i32(), Expr(2_u)));

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(),
            R"(3:3 error: cannot initialize let of type 'i32' with value of type 'u32')");
}

TEST_F(ResolverVarLetValidationTest, VarConstructorWrongType) {
  // var v : i32 = 2u
  WrapInFunction(Var(Source{{3, 3}}, "v", ty.i32(), ast::StorageClass::kNone, Expr(2_u)));

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(),
            R"(3:3 error: cannot initialize var of type 'i32' with value of type 'u32')");
}

TEST_F(ResolverVarLetValidationTest, LetConstructorWrongTypeViaAlias) {
  // alias I32 = i32; let v : I32 = 2u
  // The diagnostic reports the aliased (resolved) type name.
  auto* a = Alias("I32", ty.i32());
  WrapInFunction(Let(Source{{3, 3}}, "v", ty.Of(a), Expr(2_u)));

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(),
            R"(3:3 error: cannot initialize let of type 'i32' with value of type 'u32')");
}

TEST_F(ResolverVarLetValidationTest, VarConstructorWrongTypeViaAlias) {
  // alias I32 = i32; var v : I32 = 2u
  auto* a = Alias("I32", ty.i32());
  WrapInFunction(Var(Source{{3, 3}}, "v", ty.Of(a), ast::StorageClass::kNone, Expr(2_u)));

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(),
            R"(3:3 error: cannot initialize var of type 'i32' with value of type 'u32')");
}

TEST_F(ResolverVarLetValidationTest, LetOfPtrConstructedWithRef) {
  // var a : f32;
  // let b : ptr<function,f32> = a;
  const auto priv = ast::StorageClass::kFunction;
  auto* var_a = Var("a", ty.f32(), priv);
  // Initializer is the identifier 'a' (a reference), not &a, so the types
  // mismatch: ptr<...> vs f32.
  auto* var_b = Let(Source{{12, 34}}, "b", ty.pointer<float>(priv), Expr("a"), {});
  WrapInFunction(var_a, var_b);

  ASSERT_FALSE(r()->Resolve());
  EXPECT_EQ(
      r()->error(),
      R"(12:34 error: cannot initialize let of type 'ptr<function, f32, read_write>' with value of type 'f32')");
}

TEST_F(ResolverVarLetValidationTest, LocalLetRedeclared) {
  // let l : f32 = 1.;
  // let l : i32 = 0;
  auto* l1 = Let("l", ty.f32(), Expr(1.f));
  auto* l2 = Let(Source{{12, 34}}, "l", ty.i32(), Expr(0_i));
  WrapInFunction(l1, l2);

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(),
            "12:34 error: redeclaration of 'l'\nnote: 'l' previously declared here");
}

TEST_F(ResolverVarLetValidationTest, GlobalVarRedeclaredAsLocal) {
  // var v : f32 = 2.1;
  // fn my_func() {
  //   var v : f32 = 2.0;
  //   return 0;
  // }
  // A function-scope var may shadow a module-scope var: this must resolve.
  Global("v", ty.f32(), ast::StorageClass::kPrivate, Expr(2.1f));

  WrapInFunction(Var(Source{{12, 34}}, "v", ty.f32(), ast::StorageClass::kNone, Expr(2.0f)));

  EXPECT_TRUE(r()->Resolve()) << r()->error();
}

TEST_F(ResolverVarLetValidationTest, VarRedeclaredInInnerBlock) {
  // {
  //  var v : f32;
  //  { var v : f32; }
  // }
  // Shadowing in a nested block is valid.
  auto* var_outer = Var("v", ty.f32(), ast::StorageClass::kNone);
  auto* var_inner = Var(Source{{12, 34}}, "v", ty.f32(), ast::StorageClass::kNone);
  auto* inner = Block(Decl(var_inner));
  auto* outer_body = Block(Decl(var_outer), inner);
  WrapInFunction(outer_body);

  EXPECT_TRUE(r()->Resolve()) << r()->error();
}

TEST_F(ResolverVarLetValidationTest, VarRedeclaredInIfBlock) {
  // {
  //   var v : f32 = 3.1;
  //   if (true) { var v : f32 = 2.0; }
  // }
  // Shadowing inside an if-statement body is valid.
  auto* var_a_float = Var("v", ty.f32(), ast::StorageClass::kNone, Expr(3.1f));

  auto* var = Var(Source{{12, 34}}, "v", ty.f32(), ast::StorageClass::kNone, Expr(2.0f));

  auto* cond = Expr(true);
  auto* body = Block(Decl(var));

  auto* outer_body = Block(Decl(var_a_float), If(cond, body));

  WrapInFunction(outer_body);

  EXPECT_TRUE(r()->Resolve()) << r()->error();
}

TEST_F(ResolverVarLetValidationTest, InferredPtrStorageAccessMismatch) {
  // struct Inner {
  //    arr: array<i32, 4>;
  // }
  // struct S {
  //    inner: Inner;
  // }
  // @group(0) @binding(0) var<storage> s : S;
  // fn f() {
  //   let p : pointer<storage, i32, read_write> = &s.inner.arr[2i];
  // }
  // var<storage> defaults to 'read' access, so the let's explicit
  // 'read_write' pointer type must not match the initializer's pointer type.
  auto* inner = Structure("Inner", {Member("arr", ty.array<i32, 4>())});
  auto* buf = Structure("S", {Member("inner", ty.Of(inner))});
  auto* storage = Global("s", ty.Of(buf), ast::StorageClass::kStorage,
                         ast::AttributeList{
                             create<ast::BindingAttribute>(0),
                             create<ast::GroupAttribute>(0),
                         });

  auto* expr = IndexAccessor(MemberAccessor(MemberAccessor(storage, "inner"), "arr"), 2_i);
  auto* ptr = Let(Source{{12, 34}}, "p",
                  ty.pointer<i32>(ast::StorageClass::kStorage, ast::Access::kReadWrite),
                  AddressOf(expr));

  WrapInFunction(ptr);

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(),
            "12:34 error: cannot initialize let of type "
            "'ptr<storage, i32, read_write>' with value of type "
            "'ptr<storage, i32, read>'");
}

TEST_F(ResolverVarLetValidationTest, NonConstructibleType_Atomic) {
  auto* v = Var("v", ty.atomic(Source{{12, 34}}, ty.i32()));
  WrapInFunction(v);

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(), "12:34 error: function variable must have a constructible type");
}

TEST_F(ResolverVarLetValidationTest, NonConstructibleType_RuntimeArray) {
  auto* s = Structure("S", {Member(Source{{56, 78}}, "m", ty.array(ty.i32()))});
  auto* v = Var(Source{{12, 34}}, "v", ty.Of(s));
  WrapInFunction(v);

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(),
            R"(12:34 error: runtime-sized arrays can only be used in the <storage> storage class
56:78 note: while analysing structure member S.m
12:34 note: while instantiating variable v)");
}

TEST_F(ResolverVarLetValidationTest, NonConstructibleType_Struct_WithAtomic) {
  // NOTE: the Var carries no Source, so the diagnostic has no "line:col"
  // prefix here.
  auto* s = Structure("S", {Member("m", ty.atomic(ty.i32()))});
  auto* v = Var("v", ty.Of(s));
  WrapInFunction(v);

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(), "error: function variable must have a constructible type");
}

TEST_F(ResolverVarLetValidationTest, NonConstructibleType_InferredType) {
  // @group(0) @binding(0) var s : sampler;
  // fn foo() {
  //   var v = s;
  // }
  Global("s", ty.sampler(ast::SamplerKind::kSampler), GroupAndBinding(0, 0));
  auto* v = Var(Source{{12, 34}}, "v", nullptr, Expr("s"));
  WrapInFunction(v);

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(), "12:34 error: function variable must have a constructible type");
}

TEST_F(ResolverVarLetValidationTest, InvalidStorageClassForInitializer) {
  // var<workgroup> v : f32 = 1.23;
  Global(Source{{12, 34}}, "v", ty.f32(), ast::StorageClass::kWorkgroup, Expr(1.23f));

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(),
            "12:34 error: var of storage class 'workgroup' cannot have "
            "an initializer. var initializers are only supported for the "
            "storage classes 'private' and 'function'");
}

TEST_F(ResolverVarLetValidationTest, VectorLetNoType) {
  // let a : vec3 = vec3<f32>();
  WrapInFunction(Let("a", create<ast::Vector>(Source{{12, 34}}, nullptr, 3), vec3<f32>()));

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(), "12:34 error: missing vector element type");
}

TEST_F(ResolverVarLetValidationTest, VectorVarNoType) {
  // var a : vec3;
  WrapInFunction(Var("a", create<ast::Vector>(Source{{12, 34}}, nullptr, 3)));

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(), "12:34 error: missing vector element type");
}

TEST_F(ResolverVarLetValidationTest, MatrixLetNoType) {
  // let a : mat3x3 = mat3x3<f32>();
  WrapInFunction(Let("a", create<ast::Matrix>(Source{{12, 34}}, nullptr, 3, 3), mat3x3<f32>()));

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(), "12:34 error: missing matrix element type");
}

TEST_F(ResolverVarLetValidationTest, MatrixVarNoType) {
  // var a : mat3x3;
  WrapInFunction(Var("a", create<ast::Matrix>(Source{{12, 34}}, nullptr, 3, 3)));

  EXPECT_FALSE(r()->Resolve());
  EXPECT_EQ(r()->error(), "12:34 error: missing matrix element type");
}

}  // namespace
}  // namespace tint::resolver
#include "Precompiled.h" // dear imgui, v1.82 WIP // (tables and columns code) /* Index of this file: // [SECTION] Commentary // [SECTION] Header mess // [SECTION] Tables: Main code // [SECTION] Tables: Row changes // [SECTION] Tables: Columns changes // [SECTION] Tables: Columns width management // [SECTION] Tables: Drawing // [SECTION] Tables: Sorting // [SECTION] Tables: Headers // [SECTION] Tables: Context Menu // [SECTION] Tables: Settings (.ini data) // [SECTION] Tables: Garbage Collection // [SECTION] Tables: Debugging // [SECTION] Columns, BeginColumns, EndColumns, etc. */ // Navigating this file: // - In Visual Studio IDE: CTRL+comma ("Edit.NavigateTo") can follow symbols in comments, whereas CTRL+F12 ("Edit.GoToImplementation") cannot. // - With Visual Assist installed: ALT+G ("VAssistX.GoToImplementation") can also follow symbols in comments. //----------------------------------------------------------------------------- // [SECTION] Commentary //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- // Typical tables call flow: (root level is generally public API): //----------------------------------------------------------------------------- // - BeginTable() user begin into a table // | BeginChild() - (if ScrollX/ScrollY is set) // | TableBeginInitMemory() - first time table is used // | TableResetSettings() - on settings reset // | TableLoadSettings() - on settings load // | TableBeginApplyRequests() - apply queued resizing/reordering/hiding requests // | - TableSetColumnWidth() - apply resizing width (for mouse resize, often requested by previous frame) // | - TableUpdateColumnsWeightFromWidth()- recompute columns weights (of stretch columns) from their respective width // - TableSetupColumn() user submit columns details (optional) // - TableSetupScrollFreeze() user submit scroll freeze information (optional) 
//----------------------------------------------------------------------------- // - TableUpdateLayout() [Internal] followup to BeginTable(): setup everything: widths, columns positions, clipping rectangles. Automatically called by the FIRST call to TableNextRow() or TableHeadersRow(). // | TableSetupDrawChannels() - setup ImDrawList channels // | TableUpdateBorders() - detect hovering columns for resize, ahead of contents submission // | TableDrawContextMenu() - draw right-click context menu //----------------------------------------------------------------------------- // - TableHeadersRow() or TableHeader() user submit a headers row (optional) // | TableSortSpecsClickColumn() - when left-clicked: alter sort order and sort direction // | TableOpenContextMenu() - when right-clicked: trigger opening of the default context menu // - TableGetSortSpecs() user queries updated sort specs (optional, generally after submitting headers) // - TableNextRow() user begin into a new row (also automatically called by TableHeadersRow()) // | TableEndRow() - finish existing row // | TableBeginRow() - add a new row // - TableSetColumnIndex() / TableNextColumn() user begin into a cell // | TableEndCell() - close existing column/cell // | TableBeginCell() - enter into current column/cell // - [...] user emit contents //----------------------------------------------------------------------------- // - EndTable() user ends the table // | TableDrawBorders() - draw outer borders, inner vertical borders // | TableMergeDrawChannels() - merge draw channels if clipping isn't required // | EndChild() - (if ScrollX/ScrollY is set) //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- // TABLE SIZING //----------------------------------------------------------------------------- // (Read carefully because this is subtle but it does make sense!) 
//-----------------------------------------------------------------------------
// About 'outer_size':
// Its meaning needs to differ slightly depending on whether we are using ScrollX/ScrollY flags.
// Default value is ImVec2(0.0f, 0.0f).
//   X
//   - outer_size.x <= 0.0f  ->  Right-align from window/work-rect right-most edge. With -FLT_MIN or 0.0f will align exactly on right-most edge.
//   - outer_size.x  > 0.0f  ->  Set Fixed width.
//   Y with ScrollX/ScrollY disabled: we output table directly in current window
//   - outer_size.y  < 0.0f  ->  Bottom-align (but will auto extend, unless _NoHostExtendY is set). Not meaningful if parent window can vertically scroll.
//   - outer_size.y  = 0.0f  ->  No minimum height (but will auto extend, unless _NoHostExtendY is set)
//   - outer_size.y  > 0.0f  ->  Set Minimum height (but will auto extend, unless _NoHostExtendY is set)
//   Y with ScrollX/ScrollY enabled: using a child window for scrolling
//   - outer_size.y  < 0.0f  ->  Bottom-align. Not meaningful if parent window can vertically scroll.
//   - outer_size.y  = 0.0f  ->  Bottom-align, consistent with BeginChild(). Not recommended unless table is last item in parent window.
//   - outer_size.y  > 0.0f  ->  Set Exact height. Recommended when using Scrolling on any axis.
//-----------------------------------------------------------------------------
// Outer size is also affected by the NoHostExtendX/NoHostExtendY flags.
// Important to note that the two flags have slightly different behaviors!
//   - ImGuiTableFlags_NoHostExtendX -> Make outer width auto-fit to columns (overriding outer_size.x value). Only available when ScrollX/ScrollY are disabled and Stretch columns are not used.
//   - ImGuiTableFlags_NoHostExtendY -> Make outer height stop exactly at outer_size.y (prevent auto-extending table past the limit). Only available when ScrollX/ScrollY are disabled. Data below the limit will be clipped and not visible.
// In theory ImGuiTableFlags_NoHostExtendY could be the default and any non-scrolling tables with outer_size.y != 0.0f would use exact height. // This would be consistent but perhaps less useful and more confusing (as vertically clipped items are not easily noticeable) //----------------------------------------------------------------------------- // About 'inner_width': // With ScrollX disabled: // - inner_width -> *ignored* // With ScrollX enabled: // - inner_width < 0.0f -> *illegal* fit in known width (right align from outer_size.x) <-- weird // - inner_width = 0.0f -> fit in outer_width: Fixed size columns will take space they need (if avail, otherwise shrink down), Stretch columns becomes Fixed columns. // - inner_width > 0.0f -> override scrolling width, generally to be larger than outer_size.x. Fixed column take space they need (if avail, otherwise shrink down), Stretch columns share remaining space! //----------------------------------------------------------------------------- // Details: // - If you want to use Stretch columns with ScrollX, you generally need to specify 'inner_width' otherwise the concept // of "available space" doesn't make sense. // - Even if not really useful, we allow 'inner_width < outer_size.x' for consistency and to facilitate understanding // of what the value does. //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- // COLUMNS SIZING POLICIES //----------------------------------------------------------------------------- // About overriding column sizing policy and width/weight with TableSetupColumn(): // We use a default parameter of 'init_width_or_weight == -1'. 
// - with ImGuiTableColumnFlags_WidthFixed, init_width <= 0 (default) --> width is automatic // - with ImGuiTableColumnFlags_WidthFixed, init_width > 0 (explicit) --> width is custom // - with ImGuiTableColumnFlags_WidthStretch, init_weight <= 0 (default) --> weight is 1.0f // - with ImGuiTableColumnFlags_WidthStretch, init_weight > 0 (explicit) --> weight is custom // Widths are specified _without_ CellPadding. If you specify a width of 100.0f, the column will be cover (100.0f + Padding * 2.0f) // and you can fit a 100.0f wide item in it without clipping and with full padding. //----------------------------------------------------------------------------- // About default sizing policy (if you don't specify a ImGuiTableColumnFlags_WidthXXXX flag) // - with Table policy ImGuiTableFlags_SizingFixedFit --> default Column policy is ImGuiTableColumnFlags_WidthFixed, default Width is equal to contents width // - with Table policy ImGuiTableFlags_SizingFixedSame --> default Column policy is ImGuiTableColumnFlags_WidthFixed, default Width is max of all contents width // - with Table policy ImGuiTableFlags_SizingStretchSame --> default Column policy is ImGuiTableColumnFlags_WidthStretch, default Weight is 1.0f // - with Table policy ImGuiTableFlags_SizingStretchWeight --> default Column policy is ImGuiTableColumnFlags_WidthStretch, default Weight is proportional to contents // Default Width and default Weight can be overridden when calling TableSetupColumn(). //----------------------------------------------------------------------------- // About mixing Fixed/Auto and Stretch columns together: // - the typical use of mixing sizing policies is: any number of LEADING Fixed columns, followed by one or two TRAILING Stretch columns. // - using mixed policies with ScrollX does not make much sense, as using Stretch columns with ScrollX does not make much sense in the first place! 
// that is, unless 'inner_width' is passed to BeginTable() to explicitly provide a total width to layout columns in. // - when using ImGuiTableFlags_SizingFixedSame with mixed columns, only the Fixed/Auto columns will match their widths to the maximum contents width. // - when using ImGuiTableFlags_SizingStretchSame with mixed columns, only the Stretch columns will match their weight/widths. //----------------------------------------------------------------------------- // About using column width: // If a column is manual resizable or has a width specified with TableSetupColumn(): // - you may use GetContentRegionAvail().x to query the width available in a given column. // - right-side alignment features such as SetNextItemWidth(-x) or PushItemWidth(-x) will rely on this width. // If the column is not resizable and has no width specified with TableSetupColumn(): // - its width will be automatic and be the set to the max of items submitted. // - therefore you generally cannot have ALL items of the columns use e.g. SetNextItemWidth(-FLT_MIN). // - but if the column has one or more item of known/fixed size, this will become the reference width used by SetNextItemWidth(-FLT_MIN). //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- // TABLES CLIPPING/CULLING //----------------------------------------------------------------------------- // About clipping/culling of Rows in Tables: // - For large numbers of rows, it is recommended you use ImGuiListClipper to only submit visible rows. // ImGuiListClipper is reliant on the fact that rows are of equal height. // See 'Demo->Tables->Vertical Scrolling' or 'Demo->Tables->Advanced' for a demo of using the clipper. // - Note that auto-resizing columns don't play well with using the clipper. // By default a table with _ScrollX but without _Resizable will have column auto-resize. 
// So, if you want to use the clipper, make sure to either enable _Resizable, either setup columns width explicitly with _WidthFixed. //----------------------------------------------------------------------------- // About clipping/culling of Columns in Tables: // - Both TableSetColumnIndex() and TableNextColumn() return true when the column is visible or performing // width measurements. Otherwise, you may skip submitting the contents of a cell/column, BUT ONLY if you know // it is not going to contribute to row height. // In many situations, you may skip submitting contents for every columns but one (e.g. the first one). // - Case A: column is not hidden by user, and at least partially in sight (most common case). // - Case B: column is clipped / out of sight (because of scrolling or parent ClipRect): TableNextColumn() return false as a hint but we still allow layout output. // - Case C: column is hidden explicitly by the user (e.g. via the context menu, or _DefaultHide column flag, etc.). // // [A] [B] [C] // TableNextColumn(): true false false -> [userland] when TableNextColumn() / TableSetColumnIndex() return false, user can skip submitting items but only if the column doesn't contribute to row height. // SkipItems: false false true -> [internal] when SkipItems is true, most widgets will early out if submitted, resulting is no layout output. // ClipRect: normal zero-width zero-width -> [internal] when ClipRect is zero, ItemAdd() will return false and most widgets will early out mid-way. // ImDrawList output: normal dummy dummy -> [internal] when using the dummy channel, ImDrawList submissions (if any) will be wasted (because cliprect is zero-width anyway). // // - We need distinguish those cases because non-hidden columns that are clipped outside of scrolling bounds should still contribute their height to the row. // However, in the majority of cases, the contribution to row height is the same for all columns, or the tallest cells are known by the programmer. 
//----------------------------------------------------------------------------- // About clipping/culling of whole Tables: // - Scrolling tables with a known outer size can be clipped earlier as BeginTable() will return false. //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- // [SECTION] Header mess //----------------------------------------------------------------------------- #if defined(_MSC_VER) && !defined(_CRT_SECURE_NO_WARNINGS) #define _CRT_SECURE_NO_WARNINGS #endif #include "imgui.h" #ifndef IMGUI_DISABLE #ifndef IMGUI_DEFINE_MATH_OPERATORS #define IMGUI_DEFINE_MATH_OPERATORS #endif #include "imgui_internal.h" // System includes #if defined(_MSC_VER) && _MSC_VER <= 1500 // MSVC 2008 or earlier #include <stddef.h> // intptr_t #else #include <stdint.h> // intptr_t #endif // Visual Studio warnings #ifdef _MSC_VER #pragma warning (disable: 4127) // condition expression is constant #pragma warning (disable: 4996) // 'This function or variable may be unsafe': strcpy, strdup, sprintf, vsnprintf, sscanf, fopen #if defined(_MSC_VER) && _MSC_VER >= 1922 // MSVC 2019 16.2 or later #pragma warning (disable: 5054) // operator '|': deprecated between enumerations of different types #endif #endif // Clang/GCC warnings with -Weverything #if defined(__clang__) #if __has_warning("-Wunknown-warning-option") #pragma clang diagnostic ignored "-Wunknown-warning-option" // warning: unknown warning group 'xxx' // not all warnings are known by all Clang versions and they tend to be rename-happy.. so ignoring warnings triggers new warnings on some configuration. Great! #endif #pragma clang diagnostic ignored "-Wunknown-pragmas" // warning: unknown warning group 'xxx' #pragma clang diagnostic ignored "-Wold-style-cast" // warning: use of old-style cast // yes, they are more terse. 
#pragma clang diagnostic ignored "-Wfloat-equal" // warning: comparing floating point with == or != is unsafe // storing and comparing against same constants (typically 0.0f) is ok. #pragma clang diagnostic ignored "-Wformat-nonliteral" // warning: format string is not a string literal // passing non-literal to vsnformat(). yes, user passing incorrect format strings can crash the code. #pragma clang diagnostic ignored "-Wsign-conversion" // warning: implicit conversion changes signedness #pragma clang diagnostic ignored "-Wzero-as-null-pointer-constant" // warning: zero as null pointer constant // some standard header variations use #define NULL 0 #pragma clang diagnostic ignored "-Wdouble-promotion" // warning: implicit conversion from 'float' to 'double' when passing argument to function // using printf() is a misery with this as C++ va_arg ellipsis changes float to double. #pragma clang diagnostic ignored "-Wenum-enum-conversion" // warning: bitwise operation between different enumeration types ('XXXFlags_' and 'XXXFlagsPrivate_') #pragma clang diagnostic ignored "-Wdeprecated-enum-enum-conversion"// warning: bitwise operation between different enumeration types ('XXXFlags_' and 'XXXFlagsPrivate_') is deprecated #pragma clang diagnostic ignored "-Wimplicit-int-float-conversion" // warning: implicit conversion from 'xxx' to 'float' may lose precision #elif defined(__GNUC__) #pragma GCC diagnostic ignored "-Wpragmas" // warning: unknown option after '#pragma GCC diagnostic' kind #pragma GCC diagnostic ignored "-Wformat-nonliteral" // warning: format not a string literal, format string not checked #pragma GCC diagnostic ignored "-Wclass-memaccess" // [__GNUC__ >= 8] warning: 'memset/memcpy' clearing/writing an object of type 'xxxx' with no trivial copy-assignment; use assignment or value-initialization instead #endif //----------------------------------------------------------------------------- // [SECTION] Tables: Main code 
//-----------------------------------------------------------------------------

// Configuration
static const int TABLE_DRAW_CHANNEL_BG0 = 0;
static const int TABLE_DRAW_CHANNEL_BG2_FROZEN = 1;
static const int TABLE_DRAW_CHANNEL_NOCLIP = 2;                     // When using ImGuiTableFlags_NoClip (this becomes the last visible channel)
static const float TABLE_BORDER_SIZE = 1.0f;                        // FIXME-TABLE: Currently hard-coded because of clipping assumptions with outer borders rendering.
static const float TABLE_RESIZE_SEPARATOR_HALF_THICKNESS = 4.0f;    // Extend outside inner borders.
static const float TABLE_RESIZE_SEPARATOR_FEEDBACK_TIMER = 0.06f;   // Delay/timer before making the hover feedback (color+cursor) visible because tables/columns tends to be more cramped.

// Helper
// Sanitize the user-provided table flags: fill in a default sizing policy
// when none is given, and resolve combinations of flags that contradict or
// imply each other. Pure function of (flags, outer_window); returns the
// fixed-up flags.
inline ImGuiTableFlags TableFixFlags(ImGuiTableFlags flags, ImGuiWindow* outer_window)
{
    // Adjust flags: set default sizing policy
    // (auto-resizing or horizontally-scrolling hosts default to FixedFit, otherwise StretchSame)
    if ((flags & ImGuiTableFlags_SizingMask_) == 0)
        flags |= ((flags & ImGuiTableFlags_ScrollX) || (outer_window->Flags & ImGuiWindowFlags_AlwaysAutoResize)) ? ImGuiTableFlags_SizingFixedFit : ImGuiTableFlags_SizingStretchSame;

    // Adjust flags: enable NoKeepColumnsVisible when using ImGuiTableFlags_SizingFixedSame
    if ((flags & ImGuiTableFlags_SizingMask_) == ImGuiTableFlags_SizingFixedSame)
        flags |= ImGuiTableFlags_NoKeepColumnsVisible;

    // Adjust flags: enforce borders when resizable
    if (flags & ImGuiTableFlags_Resizable)
        flags |= ImGuiTableFlags_BordersInnerV;

    // Adjust flags: disable NoHostExtendX/NoHostExtendY if we have any scrolling going on
    if (flags & (ImGuiTableFlags_ScrollX | ImGuiTableFlags_ScrollY))
        flags &= ~(ImGuiTableFlags_NoHostExtendX | ImGuiTableFlags_NoHostExtendY);

    // Adjust flags: NoBordersInBodyUntilResize takes priority over NoBordersInBody
    if (flags & ImGuiTableFlags_NoBordersInBodyUntilResize)
        flags &= ~ImGuiTableFlags_NoBordersInBody;

    // Adjust flags: disable saved settings if there's nothing to save
    if ((flags & (ImGuiTableFlags_Resizable | ImGuiTableFlags_Hideable | ImGuiTableFlags_Reorderable | ImGuiTableFlags_Sortable)) == 0)
        flags |= ImGuiTableFlags_NoSavedSettings;

    // Inherit _NoSavedSettings from top-level window (child windows always have _NoSavedSettings set)
#ifdef IMGUI_HAS_DOCK
    ImGuiWindow* window_for_settings = outer_window->RootWindowDockStop;
#else
    ImGuiWindow* window_for_settings = outer_window->RootWindow;
#endif
    if (window_for_settings->Flags & ImGuiWindowFlags_NoSavedSettings)
        flags |= ImGuiTableFlags_NoSavedSettings;

    return flags;
}

// Look up an existing table in the context's table pool by its ID.
// Returns NULL when no table with that key exists.
ImGuiTable* ImGui::TableFindByID(ImGuiID id)
{
    ImGuiContext& g = *GImGui;
    return g.Tables.GetByKey(id);
}

// Read about "TABLE SIZING" at the top of this file.
// Public entry point: resolve the string ID against the current window's ID stack and
// defer all the work to BeginTableEx().
bool ImGui::BeginTable(const char* str_id, int columns_count, ImGuiTableFlags flags, const ImVec2& outer_size, float inner_width)
{
    ImGuiID id = GetID(str_id);
    return BeginTableEx(str_id, id, columns_count, flags, outer_size, inner_width);
}

// Begin a table with an explicit ID.
// Returns false (submitting nothing) when the host window is skipping items or when a
// scrolling table's outer rectangle is fully clipped. On success: fixes up flags, acquires
// pooled storage, optionally creates the inner child window (scrolling tables), backs up
// the host window members it will modify, and makes the table current on g.CurrentTableStack.
bool ImGui::BeginTableEx(const char* name, ImGuiID id, int columns_count, ImGuiTableFlags flags, const ImVec2& outer_size, float inner_width)
{
    ImGuiContext& g = *GImGui;
    ImGuiWindow* outer_window = GetCurrentWindow();
    if (outer_window->SkipItems) // Consistent with other tables + beneficial side effect that assert on miscalling EndTable() will be more visible.
        return false;

    // Sanity checks
    IM_ASSERT(columns_count > 0 && columns_count <= IMGUI_TABLE_MAX_COLUMNS && "Only 1..64 columns allowed!");
    if (flags & ImGuiTableFlags_ScrollX)
        IM_ASSERT(inner_width >= 0.0f);

    // If an outer size is specified ahead we will be able to early out when not visible. Exact clipping rules may evolve.
    const bool use_child_window = (flags & (ImGuiTableFlags_ScrollX | ImGuiTableFlags_ScrollY)) != 0;
    const ImVec2 avail_size = GetContentRegionAvail();
    ImVec2 actual_outer_size = CalcItemSize(outer_size, ImMax(avail_size.x, 1.0f), use_child_window ? ImMax(avail_size.y, 1.0f) : 0.0f);
    ImRect outer_rect(outer_window->DC.CursorPos, outer_window->DC.CursorPos + actual_outer_size);
    if (use_child_window && IsClippedEx(outer_rect, 0, false))
    {
        ItemSize(outer_rect);
        return false;
    }

    // Acquire storage for the table
    ImGuiTable* table = g.Tables.GetOrAddByKey(id);
    // Multiple BeginTable() with the same ID within one frame become successive "instances" sharing the same storage.
    const int instance_no = (table->LastFrameActive != g.FrameCount) ? 0 : table->InstanceCurrent + 1;
    const ImGuiID instance_id = id + instance_no;
    const ImGuiTableFlags table_last_flags = table->Flags;
    if (instance_no > 0)
        IM_ASSERT(table->ColumnsCount == columns_count && "BeginTable(): Cannot change columns count mid-frame while preserving same ID");

    // Fix flags
    table->IsDefaultSizingPolicy = (flags & ImGuiTableFlags_SizingMask_) == 0;
    flags = TableFixFlags(flags, outer_window);

    // Initialize
    table->ID = id;
    table->Flags = flags;
    table->InstanceCurrent = (ImS16)instance_no;
    table->LastFrameActive = g.FrameCount;
    table->OuterWindow = table->InnerWindow = outer_window;
    table->ColumnsCount = columns_count;
    table->IsLayoutLocked = false;
    table->InnerWidth = inner_width;
    table->UserOuterSize = outer_size;

    // When not using a child window, WorkRect.Max will grow as we append contents.
    if (use_child_window)
    {
        // Ensure no vertical scrollbar appears if we only want horizontal one, to make flag consistent
        // (we have no other way to disable vertical scrollbar of a window while keeping the horizontal one showing)
        ImVec2 override_content_size(FLT_MAX, FLT_MAX);
        if ((flags & ImGuiTableFlags_ScrollX) && !(flags & ImGuiTableFlags_ScrollY))
            override_content_size.y = FLT_MIN;

        // Ensure specified width (when not specified, Stretched columns will act as if the width == OuterWidth and
        // never lead to any scrolling). We don't handle inner_width < 0.0f, we could potentially use it to right-align
        // based on the right side of the child window work rect, which would require knowing ahead if we are going to
        // have decoration taking horizontal spaces (typically a vertical scrollbar).
        if ((flags & ImGuiTableFlags_ScrollX) && inner_width > 0.0f)
            override_content_size.x = inner_width;

        if (override_content_size.x != FLT_MAX || override_content_size.y != FLT_MAX)
            SetNextWindowContentSize(ImVec2(override_content_size.x != FLT_MAX ? override_content_size.x : 0.0f, override_content_size.y != FLT_MAX ? override_content_size.y : 0.0f));

        // Reset scroll if we are reactivating it
        if ((table_last_flags & (ImGuiTableFlags_ScrollX | ImGuiTableFlags_ScrollY)) == 0)
            SetNextWindowScroll(ImVec2(0.0f, 0.0f));

        // Create scrolling region (without border and zero window padding)
        ImGuiWindowFlags child_flags = (flags & ImGuiTableFlags_ScrollX) ? ImGuiWindowFlags_HorizontalScrollbar : ImGuiWindowFlags_None;
        BeginChildEx(name, instance_id, outer_rect.GetSize(), false, child_flags);
        table->InnerWindow = g.CurrentWindow;
        table->WorkRect = table->InnerWindow->WorkRect;
        table->OuterRect = table->InnerWindow->Rect();
        table->InnerRect = table->InnerWindow->InnerRect;
        IM_ASSERT(table->InnerWindow->WindowPadding.x == 0.0f && table->InnerWindow->WindowPadding.y == 0.0f && table->InnerWindow->WindowBorderSize == 0.0f);
    }
    else
    {
        // For non-scrolling tables, WorkRect == OuterRect == InnerRect.
        // But at this point we do NOT have a correct value for .Max.y (unless a height has been explicitly passed in). It will only be updated in EndTable().
        table->WorkRect = table->OuterRect = table->InnerRect = outer_rect;
    }

    // Push a standardized ID for both child-using and not-child-using tables
    PushOverrideID(instance_id);

    // Backup a copy of host window members we will modify
    ImGuiWindow* inner_window = table->InnerWindow;
    table->HostIndentX = inner_window->DC.Indent.x;
    table->HostClipRect = inner_window->ClipRect;
    table->HostSkipItems = inner_window->SkipItems;
    table->HostBackupWorkRect = inner_window->WorkRect;
    table->HostBackupParentWorkRect = inner_window->ParentWorkRect;
    table->HostBackupColumnsOffset = outer_window->DC.ColumnsOffset;
    table->HostBackupPrevLineSize = inner_window->DC.PrevLineSize;
    table->HostBackupCurrLineSize = inner_window->DC.CurrLineSize;
    table->HostBackupCursorMaxPos = inner_window->DC.CursorMaxPos;
    table->HostBackupItemWidth = outer_window->DC.ItemWidth;
    table->HostBackupItemWidthStackSize = outer_window->DC.ItemWidthStack.Size;
    inner_window->DC.PrevLineSize = inner_window->DC.CurrLineSize = ImVec2(0.0f, 0.0f);

    // Padding and Spacing
    // - None               ........Content..... Pad .....Content........
    // - PadOuter           | Pad ..Content..... Pad .....Content.. Pad |
    // - PadInner           ........Content.. Pad | Pad ..Content........
    // - PadOuter+PadInner  | Pad ..Content.. Pad | Pad ..Content.. Pad |
    const bool pad_outer_x = (flags & ImGuiTableFlags_NoPadOuterX) ? false : (flags & ImGuiTableFlags_PadOuterX) ? true : (flags & ImGuiTableFlags_BordersOuterV) != 0;
    const bool pad_inner_x = (flags & ImGuiTableFlags_NoPadInnerX) ? false : true;
    const float inner_spacing_for_border = (flags & ImGuiTableFlags_BordersInnerV) ? TABLE_BORDER_SIZE : 0.0f;
    const float inner_spacing_explicit = (pad_inner_x && (flags & ImGuiTableFlags_BordersInnerV) == 0) ? g.Style.CellPadding.x : 0.0f;
    const float inner_padding_explicit = (pad_inner_x && (flags & ImGuiTableFlags_BordersInnerV) != 0) ? g.Style.CellPadding.x : 0.0f;
    table->CellSpacingX1 = inner_spacing_explicit + inner_spacing_for_border;
    table->CellSpacingX2 = inner_spacing_explicit;
    table->CellPaddingX = inner_padding_explicit;
    table->CellPaddingY = g.Style.CellPadding.y;

    const float outer_padding_for_border = (flags & ImGuiTableFlags_BordersOuterV) ? TABLE_BORDER_SIZE : 0.0f;
    const float outer_padding_explicit = pad_outer_x ? g.Style.CellPadding.x : 0.0f;
    table->OuterPaddingX = (outer_padding_for_border + outer_padding_explicit) - table->CellPaddingX;

    table->CurrentColumn = -1;
    table->CurrentRow = -1;
    table->RowBgColorCounter = 0;
    table->LastRowFlags = ImGuiTableRowFlags_None;
    table->InnerClipRect = (inner_window == outer_window) ? table->WorkRect : inner_window->ClipRect;
    table->InnerClipRect.ClipWith(table->WorkRect);     // We need this to honor inner_width
    table->InnerClipRect.ClipWithFull(table->HostClipRect);
    table->InnerClipRect.Max.y = (flags & ImGuiTableFlags_NoHostExtendY) ? ImMin(table->InnerClipRect.Max.y, inner_window->WorkRect.Max.y) : inner_window->ClipRect.Max.y;

    table->RowPosY1 = table->RowPosY2 = table->WorkRect.Min.y; // This is needed somehow
    table->RowTextBaseline = 0.0f; // This will be cleared again by TableBeginRow()
    table->FreezeRowsRequest = table->FreezeRowsCount = 0; // This will be setup by TableSetupScrollFreeze(), if any
    table->FreezeColumnsRequest = table->FreezeColumnsCount = 0;
    table->IsUnfrozenRows = true;
    table->DeclColumnsCount = 0;

    // Using opaque colors facilitate overlapping elements of the grid
    table->BorderColorStrong = GetColorU32(ImGuiCol_TableBorderStrong);
    table->BorderColorLight = GetColorU32(ImGuiCol_TableBorderLight);

    // Make table current
    const int table_idx = g.Tables.GetIndex(table);
    g.CurrentTableStack.push_back(ImGuiPtrOrIndex(table_idx));
    g.CurrentTable = table;
    outer_window->DC.CurrentTableIdx = table_idx;
    if (inner_window != outer_window) // So EndChild() within the inner window can restore the table properly.
        inner_window->DC.CurrentTableIdx = table_idx;

    // Dropping the Reorderable flag since last frame queues a display-order reset (handled in TableBeginApplyRequests()).
    if ((table_last_flags & ImGuiTableFlags_Reorderable) && (flags & ImGuiTableFlags_Reorderable) == 0)
        table->IsResetDisplayOrderRequest = true;

    // Mark as used
    if (table_idx >= g.TablesLastTimeActive.Size)
        g.TablesLastTimeActive.resize(table_idx + 1, -1.0f);
    g.TablesLastTimeActive[table_idx] = (float)g.Time;
    table->MemoryCompacted = false;

    // Setup memory buffer (clear data if columns count changed)
    const int stored_size = table->Columns.size();
    if (stored_size != 0 && stored_size != columns_count)
    {
        IM_FREE(table->RawData);
        table->RawData = NULL;
    }
    if (table->RawData == NULL)
    {
        TableBeginInitMemory(table, columns_count);
        table->IsInitializing = table->IsSettingsRequestLoad = true;
    }
    if (table->IsResetAllRequest)
        TableResetSettings(table);
    if (table->IsInitializing)
    {
        // Initialize. Note that WidthAuto is deliberately carried over across the column reset below.
        table->SettingsOffset = -1;
        table->IsSortSpecsDirty = true;
        table->InstanceInteracted = -1;
        table->ContextPopupColumn = -1;
        table->ReorderColumn = table->ResizedColumn = table->LastResizedColumn = -1;
        table->AutoFitSingleColumn = -1;
        table->HoveredColumnBody = table->HoveredColumnBorder = -1;
        for (int n = 0; n < columns_count; n++)
        {
            ImGuiTableColumn* column = &table->Columns[n];
            float width_auto = column->WidthAuto;
            *column = ImGuiTableColumn();
            column->WidthAuto = width_auto;
            column->IsPreserveWidthAuto = true; // Preserve WidthAuto when reinitializing a live table: not technically necessary but remove a visible flicker
            column->DisplayOrder = table->DisplayOrderToIndex[n] = (ImGuiTableColumnIdx)n;
            column->IsEnabled = column->IsEnabledNextFrame = true;
        }
    }

    // Load settings
    if (table->IsSettingsRequestLoad)
        TableLoadSettings(table);

    // Handle DPI/font resize
    // This is designed to facilitate DPI changes with the assumption that e.g. style.CellPadding has been scaled as well.
    // It will also react to changing fonts with mixed results. It doesn't need to be perfect but merely provide a decent transition.
    // FIXME-DPI: Provide consistent standards for reference size. Perhaps using g.CurrentDpiScale would be more self explanatory.
    // This is will lead us to non-rounded WidthRequest in columns, which should work but is a poorly tested path.
    const float new_ref_scale_unit = g.FontSize; // g.Font->GetCharAdvance('A') ?
    if (table->RefScale != 0.0f && table->RefScale != new_ref_scale_unit)
    {
        const float scale_factor = new_ref_scale_unit / table->RefScale;
        //IMGUI_DEBUG_LOG("[table] %08X RefScaleUnit %.3f -> %.3f, scaling width by %.3f\n", table->ID, table->RefScaleUnit, new_ref_scale_unit, scale_factor);
        for (int n = 0; n < columns_count; n++)
            table->Columns[n].WidthRequest = table->Columns[n].WidthRequest * scale_factor;
    }
    table->RefScale = new_ref_scale_unit;

    // Disable output until user calls TableNextRow() or TableNextColumn() leading to the TableUpdateLayout() call..
    // This is not strictly necessary but will reduce cases were "out of table" output will be misleading to the user.
    // Because we cannot safely assert in EndTable() when no rows have been created, this seems like our best option.
    inner_window->SkipItems = true;

    // Clear names
    // At this point the ->NameOffset field of each column will be invalid until TableUpdateLayout() or the first call to TableSetupColumn()
    if (table->ColumnsNames.Buf.Size > 0)
        table->ColumnsNames.Buf.resize(0);

    // Apply queued resizing/reordering/hiding requests
    TableBeginApplyRequests(table);

    return true;
}

// For reference, the average total _allocation count_ for a table is:
// + 0 (for ImGuiTable instance, we are pooling allocations in g.Tables)
// + 1 (for table->RawData allocated below)
// + 1 (for table->ColumnsNames, if names are used)
// + 1 (for table->Splitter._Channels)
// + 2 * active_channels_count (for ImDrawCmd and ImDrawIdx buffers inside channels)
// Where active_channels_count is variable but often == columns_count or columns_count + 1, see TableSetupDrawChannels() for details.
// Unused channels don't perform their +2 allocations.
// Allocate the three per-table arrays (Columns, DisplayOrderToIndex, RowCellData) from a
// single zero-initialized arena buffer stored in table->RawData, carving out one span each.
void ImGui::TableBeginInitMemory(ImGuiTable* table, int columns_count)
{
    // Allocate single buffer for our arrays
    ImSpanAllocator<3> span_allocator;
    span_allocator.Reserve(0, columns_count * sizeof(ImGuiTableColumn));
    span_allocator.Reserve(1, columns_count * sizeof(ImGuiTableColumnIdx));
    span_allocator.Reserve(2, columns_count * sizeof(ImGuiTableCellData), 4);
    table->RawData = IM_ALLOC(span_allocator.GetArenaSizeInBytes());
    memset(table->RawData, 0, span_allocator.GetArenaSizeInBytes());
    span_allocator.SetArenaBasePtr(table->RawData);
    span_allocator.GetSpan(0, &table->Columns);
    span_allocator.GetSpan(1, &table->DisplayOrderToIndex);
    span_allocator.GetSpan(2, &table->RowCellData);
}

// Apply queued resizing/reordering/hiding requests
// Called from BeginTableEx(). Requests queued during the previous frame (from headers or
// the context menu) are applied here, only for the first instance (InstanceCurrent == 0).
void ImGui::TableBeginApplyRequests(ImGuiTable* table)
{
    // Handle resizing request
    // (We process this at the first TableBegin of the frame)
    // FIXME-TABLE: Contains columns if our work area doesn't allow for scrolling?
    if (table->InstanceCurrent == 0)
    {
        if (table->ResizedColumn != -1 && table->ResizedColumnNextWidth != FLT_MAX)
            TableSetColumnWidth(table->ResizedColumn, table->ResizedColumnNextWidth);
        table->LastResizedColumn = table->ResizedColumn;
        table->ResizedColumnNextWidth = FLT_MAX;
        table->ResizedColumn = -1;

        // Process auto-fit for single column, which is a special case for stretch columns and fixed columns with FixedSame policy.
        // FIXME-TABLE: Would be nice to redistribute available stretch space accordingly to other weights, instead of giving it all to siblings.
        if (table->AutoFitSingleColumn != -1)
        {
            TableSetColumnWidth(table->AutoFitSingleColumn, table->Columns[table->AutoFitSingleColumn].WidthAuto);
            table->AutoFitSingleColumn = -1;
        }
    }

    // Handle reordering request
    // Note: we don't clear ReorderColumn after handling the request.
    if (table->InstanceCurrent == 0)
    {
        if (table->HeldHeaderColumn == -1 && table->ReorderColumn != -1)
            table->ReorderColumn = -1;
        table->HeldHeaderColumn = -1;
        if (table->ReorderColumn != -1 && table->ReorderColumnDir != 0)
        {
            // We need to handle reordering across hidden columns.
            // In the configuration below, moving C to the right of E will lead to:
            //    ... C [D] E  --->  ... [D] E  C   (Column name/index)
            //    ... 2  3  4        ...  2  3  4   (Display order)
            const int reorder_dir = table->ReorderColumnDir;
            IM_ASSERT(reorder_dir == -1 || reorder_dir == +1);
            IM_ASSERT(table->Flags & ImGuiTableFlags_Reorderable);
            // Destination is the nearest *enabled* neighbor in the requested direction, so hidden columns are skipped over.
            ImGuiTableColumn* src_column = &table->Columns[table->ReorderColumn];
            ImGuiTableColumn* dst_column = &table->Columns[(reorder_dir == -1) ? src_column->PrevEnabledColumn : src_column->NextEnabledColumn];
            IM_UNUSED(dst_column);
            const int src_order = src_column->DisplayOrder;
            const int dst_order = dst_column->DisplayOrder;
            // Move src to dst's slot and shift every column in between by one step back toward src's old slot.
            src_column->DisplayOrder = (ImGuiTableColumnIdx)dst_order;
            for (int order_n = src_order + reorder_dir; order_n != dst_order + reorder_dir; order_n += reorder_dir)
                table->Columns[table->DisplayOrderToIndex[order_n]].DisplayOrder -= (ImGuiTableColumnIdx)reorder_dir;
            IM_ASSERT(dst_column->DisplayOrder == dst_order - reorder_dir);

            // Display order is stored in both columns->IndexDisplayOrder and table->DisplayOrder[],
            // rebuild the later from the former.
            for (int column_n = 0; column_n < table->ColumnsCount; column_n++)
                table->DisplayOrderToIndex[table->Columns[column_n].DisplayOrder] = (ImGuiTableColumnIdx)column_n;
            table->ReorderColumnDir = 0;
            table->IsSettingsDirty = true;
        }
    }

    // Handle display order reset request
    if (table->IsResetDisplayOrderRequest)
    {
        for (int n = 0; n < table->ColumnsCount; n++)
            table->DisplayOrderToIndex[n] = table->Columns[n].DisplayOrder = (ImGuiTableColumnIdx)n;
        table->IsResetDisplayOrderRequest = false;
        table->IsSettingsDirty = true;
    }
}

// Adjust flags: default width mode + stretch columns are not allowed when auto extending
// Resolve per-column flags from user-submitted flags + table flags, then rebuild the
// column's ordered list of available sort directions.
static void TableSetupColumnFlags(ImGuiTable* table, ImGuiTableColumn* column, ImGuiTableColumnFlags flags_in)
{
    ImGuiTableColumnFlags flags = flags_in;

    // Sizing Policy: default to the table-level policy when no per-column width flag was given.
    if ((flags & ImGuiTableColumnFlags_WidthMask_) == 0)
    {
        const ImGuiTableFlags table_sizing_policy = (table->Flags & ImGuiTableFlags_SizingMask_);
        if (table_sizing_policy == ImGuiTableFlags_SizingFixedFit || table_sizing_policy == ImGuiTableFlags_SizingFixedSame)
            flags |= ImGuiTableColumnFlags_WidthFixed;
        else
            flags |= ImGuiTableColumnFlags_WidthStretch;
    }
    else
    {
        IM_ASSERT(ImIsPowerOfTwo(flags & ImGuiTableColumnFlags_WidthMask_)); // Check that only 1 of each set is used.
    }

    // Resize
    if ((table->Flags & ImGuiTableFlags_Resizable) == 0)
        flags |= ImGuiTableColumnFlags_NoResize;

    // Sorting: forbidding both directions amounts to forbidding sorting altogether.
    if ((flags & ImGuiTableColumnFlags_NoSortAscending) && (flags & ImGuiTableColumnFlags_NoSortDescending))
        flags |= ImGuiTableColumnFlags_NoSort;

    // Indentation: by default only the first column indents.
    if ((flags & ImGuiTableColumnFlags_IndentMask_) == 0)
        flags |= (table->Columns.index_from_ptr(column) == 0) ? ImGuiTableColumnFlags_IndentEnable : ImGuiTableColumnFlags_IndentDisable;

    // Alignment
    //if ((flags & ImGuiTableColumnFlags_AlignMask_) == 0)
    //    flags |= ImGuiTableColumnFlags_AlignCenter;
    //IM_ASSERT(ImIsPowerOfTwo(flags & ImGuiTableColumnFlags_AlignMask_)); // Check that only 1 of each set is used.

    // Preserve status flags
    column->Flags = flags | (column->Flags & ImGuiTableColumnFlags_StatusMask_);

    // Build an ordered list of available sort directions.
    // Each direction occupies 2 bits in SortDirectionsAvailList; preferred directions are packed first.
    column->SortDirectionsAvailCount = column->SortDirectionsAvailMask = column->SortDirectionsAvailList = 0;
    if (table->Flags & ImGuiTableFlags_Sortable)
    {
        int count = 0, mask = 0, list = 0;
        if ((flags & ImGuiTableColumnFlags_PreferSortAscending)  != 0 && (flags & ImGuiTableColumnFlags_NoSortAscending)  == 0) { mask |= 1 << ImGuiSortDirection_Ascending;  list |= ImGuiSortDirection_Ascending  << (count << 1); count++; }
        if ((flags & ImGuiTableColumnFlags_PreferSortDescending) != 0 && (flags & ImGuiTableColumnFlags_NoSortDescending) == 0) { mask |= 1 << ImGuiSortDirection_Descending; list |= ImGuiSortDirection_Descending << (count << 1); count++; }
        if ((flags & ImGuiTableColumnFlags_PreferSortAscending)  == 0 && (flags & ImGuiTableColumnFlags_NoSortAscending)  == 0) { mask |= 1 << ImGuiSortDirection_Ascending;  list |= ImGuiSortDirection_Ascending  << (count << 1); count++; }
        if ((flags & ImGuiTableColumnFlags_PreferSortDescending) == 0 && (flags & ImGuiTableColumnFlags_NoSortDescending) == 0) { mask |= 1 << ImGuiSortDirection_Descending; list |= ImGuiSortDirection_Descending << (count << 1); count++; }
        if ((table->Flags & ImGuiTableFlags_SortTristate) || count == 0) { mask |= 1 << ImGuiSortDirection_None; count++; }
        column->SortDirectionsAvailList = (ImU8)list;
        column->SortDirectionsAvailMask = (ImU8)mask;
        column->SortDirectionsAvailCount = (ImU8)count;
        ImGui::TableFixColumnSortDirection(table, column);
    }
}

// Layout columns for the frame. This is in essence the followup to BeginTable().
// Runs on the first call to TableNextRow(), to give a chance for TableSetupColumn() to be called first.
// FIXME-TABLE: Our width (and therefore our WorkRect) will be minimal in the first frame for _WidthAuto columns.
// Increase feedback side-effect with widgets relying on WorkRect.Max.x...
Maybe provide a default distribution for _WidthAuto columns? void ImGui::TableUpdateLayout(ImGuiTable* table) { ImGuiContext& g = *GImGui; IM_ASSERT(table->IsLayoutLocked == false); const ImGuiTableFlags table_sizing_policy = (table->Flags & ImGuiTableFlags_SizingMask_); table->IsDefaultDisplayOrder = true; table->ColumnsEnabledCount = 0; table->EnabledMaskByIndex = 0x00; table->EnabledMaskByDisplayOrder = 0x00; table->LeftMostEnabledColumn = -1; table->MinColumnWidth = ImMax(1.0f, g.Style.FramePadding.x * 1.0f); // g.Style.ColumnsMinSpacing; // FIXME-TABLE // [Part 1] Apply/lock Enabled and Order states. Calculate auto/ideal width for columns. Count fixed/stretch columns. // Process columns in their visible orders as we are building the Prev/Next indices. int count_fixed = 0; // Number of columns that have fixed sizing policies int count_stretch = 0; // Number of columns that have stretch sizing policies int prev_visible_column_idx = -1; bool has_auto_fit_request = false; bool has_resizable = false; float stretch_sum_width_auto = 0.0f; float fixed_max_width_auto = 0.0f; for (int order_n = 0; order_n < table->ColumnsCount; order_n++) { const int column_n = table->DisplayOrderToIndex[order_n]; if (column_n != order_n) table->IsDefaultDisplayOrder = false; ImGuiTableColumn* column = &table->Columns[column_n]; // Clear column setup if not submitted by user. Currently we make it mandatory to call TableSetupColumn() every frame. // It would easily work without but we're not ready to guarantee it since e.g. names need resubmission anyway. // We take a slight shortcut but in theory we could be calling TableSetupColumn() here with dummy values, it should yield the same effect. 
if (table->DeclColumnsCount <= column_n) { TableSetupColumnFlags(table, column, ImGuiTableColumnFlags_None); column->NameOffset = -1; column->UserID = 0; column->InitStretchWeightOrWidth = -1.0f; } // Update Enabled state, mark settings/sortspecs dirty if (!(table->Flags & ImGuiTableFlags_Hideable) || (column->Flags & ImGuiTableColumnFlags_NoHide)) column->IsEnabledNextFrame = true; if (column->IsEnabled != column->IsEnabledNextFrame) { column->IsEnabled = column->IsEnabledNextFrame; table->IsSettingsDirty = true; if (!column->IsEnabled && column->SortOrder != -1) table->IsSortSpecsDirty = true; } if (column->SortOrder > 0 && !(table->Flags & ImGuiTableFlags_SortMulti)) table->IsSortSpecsDirty = true; // Auto-fit unsized columns const bool start_auto_fit = (column->Flags & ImGuiTableColumnFlags_WidthFixed) ? (column->WidthRequest < 0.0f) : (column->StretchWeight < 0.0f); if (start_auto_fit) column->AutoFitQueue = column->CannotSkipItemsQueue = (1 << 3) - 1; // Fit for three frames if (!column->IsEnabled) { column->IndexWithinEnabledSet = -1; continue; } // Mark as enabled and link to previous/next enabled column column->PrevEnabledColumn = (ImGuiTableColumnIdx)prev_visible_column_idx; column->NextEnabledColumn = -1; if (prev_visible_column_idx != -1) table->Columns[prev_visible_column_idx].NextEnabledColumn = (ImGuiTableColumnIdx)column_n; else table->LeftMostEnabledColumn = (ImGuiTableColumnIdx)column_n; column->IndexWithinEnabledSet = table->ColumnsEnabledCount++; table->EnabledMaskByIndex |= (ImU64)1 << column_n; table->EnabledMaskByDisplayOrder |= (ImU64)1 << column->DisplayOrder; prev_visible_column_idx = column_n; IM_ASSERT(column->IndexWithinEnabledSet <= column->DisplayOrder); // Calculate ideal/auto column width (that's the width required for all contents to be visible without clipping) // Combine width from regular rows + width from headers unless requested not to. 
if (!column->IsPreserveWidthAuto) column->WidthAuto = TableGetColumnWidthAuto(table, column); // Non-resizable columns keep their requested width (apply user value regardless of IsPreserveWidthAuto) const bool column_is_resizable = (column->Flags & ImGuiTableColumnFlags_NoResize) == 0; if (column_is_resizable) has_resizable = true; if ((column->Flags & ImGuiTableColumnFlags_WidthFixed) && column->InitStretchWeightOrWidth > 0.0f && !column_is_resizable) column->WidthAuto = column->InitStretchWeightOrWidth; if (column->AutoFitQueue != 0x00) has_auto_fit_request = true; if (column->Flags & ImGuiTableColumnFlags_WidthStretch) { stretch_sum_width_auto += column->WidthAuto; count_stretch++; } else { fixed_max_width_auto = ImMax(fixed_max_width_auto, column->WidthAuto); count_fixed++; } } if ((table->Flags & ImGuiTableFlags_Sortable) && table->SortSpecsCount == 0 && !(table->Flags & ImGuiTableFlags_SortTristate)) table->IsSortSpecsDirty = true; table->RightMostEnabledColumn = (ImGuiTableColumnIdx)prev_visible_column_idx; IM_ASSERT(table->LeftMostEnabledColumn >= 0 && table->RightMostEnabledColumn >= 0); // [Part 2] Disable child window clipping while fitting columns. This is not strictly necessary but makes it possible // to avoid the column fitting having to wait until the first visible frame of the child container (may or not be a good thing). // FIXME-TABLE: for always auto-resizing columns may not want to do that all the time. if (has_auto_fit_request && table->OuterWindow != table->InnerWindow) table->InnerWindow->SkipItems = false; if (has_auto_fit_request) table->IsSettingsDirty = true; // [Part 3] Fix column flags and record a few extra information. float sum_width_requests = 0.0f; // Sum of all width for fixed and auto-resize columns, excluding width contributed by Stretch columns but including spacing/padding. float stretch_sum_weights = 0.0f; // Sum of all weights for stretch columns. 
table->LeftMostStretchedColumn = table->RightMostStretchedColumn = -1; for (int column_n = 0; column_n < table->ColumnsCount; column_n++) { if (!(table->EnabledMaskByIndex & ((ImU64)1 << column_n))) continue; ImGuiTableColumn* column = &table->Columns[column_n]; const bool column_is_resizable = (column->Flags & ImGuiTableColumnFlags_NoResize) == 0; if (column->Flags & ImGuiTableColumnFlags_WidthFixed) { // Apply same widths policy float width_auto = column->WidthAuto; if (table_sizing_policy == ImGuiTableFlags_SizingFixedSame && (column->AutoFitQueue != 0x00 || !column_is_resizable)) width_auto = fixed_max_width_auto; // Apply automatic width // Latch initial size for fixed columns and update it constantly for auto-resizing column (unless clipped!) if (column->AutoFitQueue != 0x00) column->WidthRequest = width_auto; else if ((column->Flags & ImGuiTableColumnFlags_WidthFixed) && !column_is_resizable && (table->RequestOutputMaskByIndex & ((ImU64)1 << column_n))) column->WidthRequest = width_auto; // FIXME-TABLE: Increase minimum size during init frame to avoid biasing auto-fitting widgets // (e.g. TextWrapped) too much. Otherwise what tends to happen is that TextWrapped would output a very // large height (= first frame scrollbar display very off + clipper would skip lots of items). // This is merely making the side-effect less extreme, but doesn't properly fixes it. // FIXME: Move this to ->WidthGiven to avoid temporary lossyless? // FIXME: This break IsPreserveWidthAuto from not flickering if the stored WidthAuto was smaller. if (column->AutoFitQueue > 0x01 && table->IsInitializing && !column->IsPreserveWidthAuto) column->WidthRequest = ImMax(column->WidthRequest, table->MinColumnWidth * 4.0f); // FIXME-TABLE: Another constant/scale? 
sum_width_requests += column->WidthRequest; } else { // Initialize stretch weight if (column->AutoFitQueue != 0x00 || column->StretchWeight < 0.0f || !column_is_resizable) { if (column->InitStretchWeightOrWidth > 0.0f) column->StretchWeight = column->InitStretchWeightOrWidth; else if (table_sizing_policy == ImGuiTableFlags_SizingStretchProp) column->StretchWeight = (column->WidthAuto / stretch_sum_width_auto) * count_stretch; else column->StretchWeight = 1.0f; } stretch_sum_weights += column->StretchWeight; if (table->LeftMostStretchedColumn == -1 || table->Columns[table->LeftMostStretchedColumn].DisplayOrder > column->DisplayOrder) table->LeftMostStretchedColumn = (ImGuiTableColumnIdx)column_n; if (table->RightMostStretchedColumn == -1 || table->Columns[table->RightMostStretchedColumn].DisplayOrder < column->DisplayOrder) table->RightMostStretchedColumn = (ImGuiTableColumnIdx)column_n; } column->IsPreserveWidthAuto = false; sum_width_requests += table->CellPaddingX * 2.0f; } table->ColumnsEnabledFixedCount = (ImGuiTableColumnIdx)count_fixed; // [Part 4] Apply final widths based on requested widths const ImRect work_rect = table->WorkRect; const float width_spacings = (table->OuterPaddingX * 2.0f) + (table->CellSpacingX1 + table->CellSpacingX2) * (table->ColumnsEnabledCount - 1); const float width_avail = ((table->Flags & ImGuiTableFlags_ScrollX) && table->InnerWidth == 0.0f) ? 
table->InnerClipRect.GetWidth() : work_rect.GetWidth(); const float width_avail_for_stretched_columns = width_avail - width_spacings - sum_width_requests; float width_remaining_for_stretched_columns = width_avail_for_stretched_columns; table->ColumnsGivenWidth = width_spacings + (table->CellPaddingX * 2.0f) * table->ColumnsEnabledCount; for (int column_n = 0; column_n < table->ColumnsCount; column_n++) { if (!(table->EnabledMaskByIndex & ((ImU64)1 << column_n))) continue; ImGuiTableColumn* column = &table->Columns[column_n]; // Allocate width for stretched/weighted columns (StretchWeight gets converted into WidthRequest) if (column->Flags & ImGuiTableColumnFlags_WidthStretch) { float weight_ratio = column->StretchWeight / stretch_sum_weights; column->WidthRequest = IM_FLOOR(ImMax(width_avail_for_stretched_columns * weight_ratio, table->MinColumnWidth) + 0.01f); width_remaining_for_stretched_columns -= column->WidthRequest; } // [Resize Rule 1] The right-most Visible column is not resizable if there is at least one Stretch column // See additional comments in TableSetColumnWidth(). if (column->NextEnabledColumn == -1 && table->LeftMostStretchedColumn != -1) column->Flags |= ImGuiTableColumnFlags_NoDirectResize_; // Assign final width, record width in case we will need to shrink column->WidthGiven = ImFloor(ImMax(column->WidthRequest, table->MinColumnWidth)); table->ColumnsGivenWidth += column->WidthGiven; } // [Part 5] Redistribute stretch remainder width due to rounding (remainder width is < 1.0f * number of Stretch column). // Using right-to-left distribution (more likely to match resizing cursor). 
if (width_remaining_for_stretched_columns >= 1.0f && !(table->Flags & ImGuiTableFlags_PreciseWidths)) for (int order_n = table->ColumnsCount - 1; stretch_sum_weights > 0.0f && width_remaining_for_stretched_columns >= 1.0f && order_n >= 0; order_n--) { if (!(table->EnabledMaskByDisplayOrder & ((ImU64)1 << order_n))) continue; ImGuiTableColumn* column = &table->Columns[table->DisplayOrderToIndex[order_n]]; if (!(column->Flags & ImGuiTableColumnFlags_WidthStretch)) continue; column->WidthRequest += 1.0f; column->WidthGiven += 1.0f; width_remaining_for_stretched_columns -= 1.0f; } table->HoveredColumnBody = -1; table->HoveredColumnBorder = -1; const ImRect mouse_hit_rect(table->OuterRect.Min.x, table->OuterRect.Min.y, table->OuterRect.Max.x, ImMax(table->OuterRect.Max.y, table->OuterRect.Min.y + table->LastOuterHeight)); const bool is_hovering_table = ItemHoverable(mouse_hit_rect, 0); // [Part 6] Setup final position, offset, skip/clip states and clipping rectangles, detect hovered column // Process columns in their visible orders as we are comparing the visible order and adjusting host_clip_rect while looping. int visible_n = 0; bool offset_x_frozen = (table->FreezeColumnsCount > 0); float offset_x = ((table->FreezeColumnsCount > 0) ? table->OuterRect.Min.x : work_rect.Min.x) + table->OuterPaddingX - table->CellSpacingX1; ImRect host_clip_rect = table->InnerClipRect; //host_clip_rect.Max.x += table->CellPaddingX + table->CellSpacingX2; table->VisibleMaskByIndex = 0x00; table->RequestOutputMaskByIndex = 0x00; for (int order_n = 0; order_n < table->ColumnsCount; order_n++) { const int column_n = table->DisplayOrderToIndex[order_n]; ImGuiTableColumn* column = &table->Columns[column_n]; column->NavLayerCurrent = (ImS8)((table->FreezeRowsCount > 0 || column_n < table->FreezeColumnsCount) ? 
ImGuiNavLayer_Menu : ImGuiNavLayer_Main); if (offset_x_frozen && table->FreezeColumnsCount == visible_n) { offset_x += work_rect.Min.x - table->OuterRect.Min.x; offset_x_frozen = false; } // Clear status flags column->Flags &= ~ImGuiTableColumnFlags_StatusMask_; if ((table->EnabledMaskByDisplayOrder & ((ImU64)1 << order_n)) == 0) { // Hidden column: clear a few fields and we are done with it for the remainder of the function. // We set a zero-width clip rect but set Min.y/Max.y properly to not interfere with the clipper. column->MinX = column->MaxX = column->WorkMinX = column->ClipRect.Min.x = column->ClipRect.Max.x = offset_x; column->WidthGiven = 0.0f; column->ClipRect.Min.y = work_rect.Min.y; column->ClipRect.Max.y = FLT_MAX; column->ClipRect.ClipWithFull(host_clip_rect); column->IsVisibleX = column->IsVisibleY = column->IsRequestOutput = false; column->IsSkipItems = true; column->ItemWidth = 1.0f; continue; } // Detect hovered column if (is_hovering_table && g.IO.MousePos.x >= column->ClipRect.Min.x && g.IO.MousePos.x < column->ClipRect.Max.x) table->HoveredColumnBody = (ImGuiTableColumnIdx)column_n; // Lock start position column->MinX = offset_x; // Lock width based on start position and minimum/maximum width for this position float max_width = TableGetMaxColumnWidth(table, column_n); column->WidthGiven = ImMin(column->WidthGiven, max_width); column->WidthGiven = ImMax(column->WidthGiven, ImMin(column->WidthRequest, table->MinColumnWidth)); column->MaxX = offset_x + column->WidthGiven + table->CellSpacingX1 + table->CellSpacingX2 + table->CellPaddingX * 2.0f; // Lock other positions // - ClipRect.Min.x: Because merging draw commands doesn't compare min boundaries, we make ClipRect.Min.x match left bounds to be consistent regardless of merging. // - ClipRect.Max.x: using WorkMaxX instead of MaxX (aka including padding) makes things more consistent when resizing down, tho slightly detrimental to visibility in very-small column. 
// - ClipRect.Max.x: using MaxX makes it easier for header to receive hover highlight with no discontinuity and display sorting arrow. // - FIXME-TABLE: We want equal width columns to have equal (ClipRect.Max.x - WorkMinX) width, which means ClipRect.max.x cannot stray off host_clip_rect.Max.x else right-most column may appear shorter. column->WorkMinX = column->MinX + table->CellPaddingX + table->CellSpacingX1; column->WorkMaxX = column->MaxX - table->CellPaddingX - table->CellSpacingX2; // Expected max column->ItemWidth = ImFloor(column->WidthGiven * 0.65f); column->ClipRect.Min.x = column->MinX; column->ClipRect.Min.y = work_rect.Min.y; column->ClipRect.Max.x = column->MaxX; //column->WorkMaxX; column->ClipRect.Max.y = FLT_MAX; column->ClipRect.ClipWithFull(host_clip_rect); // Mark column as Clipped (not in sight) // Note that scrolling tables (where inner_window != outer_window) handle Y clipped earlier in BeginTable() so IsVisibleY really only applies to non-scrolling tables. // FIXME-TABLE: Because InnerClipRect.Max.y is conservatively ==outer_window->ClipRect.Max.y, we never can mark columns _Above_ the scroll line as not IsVisibleY. // Taking advantage of LastOuterHeight would yield good results there... // FIXME-TABLE: Y clipping is disabled because it effectively means not submitting will reduce contents width which is fed to outer_window->DC.CursorMaxPos.x, // and this may be used (e.g. typically by outer_window using AlwaysAutoResize or outer_window's horizontal scrollbar, but could be something else). // Possible solution to preserve last known content width for clipped column. Test 'table_reported_size' fails when enabling Y clipping and window is resized small. 
column->IsVisibleX = (column->ClipRect.Max.x > column->ClipRect.Min.x); column->IsVisibleY = true; // (column->ClipRect.Max.y > column->ClipRect.Min.y); const bool is_visible = column->IsVisibleX; //&& column->IsVisibleY; if (is_visible) table->VisibleMaskByIndex |= ((ImU64)1 << column_n); // Mark column as requesting output from user. Note that fixed + non-resizable sets are auto-fitting at all times and therefore always request output. column->IsRequestOutput = is_visible || column->AutoFitQueue != 0 || column->CannotSkipItemsQueue != 0; if (column->IsRequestOutput) table->RequestOutputMaskByIndex |= ((ImU64)1 << column_n); // Mark column as SkipItems (ignoring all items/layout) column->IsSkipItems = !column->IsEnabled || table->HostSkipItems; if (column->IsSkipItems) IM_ASSERT(!is_visible); // Update status flags column->Flags |= ImGuiTableColumnFlags_IsEnabled; if (is_visible) column->Flags |= ImGuiTableColumnFlags_IsVisible; if (column->SortOrder != -1) column->Flags |= ImGuiTableColumnFlags_IsSorted; if (table->HoveredColumnBody == column_n) column->Flags |= ImGuiTableColumnFlags_IsHovered; // Alignment // FIXME-TABLE: This align based on the whole column width, not per-cell, and therefore isn't useful in // many cases (to be able to honor this we might be able to store a log of cells width, per row, for // visible rows, but nav/programmatic scroll would have visible artifacts.) 
//if (column->Flags & ImGuiTableColumnFlags_AlignRight) // column->WorkMinX = ImMax(column->WorkMinX, column->MaxX - column->ContentWidthRowsUnfrozen); //else if (column->Flags & ImGuiTableColumnFlags_AlignCenter) // column->WorkMinX = ImLerp(column->WorkMinX, ImMax(column->StartX, column->MaxX - column->ContentWidthRowsUnfrozen), 0.5f); // Reset content width variables column->ContentMaxXFrozen = column->ContentMaxXUnfrozen = column->WorkMinX; column->ContentMaxXHeadersUsed = column->ContentMaxXHeadersIdeal = column->WorkMinX; // Don't decrement auto-fit counters until container window got a chance to submit its items if (table->HostSkipItems == false) { column->AutoFitQueue >>= 1; column->CannotSkipItemsQueue >>= 1; } if (visible_n < table->FreezeColumnsCount) host_clip_rect.Min.x = ImClamp(column->MaxX + TABLE_BORDER_SIZE, host_clip_rect.Min.x, host_clip_rect.Max.x); offset_x += column->WidthGiven + table->CellSpacingX1 + table->CellSpacingX2 + table->CellPaddingX * 2.0f; visible_n++; } // [Part 7] Detect/store when we are hovering the unused space after the right-most column (so e.g. context menus can react on it) // Clear Resizable flag if none of our column are actually resizable (either via an explicit _NoResize flag, either // because of using _WidthAuto/_WidthStretch). This will hide the resizing option from the context menu. const float unused_x1 = ImMax(table->WorkRect.Min.x, table->Columns[table->RightMostEnabledColumn].ClipRect.Max.x); if (is_hovering_table && table->HoveredColumnBody == -1) { if (g.IO.MousePos.x >= unused_x1) table->HoveredColumnBody = (ImGuiTableColumnIdx)table->ColumnsCount; } if (has_resizable == false && (table->Flags & ImGuiTableFlags_Resizable)) table->Flags &= ~ImGuiTableFlags_Resizable; // [Part 8] Lock actual OuterRect/WorkRect right-most position. // This is done late to handle the case of fixed-columns tables not claiming more widths that they need. 
// Because of this we are careful with uses of WorkRect and InnerClipRect before this point. if (table->RightMostStretchedColumn != -1) table->Flags &= ~ImGuiTableFlags_NoHostExtendX; if (table->Flags & ImGuiTableFlags_NoHostExtendX) { table->OuterRect.Max.x = table->WorkRect.Max.x = unused_x1; table->InnerClipRect.Max.x = ImMin(table->InnerClipRect.Max.x, unused_x1); } table->InnerWindow->ParentWorkRect = table->WorkRect; table->BorderX1 = table->InnerClipRect.Min.x;// +((table->Flags & ImGuiTableFlags_BordersOuter) ? 0.0f : -1.0f); table->BorderX2 = table->InnerClipRect.Max.x;// +((table->Flags & ImGuiTableFlags_BordersOuter) ? 0.0f : +1.0f); // [Part 9] Allocate draw channels and setup background cliprect TableSetupDrawChannels(table); // [Part 10] Hit testing on borders if (table->Flags & ImGuiTableFlags_Resizable) TableUpdateBorders(table); table->LastFirstRowHeight = 0.0f; table->IsLayoutLocked = true; table->IsUsingHeaders = false; // [Part 11] Context menu if (table->IsContextPopupOpen && table->InstanceCurrent == table->InstanceInteracted) { const ImGuiID context_menu_id = ImHashStr("##ContextMenu", 0, table->ID); if (BeginPopupEx(context_menu_id, ImGuiWindowFlags_AlwaysAutoResize | ImGuiWindowFlags_NoTitleBar | ImGuiWindowFlags_NoSavedSettings)) { TableDrawContextMenu(table); EndPopup(); } else { table->IsContextPopupOpen = false; } } // [Part 13] Sanitize and build sort specs before we have a change to use them for display. // This path will only be exercised when sort specs are modified before header rows (e.g. 
// init or visibility change)
    if (table->IsSortSpecsDirty && (table->Flags & ImGuiTableFlags_Sortable))
        TableSortSpecsBuild(table);

    // Initial state
    ImGuiWindow* inner_window = table->InnerWindow;
    if (table->Flags & ImGuiTableFlags_NoClip)
        table->DrawSplitter.SetCurrentChannel(inner_window->DrawList, TABLE_DRAW_CHANNEL_NOCLIP);
    else
        inner_window->DrawList->PushClipRect(inner_window->ClipRect.Min, inner_window->ClipRect.Max, false);
}

// Process hit-testing on resizing borders. Actual size change will be applied in EndTable()
// - Set table->HoveredColumnBorder with a short delay/timer to reduce feedback noise
// - Submit ahead of table contents and header, use ImGuiButtonFlags_AllowItemOverlap to prioritize widgets
//   overlapping the same area.
void ImGui::TableUpdateBorders(ImGuiTable* table)
{
    ImGuiContext& g = *GImGui;
    IM_ASSERT(table->Flags & ImGuiTableFlags_Resizable);

    // At this point OuterRect height may be zero or under actual final height, so we rely on temporal coherency and
    // use the final height from last frame. Because this is only affecting _interaction_ with columns, it is not
    // really problematic (whereas the actual visual will be displayed in EndTable() and using the current frame height).
    // Actual columns highlight/render will be performed in EndTable() and not be affected.
    const float hit_half_width = TABLE_RESIZE_SEPARATOR_HALF_THICKNESS;
    const float hit_y1 = table->OuterRect.Min.y;
    const float hit_y2_body = ImMax(table->OuterRect.Max.y, hit_y1 + table->LastOuterHeight);
    const float hit_y2_head = hit_y1 + table->LastFirstRowHeight;

    for (int order_n = 0; order_n < table->ColumnsCount; order_n++)
    {
        if (!(table->EnabledMaskByDisplayOrder & ((ImU64)1 << order_n)))
            continue;

        const int column_n = table->DisplayOrderToIndex[order_n];
        ImGuiTableColumn* column = &table->Columns[column_n];
        if (column->Flags & (ImGuiTableColumnFlags_NoResize | ImGuiTableColumnFlags_NoDirectResize_))
            continue;

        // ImGuiTableFlags_NoBordersInBodyUntilResize will be honored in TableDrawBorders()
        // With _NoBordersInBody the border is only hit-testable over the header rows, so skip entirely when no headers are used.
        const float border_y2_hit = (table->Flags & ImGuiTableFlags_NoBordersInBody) ? hit_y2_head : hit_y2_body;
        if ((table->Flags & ImGuiTableFlags_NoBordersInBody) && table->IsUsingHeaders == false)
            continue;

        // With frozen columns, skip borders whose right edge lies at or left of the last frozen column's edge.
        if (table->FreezeColumnsCount > 0)
            if (column->MaxX < table->Columns[table->DisplayOrderToIndex[table->FreezeColumnsCount - 1]].MaxX)
                continue;

        // An invisible button straddling the column's right edge provides the resize interaction.
        ImGuiID column_id = TableGetColumnResizeID(table, column_n, table->InstanceCurrent);
        ImRect hit_rect(column->MaxX - hit_half_width, hit_y1, column->MaxX + hit_half_width, border_y2_hit);
        //GetForegroundDrawList()->AddRect(hit_rect.Min, hit_rect.Max, IM_COL32(255, 0, 0, 100));
        KeepAliveID(column_id);

        bool hovered = false, held = false;
        bool pressed = ButtonBehavior(hit_rect, column_id, &hovered, &held, ImGuiButtonFlags_FlattenChildren | ImGuiButtonFlags_AllowItemOverlap | ImGuiButtonFlags_PressedOnClick | ImGuiButtonFlags_PressedOnDoubleClick);
        if (pressed && IsMouseDoubleClicked(0))
        {
            // Double-click on the border: auto-fit this single column.
            TableSetColumnWidthAutoSingle(table, column_n);
            ClearActiveID();
            held = hovered = false;
        }
        if (held)
        {
            // First resize of the frame: latch the right-most content edge so the scroll range doesn't shrink mid-drag.
            if (table->LastResizedColumn == -1)
                table->ResizeLockMinContentsX2 = table->RightMostEnabledColumn != -1 ? table->Columns[table->RightMostEnabledColumn].MaxX : -FLT_MAX;
            table->ResizedColumn = (ImGuiTableColumnIdx)column_n;
            table->InstanceInteracted = table->InstanceCurrent;
        }
        if ((hovered && g.HoveredIdTimer > TABLE_RESIZE_SEPARATOR_FEEDBACK_TIMER) || held)
        {
            table->HoveredColumnBorder = (ImGuiTableColumnIdx)column_n;
            SetMouseCursor(ImGuiMouseCursor_ResizeEW);
        }
    }
}

void ImGui::EndTable()
{
    ImGuiContext& g = *GImGui;
    ImGuiTable* table = g.CurrentTable;
    IM_ASSERT(table != NULL && "Only call EndTable() if BeginTable() returns true!");

    // This assert would be very useful to catch a common error... unfortunately it would probably trigger in some
    // cases, and for consistency user may sometimes output empty tables (and still benefit from e.g. outer border)
    //IM_ASSERT(table->IsLayoutLocked && "Table unused: never called TableNextRow(), is that the intent?");

    // If the user never got to call TableNextRow() or TableNextColumn(), we call layout ourselves to ensure all our
    // code paths are consistent (instead of just hoping that TableBegin/TableEnd will work), get borders drawn, etc.
// (continuation of ImGui::EndTable)
    if (!table->IsLayoutLocked)
        TableUpdateLayout(table);

    const ImGuiTableFlags flags = table->Flags;
    ImGuiWindow* inner_window = table->InnerWindow;
    ImGuiWindow* outer_window = table->OuterWindow;
    IM_ASSERT(inner_window == g.CurrentWindow);
    IM_ASSERT(outer_window == inner_window || outer_window == inner_window->ParentWindow);

    if (table->IsInsideRow)
        TableEndRow(table);

    // Context menu in columns body
    if (flags & ImGuiTableFlags_ContextMenuInBody)
        if (table->HoveredColumnBody != -1 && !IsAnyItemHovered() && IsMouseReleased(ImGuiMouseButton_Right))
            TableOpenContextMenu((int)table->HoveredColumnBody);

    // Finalize table height
    inner_window->DC.PrevLineSize = table->HostBackupPrevLineSize;
    inner_window->DC.CurrLineSize = table->HostBackupCurrLineSize;
    inner_window->DC.CursorMaxPos = table->HostBackupCursorMaxPos;
    const float inner_content_max_y = table->RowPosY2;
    IM_ASSERT(table->RowPosY2 == inner_window->DC.CursorPos.y);
    if (inner_window != outer_window) // (inner_window != outer_window) means a scrolling table using its own child window
        inner_window->DC.CursorMaxPos.y = inner_content_max_y;
    else if (!(flags & ImGuiTableFlags_NoHostExtendY))
        table->OuterRect.Max.y = table->InnerRect.Max.y = ImMax(table->OuterRect.Max.y, inner_content_max_y); // Patch OuterRect/InnerRect height
    table->WorkRect.Max.y = ImMax(table->WorkRect.Max.y, table->OuterRect.Max.y);
    table->LastOuterHeight = table->OuterRect.GetHeight();

    // Setup inner scrolling range
    // FIXME: This ideally should be done earlier, in BeginTable() SetNextWindowContentSize call, just like writing to inner_window->DC.CursorMaxPos.y,
    // but since the later is likely to be impossible to do we'd rather update both axises together.
    if (table->Flags & ImGuiTableFlags_ScrollX)
    {
        const float outer_padding_for_border = (table->Flags & ImGuiTableFlags_BordersOuterV) ? TABLE_BORDER_SIZE : 0.0f;
        float max_pos_x = table->InnerWindow->DC.CursorMaxPos.x;
        if (table->RightMostEnabledColumn != -1)
            max_pos_x = ImMax(max_pos_x, table->Columns[table->RightMostEnabledColumn].WorkMaxX + table->CellPaddingX + table->OuterPaddingX - outer_padding_for_border);
        // While resizing, keep the scroll range locked to the value latched in TableUpdateBorders().
        if (table->ResizedColumn != -1)
            max_pos_x = ImMax(max_pos_x, table->ResizeLockMinContentsX2);
        table->InnerWindow->DC.CursorMaxPos.x = max_pos_x;
    }

    // Pop clipping rect
    if (!(flags & ImGuiTableFlags_NoClip))
        inner_window->DrawList->PopClipRect();
    inner_window->ClipRect = inner_window->DrawList->_ClipRectStack.back();

    // Draw borders
    if ((flags & ImGuiTableFlags_Borders) != 0)
        TableDrawBorders(table);

#if 0
    // Strip out dummy channel draw calls
    // We have no way to prevent user submitting direct ImDrawList calls into a hidden column (but ImGui:: calls will be clipped out)
    // Pros: remove draw calls which will have no effect. since they'll have zero-size cliprect they may be early out anyway.
    // Cons: making it harder for users watching metrics/debugger to spot the wasted vertices.
    if (table->DummyDrawChannel != (ImGuiTableColumnIdx)-1)
    {
        ImDrawChannel* dummy_channel = &table->DrawSplitter._Channels[table->DummyDrawChannel];
        dummy_channel->_CmdBuffer.resize(0);
        dummy_channel->_IdxBuffer.resize(0);
    }
#endif

    // Flatten channels and merge draw calls
    table->DrawSplitter.SetCurrentChannel(inner_window->DrawList, 0);
    if ((table->Flags & ImGuiTableFlags_NoClip) == 0)
        TableMergeDrawChannels(table);
    table->DrawSplitter.Merge(inner_window->DrawList);

    // Update ColumnsAutoFitWidth to get us ahead for host using our size to auto-resize without waiting for next BeginTable()
    const float width_spacings = (table->OuterPaddingX * 2.0f) + (table->CellSpacingX1 + table->CellSpacingX2) * (table->ColumnsEnabledCount - 1);
    table->ColumnsAutoFitWidth = width_spacings + (table->CellPaddingX * 2.0f) * table->ColumnsEnabledCount;
    for (int column_n = 0; column_n < table->ColumnsCount; column_n++)
        if (table->EnabledMaskByIndex & ((ImU64)1 << column_n))
        {
            ImGuiTableColumn* column = &table->Columns[column_n];
            if ((column->Flags & ImGuiTableColumnFlags_WidthFixed) && !(column->Flags & ImGuiTableColumnFlags_NoResize))
                table->ColumnsAutoFitWidth += column->WidthRequest;
            else
                table->ColumnsAutoFitWidth += TableGetColumnWidthAuto(table, column);
        }

    // Update scroll
    if ((table->Flags & ImGuiTableFlags_ScrollX) == 0 && inner_window != outer_window)
    {
        inner_window->Scroll.x = 0.0f;
    }
    else if (table->LastResizedColumn != -1 && table->ResizedColumn == -1 && inner_window->ScrollbarX && table->InstanceInteracted == table->InstanceCurrent)
    {
        // When releasing a column being resized, scroll to keep the resulting column in sight
        const float neighbor_width_to_keep_visible = table->MinColumnWidth + table->CellPaddingX * 2.0f;
        ImGuiTableColumn* column = &table->Columns[table->LastResizedColumn];
        if (column->MaxX < table->InnerClipRect.Min.x)
            SetScrollFromPosX(inner_window, column->MaxX - inner_window->Pos.x - neighbor_width_to_keep_visible, 1.0f);
        else if (column->MaxX > table->InnerClipRect.Max.x)
            SetScrollFromPosX(inner_window, column->MaxX - inner_window->Pos.x + neighbor_width_to_keep_visible, 1.0f);
    }

    // Apply resizing/dragging at the end of the frame
    if (table->ResizedColumn != -1 && table->InstanceCurrent == table->InstanceInteracted)
    {
        ImGuiTableColumn* column = &table->Columns[table->ResizedColumn];
        const float new_x2 = (g.IO.MousePos.x - g.ActiveIdClickOffset.x + TABLE_RESIZE_SEPARATOR_HALF_THICKNESS);
        const float new_width = ImFloor(new_x2 - column->MinX - table->CellSpacingX1 - table->CellPaddingX * 2.0f);
        table->ResizedColumnNextWidth = new_width; // Picked up next frame (not applied immediately here)
    }

    // Pop from id stack
    IM_ASSERT_USER_ERROR(inner_window->IDStack.back() == table->ID + table->InstanceCurrent, "Mismatching PushID/PopID!");
    IM_ASSERT_USER_ERROR(outer_window->DC.ItemWidthStack.Size >= table->HostBackupItemWidthStackSize, "Too many PopItemWidth!");
    PopID();

    // Restore window data that we modified
    const ImVec2 backup_outer_max_pos = outer_window->DC.CursorMaxPos;
    inner_window->WorkRect = table->HostBackupWorkRect;
    inner_window->ParentWorkRect = table->HostBackupParentWorkRect;
    inner_window->SkipItems = table->HostSkipItems;
    outer_window->DC.CursorPos = table->OuterRect.Min;
    outer_window->DC.ItemWidth = table->HostBackupItemWidth;
    outer_window->DC.ItemWidthStack.Size = table->HostBackupItemWidthStackSize;
    outer_window->DC.ColumnsOffset = table->HostBackupColumnsOffset;

    // Layout in outer window
    // (FIXME: To allow auto-fit and allow desirable effect of SameLine() we dissociate 'used' vs 'ideal' size by overriding
    // CursorPosPrevLine and CursorMaxPos manually. That should be a more general layout feature, see same problem e.g. #3414)
    if (inner_window != outer_window)
    {
        EndChild();
    }
    else
    {
        ItemSize(table->OuterRect.GetSize());
        ItemAdd(table->OuterRect, 0);
    }

    // Override declared contents width/height to enable auto-resize while not needlessly adding a scrollbar
    if (table->Flags & ImGuiTableFlags_NoHostExtendX)
    {
        // FIXME-TABLE: Could we remove this section?
        // ColumnsAutoFitWidth may be one frame ahead here since for Fixed+NoResize is calculated from latest contents
        IM_ASSERT((table->Flags & ImGuiTableFlags_ScrollX) == 0);
        outer_window->DC.CursorMaxPos.x = ImMax(backup_outer_max_pos.x, table->OuterRect.Min.x + table->ColumnsAutoFitWidth);
    }
    else if (table->UserOuterSize.x <= 0.0f)
    {
        const float decoration_size = (table->Flags & ImGuiTableFlags_ScrollX) ? inner_window->ScrollbarSizes.x : 0.0f;
        outer_window->DC.IdealMaxPos.x = ImMax(outer_window->DC.IdealMaxPos.x, table->OuterRect.Min.x + table->ColumnsAutoFitWidth + decoration_size - table->UserOuterSize.x);
        outer_window->DC.CursorMaxPos.x = ImMax(backup_outer_max_pos.x, ImMin(table->OuterRect.Max.x, table->OuterRect.Min.x + table->ColumnsAutoFitWidth));
    }
    else
    {
        outer_window->DC.CursorMaxPos.x = ImMax(backup_outer_max_pos.x, table->OuterRect.Max.x);
    }
    if (table->UserOuterSize.y <= 0.0f)
    {
        const float decoration_size = (table->Flags & ImGuiTableFlags_ScrollY) ? inner_window->ScrollbarSizes.y : 0.0f;
        outer_window->DC.IdealMaxPos.y = ImMax(outer_window->DC.IdealMaxPos.y, inner_content_max_y + decoration_size - table->UserOuterSize.y);
        outer_window->DC.CursorMaxPos.y = ImMax(backup_outer_max_pos.y, ImMin(table->OuterRect.Max.y, inner_content_max_y));
    }
    else
    {
        // OuterRect.Max.y may already have been pushed downward from the initial value (unless ImGuiTableFlags_NoHostExtendY is set)
        outer_window->DC.CursorMaxPos.y = ImMax(backup_outer_max_pos.y, table->OuterRect.Max.y);
    }

    // Save settings
    if (table->IsSettingsDirty)
        TableSaveSettings(table);
    table->IsInitializing = false;

    // Clear or restore current table, if any
    IM_ASSERT(g.CurrentWindow == outer_window && g.CurrentTable == table);
    g.CurrentTableStack.pop_back();
    g.CurrentTable = g.CurrentTableStack.Size ? g.Tables.GetByIndex(g.CurrentTableStack.back().Index) : NULL;
    outer_window->DC.CurrentTableIdx = g.CurrentTable ?
g.Tables.GetIndex(g.CurrentTable) : -1; } // See "COLUMN SIZING POLICIES" comments at the top of this file // If (init_width_or_weight <= 0.0f) it is ignored void ImGui::TableSetupColumn(const char* label, ImGuiTableColumnFlags flags, float init_width_or_weight, ImGuiID user_id) { ImGuiContext& g = *GImGui; ImGuiTable* table = g.CurrentTable; IM_ASSERT(table != NULL && "Need to call TableSetupColumn() after BeginTable()!"); IM_ASSERT(table->IsLayoutLocked == false && "Need to call call TableSetupColumn() before first row!"); IM_ASSERT((flags & ImGuiTableColumnFlags_StatusMask_) == 0 && "Illegal to pass StatusMask values to TableSetupColumn()"); if (table->DeclColumnsCount >= table->ColumnsCount) { IM_ASSERT_USER_ERROR(table->DeclColumnsCount < table->ColumnsCount, "Called TableSetupColumn() too many times!"); return; } ImGuiTableColumn* column = &table->Columns[table->DeclColumnsCount]; table->DeclColumnsCount++; // Assert when passing a width or weight if policy is entirely left to default, to avoid storing width into weight and vice-versa. // Give a grace to users of ImGuiTableFlags_ScrollX. 
if (table->IsDefaultSizingPolicy && (flags & ImGuiTableColumnFlags_WidthMask_) == 0 && (flags & ImGuiTableFlags_ScrollX) == 0) IM_ASSERT(init_width_or_weight <= 0.0f && "Can only specify width/weight if sizing policy is set explicitly in either Table or Column."); // When passing a width automatically enforce WidthFixed policy // (whereas TableSetupColumnFlags would default to WidthAuto if table is not Resizable) if ((flags & ImGuiTableColumnFlags_WidthMask_) == 0 && init_width_or_weight > 0.0f) if ((table->Flags & ImGuiTableFlags_SizingMask_) == ImGuiTableFlags_SizingFixedFit || (table->Flags & ImGuiTableFlags_SizingMask_) == ImGuiTableFlags_SizingFixedSame) flags |= ImGuiTableColumnFlags_WidthFixed; TableSetupColumnFlags(table, column, flags); column->UserID = user_id; flags = column->Flags; // Initialize defaults column->InitStretchWeightOrWidth = init_width_or_weight; if (table->IsInitializing) { // Init width or weight if (column->WidthRequest < 0.0f && column->StretchWeight < 0.0f) { if ((flags & ImGuiTableColumnFlags_WidthFixed) && init_width_or_weight > 0.0f) column->WidthRequest = init_width_or_weight; if (flags & ImGuiTableColumnFlags_WidthStretch) column->StretchWeight = (init_width_or_weight > 0.0f) ? init_width_or_weight : -1.0f; // Disable auto-fit if an explicit width/weight has been specified if (init_width_or_weight > 0.0f) column->AutoFitQueue = 0x00; } // Init default visibility/sort state if ((flags & ImGuiTableColumnFlags_DefaultHide) && (table->SettingsLoadedFlags & ImGuiTableFlags_Hideable) == 0) column->IsEnabled = column->IsEnabledNextFrame = false; if (flags & ImGuiTableColumnFlags_DefaultSort && (table->SettingsLoadedFlags & ImGuiTableFlags_Sortable) == 0) { column->SortOrder = 0; // Multiple columns using _DefaultSort will be reassigned unique SortOrder values when building the sort specs. column->SortDirection = (column->Flags & ImGuiTableColumnFlags_PreferSortDescending) ? 
(ImS8)ImGuiSortDirection_Descending : (ImU8)(ImGuiSortDirection_Ascending); } } // Store name (append with zero-terminator in contiguous buffer) column->NameOffset = -1; if (label != NULL && label[0] != 0) { column->NameOffset = (ImS16)table->ColumnsNames.size(); table->ColumnsNames.append(label, label + strlen(label) + 1); } } // [Public] void ImGui::TableSetupScrollFreeze(int columns, int rows) { ImGuiContext& g = *GImGui; ImGuiTable* table = g.CurrentTable; IM_ASSERT(table != NULL && "Need to call TableSetupColumn() after BeginTable()!"); IM_ASSERT(table->IsLayoutLocked == false && "Need to call TableSetupColumn() before first row!"); IM_ASSERT(columns >= 0 && columns < IMGUI_TABLE_MAX_COLUMNS); IM_ASSERT(rows >= 0 && rows < 128); // Arbitrary limit table->FreezeColumnsRequest = (table->Flags & ImGuiTableFlags_ScrollX) ? (ImGuiTableColumnIdx)columns : 0; table->FreezeColumnsCount = (table->InnerWindow->Scroll.x != 0.0f) ? table->FreezeColumnsRequest : 0; table->FreezeRowsRequest = (table->Flags & ImGuiTableFlags_ScrollY) ? (ImGuiTableColumnIdx)rows : 0; table->FreezeRowsCount = (table->InnerWindow->Scroll.y != 0.0f) ? table->FreezeRowsRequest : 0; table->IsUnfrozenRows = (table->FreezeRowsCount == 0); // Make sure this is set before TableUpdateLayout() so ImGuiListClipper can benefit from it.b } int ImGui::TableGetColumnCount() { ImGuiContext& g = *GImGui; ImGuiTable* table = g.CurrentTable; return table ? 
table->ColumnsCount : 0;
}

// [Public] Return the name of the given column of the current table (defaults to the current column when column_n < 0).
// Returns NULL when no table is active.
const char* ImGui::TableGetColumnName(int column_n)
{
    ImGuiContext& g = *GImGui;
    ImGuiTable* table = g.CurrentTable;
    if (!table)
        return NULL;
    if (column_n < 0)
        column_n = table->CurrentColumn;
    return TableGetColumnName(table, column_n);
}

// [Internal] Overload taking an explicit table. Returns "" (never NULL) for undeclared/unnamed columns.
const char* ImGui::TableGetColumnName(const ImGuiTable* table, int column_n)
{
    if (table->IsLayoutLocked == false && column_n >= table->DeclColumnsCount)
        return ""; // NameOffset is invalid at this point
    const ImGuiTableColumn* column = &table->Columns[column_n];
    if (column->NameOffset == -1)
        return "";
    return &table->ColumnsNames.Buf[column->NameOffset];
}

// For the getter you can use (TableGetColumnFlags() & ImGuiTableColumnFlags_IsEnabled)
// Note: the request is only latched into IsEnabledNextFrame here; it presumably takes effect on the next table frame — confirm against the layout code.
void ImGui::TableSetColumnEnabled(int column_n, bool enabled)
{
    ImGuiContext& g = *GImGui;
    ImGuiTable* table = g.CurrentTable;
    IM_ASSERT(table != NULL);
    if (!table)
        return;
    if (column_n < 0)
        column_n = table->CurrentColumn;
    IM_ASSERT(column_n >= 0 && column_n < table->ColumnsCount);
    ImGuiTableColumn* column = &table->Columns[column_n];
    column->IsEnabledNextFrame = enabled;
}

// We allow querying for an extra column in order to poll the IsHovered state of the right-most section
ImGuiTableColumnFlags ImGui::TableGetColumnFlags(int column_n)
{
    ImGuiContext& g = *GImGui;
    ImGuiTable* table = g.CurrentTable;
    if (!table)
        return ImGuiTableColumnFlags_None;
    if (column_n < 0)
        column_n = table->CurrentColumn;
    if (column_n == table->ColumnsCount)
        return (table->HoveredColumnBody == column_n) ? ImGuiTableColumnFlags_IsHovered : ImGuiTableColumnFlags_None;
    return table->Columns[column_n].Flags;
}

// Return the cell rectangle based on currently known height.
// - Important: we generally don't know our row height until the end of the row, so Max.y will be incorrect in many situations.
//   The only case where this is correct is if we provided a min_row_height to TableNextRow() and don't go below it.
// - Important: if ImGuiTableFlags_PadOuterX is set but ImGuiTableFlags_PadInnerX is not set, the outer-most left and right // columns report a small offset so their CellBgRect can extend up to the outer border. ImRect ImGui::TableGetCellBgRect(const ImGuiTable* table, int column_n) { const ImGuiTableColumn* column = &table->Columns[column_n]; float x1 = column->MinX; float x2 = column->MaxX; if (column->PrevEnabledColumn == -1) x1 -= table->CellSpacingX1; if (column->NextEnabledColumn == -1) x2 += table->CellSpacingX2; return ImRect(x1, table->RowPosY1, x2, table->RowPosY2); } // Return the resizing ID for the right-side of the given column. ImGuiID ImGui::TableGetColumnResizeID(const ImGuiTable* table, int column_n, int instance_no) { IM_ASSERT(column_n >= 0 && column_n < table->ColumnsCount); ImGuiID id = table->ID + 1 + (instance_no * table->ColumnsCount) + column_n; return id; } // Return -1 when table is not hovered. return columns_count if the unused space at the right of visible columns is hovered. int ImGui::TableGetHoveredColumn() { ImGuiContext& g = *GImGui; ImGuiTable* table = g.CurrentTable; if (!table) return -1; return (int)table->HoveredColumnBody; } void ImGui::TableSetBgColor(ImGuiTableBgTarget target, ImU32 color, int column_n) { ImGuiContext& g = *GImGui; ImGuiTable* table = g.CurrentTable; IM_ASSERT(target != ImGuiTableBgTarget_None); if (color == IM_COL32_DISABLE) color = 0; // We cannot draw neither the cell or row background immediately as we don't know the row height at this point in time. 
switch (target)
    {
    case ImGuiTableBgTarget_CellBg:
    {
        if (table->RowPosY1 > table->InnerClipRect.Max.y) // Discard
            return;
        if (column_n == -1)
            column_n = table->CurrentColumn;
        if ((table->VisibleMaskByIndex & ((ImU64)1 << column_n)) == 0)
            return;
        // Reuse the current cell-data slot when re-setting the same column's color, otherwise advance to a fresh slot.
        if (table->RowCellDataCurrent < 0 || table->RowCellData[table->RowCellDataCurrent].Column != column_n)
            table->RowCellDataCurrent++;
        ImGuiTableCellData* cell_data = &table->RowCellData[table->RowCellDataCurrent];
        cell_data->BgColor = color;
        cell_data->Column = (ImGuiTableColumnIdx)column_n;
        break;
    }
    case ImGuiTableBgTarget_RowBg0:
    case ImGuiTableBgTarget_RowBg1:
    {
        if (table->RowPosY1 > table->InnerClipRect.Max.y) // Discard
            return;
        IM_ASSERT(column_n == -1);
        int bg_idx = (target == ImGuiTableBgTarget_RowBg1) ? 1 : 0;
        table->RowBgColor[bg_idx] = color;
        break;
    }
    default:
        IM_ASSERT(0);
    }
}

//-------------------------------------------------------------------------
// [SECTION] Tables: Row changes
//-------------------------------------------------------------------------
// - TableGetRowIndex()
// - TableNextRow()
// - TableBeginRow() [Internal]
// - TableEndRow() [Internal]
//-------------------------------------------------------------------------

// [Public] Note: for row coloring we use ->RowBgColorCounter which is the same value without counting header rows
int ImGui::TableGetRowIndex()
{
    ImGuiContext& g = *GImGui;
    ImGuiTable* table = g.CurrentTable;
    if (!table)
        return 0;
    return table->CurrentRow;
}

// [Public] Starts into the first cell of a new row
void ImGui::TableNextRow(ImGuiTableRowFlags row_flags, float row_min_height)
{
    ImGuiContext& g = *GImGui;
    ImGuiTable* table = g.CurrentTable;

    // First row of the table triggers the (deferred) layout pass; an open row is closed first.
    if (!table->IsLayoutLocked)
        TableUpdateLayout(table);
    if (table->IsInsideRow)
        TableEndRow(table);

    table->LastRowFlags = table->RowFlags;
    table->RowFlags = row_flags;
    table->RowMinHeight = row_min_height;
    TableBeginRow(table);

    // We honor min_row_height requested by user, but cannot guarantee per-row maximum height,
    // because that would essentially require a unique clipping rectangle per-cell.
    table->RowPosY2 += table->CellPaddingY * 2.0f;
    table->RowPosY2 = ImMax(table->RowPosY2, table->RowPosY1 + row_min_height);

    // Disable output until user calls TableNextColumn()
    table->InnerWindow->SkipItems = true;
}

// [Internal] Called by TableNextRow()
void ImGui::TableBeginRow(ImGuiTable* table)
{
    ImGuiWindow* window = table->InnerWindow;
    IM_ASSERT(!table->IsInsideRow);

    // New row
    table->CurrentRow++;
    table->CurrentColumn = -1;
    table->RowBgColor[0] = table->RowBgColor[1] = IM_COL32_DISABLE;
    table->RowCellDataCurrent = -1;
    table->IsInsideRow = true;

    // Begin frozen rows: the first row starts at the top of the outer rect rather than at the scrolled cursor.
    float next_y1 = table->RowPosY2;
    if (table->CurrentRow == 0 && table->FreezeRowsCount > 0)
        next_y1 = window->DC.CursorPos.y = table->OuterRect.Min.y;

    table->RowPosY1 = table->RowPosY2 = next_y1;
    table->RowTextBaseline = 0.0f;
    table->RowIndentOffsetX = window->DC.Indent.x - table->HostIndentX; // Lock indent
    window->DC.PrevLineTextBaseOffset = 0.0f;
    window->DC.CursorMaxPos.y = next_y1;

    // Making the header BG color non-transparent will allow us to overlay it multiple times when handling smooth dragging.
    if (table->RowFlags & ImGuiTableRowFlags_Headers)
    {
        TableSetBgColor(ImGuiTableBgTarget_RowBg0, GetColorU32(ImGuiCol_TableHeaderBg));
        if (table->CurrentRow == 0)
            table->IsUsingHeaders = true;
    }
}

// [Internal] Called by TableNextRow()
void ImGui::TableEndRow(ImGuiTable* table)
{
    ImGuiContext& g = *GImGui;
    ImGuiWindow* window = g.CurrentWindow;
    IM_ASSERT(window == table->InnerWindow);
    IM_ASSERT(table->IsInsideRow);

    if (table->CurrentColumn != -1)
        TableEndCell(table);

    // Logging
    if (g.LogEnabled)
        LogRenderedText(NULL, "|");

    // Position cursor at the bottom of our row so it can be used for e.g. clipping calculation. However it is
    // likely that the next call to TableBeginCell() will reposition the cursor to take account of vertical padding.
window->DC.CursorPos.y = table->RowPosY2; // Row background fill const float bg_y1 = table->RowPosY1; const float bg_y2 = table->RowPosY2; const bool unfreeze_rows_actual = (table->CurrentRow + 1 == table->FreezeRowsCount); const bool unfreeze_rows_request = (table->CurrentRow + 1 == table->FreezeRowsRequest); if (table->CurrentRow == 0) table->LastFirstRowHeight = bg_y2 - bg_y1; const bool is_visible = (bg_y2 >= table->InnerClipRect.Min.y && bg_y1 <= table->InnerClipRect.Max.y); if (is_visible) { // Decide of background color for the row ImU32 bg_col0 = 0; ImU32 bg_col1 = 0; if (table->RowBgColor[0] != IM_COL32_DISABLE) bg_col0 = table->RowBgColor[0]; else if (table->Flags & ImGuiTableFlags_RowBg) bg_col0 = GetColorU32((table->RowBgColorCounter & 1) ? ImGuiCol_TableRowBgAlt : ImGuiCol_TableRowBg); if (table->RowBgColor[1] != IM_COL32_DISABLE) bg_col1 = table->RowBgColor[1]; // Decide of top border color ImU32 border_col = 0; const float border_size = TABLE_BORDER_SIZE; if (table->CurrentRow > 0 || table->InnerWindow == table->OuterWindow) if (table->Flags & ImGuiTableFlags_BordersInnerH) border_col = (table->LastRowFlags & ImGuiTableRowFlags_Headers) ? table->BorderColorStrong : table->BorderColorLight; const bool draw_cell_bg_color = table->RowCellDataCurrent >= 0; const bool draw_strong_bottom_border = unfreeze_rows_actual; if ((bg_col0 | bg_col1 | border_col) != 0 || draw_strong_bottom_border || draw_cell_bg_color) { // In theory we could call SetWindowClipRectBeforeSetChannel() but since we know TableEndRow() is // always followed by a change of clipping rectangle we perform the smallest overwrite possible here. 
if ((table->Flags & ImGuiTableFlags_NoClip) == 0) window->DrawList->_CmdHeader.ClipRect = table->Bg0ClipRectForDrawCmd.ToVec4(); table->DrawSplitter.SetCurrentChannel(window->DrawList, TABLE_DRAW_CHANNEL_BG0); } // Draw row background // We soft/cpu clip this so all backgrounds and borders can share the same clipping rectangle if (bg_col0 || bg_col1) { ImRect row_rect(table->WorkRect.Min.x, bg_y1, table->WorkRect.Max.x, bg_y2); row_rect.ClipWith(table->BgClipRect); if (bg_col0 != 0 && row_rect.Min.y < row_rect.Max.y) window->DrawList->AddRectFilled(row_rect.Min, row_rect.Max, bg_col0); if (bg_col1 != 0 && row_rect.Min.y < row_rect.Max.y) window->DrawList->AddRectFilled(row_rect.Min, row_rect.Max, bg_col1); } // Draw cell background color if (draw_cell_bg_color) { ImGuiTableCellData* cell_data_end = &table->RowCellData[table->RowCellDataCurrent]; for (ImGuiTableCellData* cell_data = &table->RowCellData[0]; cell_data <= cell_data_end; cell_data++) { const ImGuiTableColumn* column = &table->Columns[cell_data->Column]; ImRect cell_bg_rect = TableGetCellBgRect(table, cell_data->Column); cell_bg_rect.ClipWith(table->BgClipRect); cell_bg_rect.Min.x = ImMax(cell_bg_rect.Min.x, column->ClipRect.Min.x); // So that first column after frozen one gets clipped cell_bg_rect.Max.x = ImMin(cell_bg_rect.Max.x, column->MaxX); window->DrawList->AddRectFilled(cell_bg_rect.Min, cell_bg_rect.Max, cell_data->BgColor); } } // Draw top border if (border_col && bg_y1 >= table->BgClipRect.Min.y && bg_y1 < table->BgClipRect.Max.y) window->DrawList->AddLine(ImVec2(table->BorderX1, bg_y1), ImVec2(table->BorderX2, bg_y1), border_col, border_size); // Draw bottom border at the row unfreezing mark (always strong) if (draw_strong_bottom_border && bg_y2 >= table->BgClipRect.Min.y && bg_y2 < table->BgClipRect.Max.y) window->DrawList->AddLine(ImVec2(table->BorderX1, bg_y2), ImVec2(table->BorderX2, bg_y2), table->BorderColorStrong, border_size); } // End frozen rows (when we are past the last frozen row 
line, teleport cursor and alter clipping rectangle) // We need to do that in TableEndRow() instead of TableBeginRow() so the list clipper can mark end of row and // get the new cursor position. if (unfreeze_rows_request) for (int column_n = 0; column_n < table->ColumnsCount; column_n++) { ImGuiTableColumn* column = &table->Columns[column_n]; column->NavLayerCurrent = (ImS8)((column_n < table->FreezeColumnsCount) ? ImGuiNavLayer_Menu : ImGuiNavLayer_Main); } if (unfreeze_rows_actual) { IM_ASSERT(table->IsUnfrozenRows == false); table->IsUnfrozenRows = true; // BgClipRect starts as table->InnerClipRect, reduce it now and make BgClipRectForDrawCmd == BgClipRect float y0 = ImMax(table->RowPosY2 + 1, window->InnerClipRect.Min.y); table->BgClipRect.Min.y = table->Bg2ClipRectForDrawCmd.Min.y = ImMin(y0, window->InnerClipRect.Max.y); table->BgClipRect.Max.y = table->Bg2ClipRectForDrawCmd.Max.y = window->InnerClipRect.Max.y; table->Bg2DrawChannelCurrent = table->Bg2DrawChannelUnfrozen; IM_ASSERT(table->Bg2ClipRectForDrawCmd.Min.y <= table->Bg2ClipRectForDrawCmd.Max.y); float row_height = table->RowPosY2 - table->RowPosY1; table->RowPosY2 = window->DC.CursorPos.y = table->WorkRect.Min.y + table->RowPosY2 - table->OuterRect.Min.y; table->RowPosY1 = table->RowPosY2 - row_height; for (int column_n = 0; column_n < table->ColumnsCount; column_n++) { ImGuiTableColumn* column = &table->Columns[column_n]; column->DrawChannelCurrent = column->DrawChannelUnfrozen; column->ClipRect.Min.y = table->Bg2ClipRectForDrawCmd.Min.y; } // Update cliprect ahead of TableBeginCell() so clipper can access to new ClipRect->Min.y SetWindowClipRectBeforeSetChannel(window, table->Columns[0].ClipRect); table->DrawSplitter.SetCurrentChannel(window->DrawList, table->Columns[0].DrawChannelCurrent); } if (!(table->RowFlags & ImGuiTableRowFlags_Headers)) table->RowBgColorCounter++; table->IsInsideRow = false; } //------------------------------------------------------------------------- // [SECTION] Tables: 
Columns changes //------------------------------------------------------------------------- // - TableGetColumnIndex() // - TableSetColumnIndex() // - TableNextColumn() // - TableBeginCell() [Internal] // - TableEndCell() [Internal] //------------------------------------------------------------------------- int ImGui::TableGetColumnIndex() { ImGuiContext& g = *GImGui; ImGuiTable* table = g.CurrentTable; if (!table) return 0; return table->CurrentColumn; } // [Public] Append into a specific column bool ImGui::TableSetColumnIndex(int column_n) { ImGuiContext& g = *GImGui; ImGuiTable* table = g.CurrentTable; if (!table) return false; if (table->CurrentColumn != column_n) { if (table->CurrentColumn != -1) TableEndCell(table); IM_ASSERT(column_n >= 0 && table->ColumnsCount); TableBeginCell(table, column_n); } // Return whether the column is visible. User may choose to skip submitting items based on this return value, // however they shouldn't skip submitting for columns that may have the tallest contribution to row height. return (table->RequestOutputMaskByIndex & ((ImU64)1 << column_n)) != 0; } // [Public] Append into the next column, wrap and create a new row when already on last column bool ImGui::TableNextColumn() { ImGuiContext& g = *GImGui; ImGuiTable* table = g.CurrentTable; if (!table) return false; if (table->IsInsideRow && table->CurrentColumn + 1 < table->ColumnsCount) { if (table->CurrentColumn != -1) TableEndCell(table); TableBeginCell(table, table->CurrentColumn + 1); } else { TableNextRow(); TableBeginCell(table, 0); } // Return whether the column is visible. User may choose to skip submitting items based on this return value, // however they shouldn't skip submitting for columns that may have the tallest contribution to row height. 
    int column_n = table->CurrentColumn;
    return (table->RequestOutputMaskByIndex & ((ImU64)1 << column_n)) != 0;
}

// [Internal] Called by TableSetColumnIndex()/TableNextColumn()
// Set up window state (cursor position, WorkRect, indent, nav layer, draw channel + clip rect)
// so that regular widget code submitted next lands inside cell 'column_n' of the current row.
// This is called very frequently, so we need to be mindful of unnecessary overhead.
// FIXME-TABLE FIXME-OPT: Could probably shortcut some things for non-active or clipped columns.
void ImGui::TableBeginCell(ImGuiTable* table, int column_n)
{
    ImGuiTableColumn* column = &table->Columns[column_n];
    ImGuiWindow* window = table->InnerWindow;
    table->CurrentColumn = column_n;

    // Start position is roughly ~~ CellRect.Min + CellPadding + Indent
    float start_x = column->WorkMinX;
    if (column->Flags & ImGuiTableColumnFlags_IndentEnable)
        start_x += table->RowIndentOffsetX; // ~~ += window.DC.Indent.x - table->HostIndentX, except we locked it for the row.

    window->DC.CursorPos.x = start_x;
    window->DC.CursorPos.y = table->RowPosY1 + table->CellPaddingY;
    window->DC.CursorMaxPos.x = window->DC.CursorPos.x;
    window->DC.ColumnsOffset.x = start_x - window->Pos.x - window->DC.Indent.x; // FIXME-WORKRECT
    window->DC.CurrLineTextBaseOffset = table->RowTextBaseline;
    window->DC.NavLayerCurrent = (ImGuiNavLayer)column->NavLayerCurrent;

    window->WorkRect.Min.y = window->DC.CursorPos.y;
    window->WorkRect.Min.x = column->WorkMinX;
    window->WorkRect.Max.x = column->WorkMaxX;
    window->DC.ItemWidth = column->ItemWidth;

    // To allow ImGuiListClipper to function we propagate our row height
    if (!column->IsEnabled)
        window->DC.CursorPos.y = ImMax(window->DC.CursorPos.y, table->RowPosY2);

    window->SkipItems = column->IsSkipItems;
    if (column->IsSkipItems)
    {
        // Clear last-item data so queries against the previous item don't leak into skipped cells.
        window->DC.LastItemId = 0;
        window->DC.LastItemStatusFlags = 0;
    }

    if (table->Flags & ImGuiTableFlags_NoClip)
    {
        // FIXME: if we end up drawing all borders/bg in EndTable, could remove this and just assert that channel hasn't changed.
        table->DrawSplitter.SetCurrentChannel(window->DrawList, TABLE_DRAW_CHANNEL_NOCLIP);
        //IM_ASSERT(table->DrawSplitter._Current == TABLE_DRAW_CHANNEL_NOCLIP);
    }
    else
    {
        // FIXME-TABLE: Could avoid this if draw channel is dummy channel?
        SetWindowClipRectBeforeSetChannel(window, column->ClipRect);
        table->DrawSplitter.SetCurrentChannel(window->DrawList, column->DrawChannelCurrent);
    }

    // Logging
    ImGuiContext& g = *GImGui;
    if (g.LogEnabled && !column->IsSkipItems)
    {
        LogRenderedText(&window->DC.CursorPos, "|");
        g.LogLinePosY = FLT_MAX;
    }
}

// [Internal] Called by TableNextRow()/TableSetColumnIndex()/TableNextColumn()
// Close the current cell: record the column's max content position (to infer auto-width),
// grow the row height from the cursor, and propagate the text baseline for the row.
void ImGui::TableEndCell(ImGuiTable* table)
{
    ImGuiTableColumn* column = &table->Columns[table->CurrentColumn];
    ImGuiWindow* window = table->InnerWindow;

    // Report maximum position so we can infer content size per column.
    float* p_max_pos_x;
    if (table->RowFlags & ImGuiTableRowFlags_Headers)
        p_max_pos_x = &column->ContentMaxXHeadersUsed;  // Useful in case user submit contents in header row that is not a TableHeader() call
    else
        p_max_pos_x = table->IsUnfrozenRows ? &column->ContentMaxXUnfrozen : &column->ContentMaxXFrozen;
    *p_max_pos_x = ImMax(*p_max_pos_x, window->DC.CursorMaxPos.x);
    table->RowPosY2 = ImMax(table->RowPosY2, window->DC.CursorMaxPos.y + table->CellPaddingY);
    column->ItemWidth = window->DC.ItemWidth;

    // Propagate text baseline for the entire row
    // FIXME-TABLE: Here we propagate text baseline from the last line of the cell.. instead of the first one.
    table->RowTextBaseline = ImMax(table->RowTextBaseline, window->DC.PrevLineTextBaseOffset);
}

//-------------------------------------------------------------------------
// [SECTION] Tables: Columns width management
//-------------------------------------------------------------------------
// - TableGetMaxColumnWidth() [Internal]
// - TableGetColumnWidthAuto() [Internal]
// - TableSetColumnWidth()
// - TableSetColumnWidthAutoSingle() [Internal]
// - TableSetColumnWidthAutoAll() [Internal]
// - TableUpdateColumnsWeightFromWidth() [Internal]
//-------------------------------------------------------------------------

// Maximum column content width given current layout. Use column->MinX so this value on a per-column basis.
float ImGui::TableGetMaxColumnWidth(const ImGuiTable* table, int column_n)
{
    const ImGuiTableColumn* column = &table->Columns[column_n];
    float max_width = FLT_MAX;
    const float min_column_distance = table->MinColumnWidth + table->CellPaddingX * 2.0f + table->CellSpacingX1 + table->CellSpacingX2;
    if (table->Flags & ImGuiTableFlags_ScrollX)
    {
        // Frozen columns can't reach beyond visible width else scrolling will naturally break.
        if (column->DisplayOrder < table->FreezeColumnsRequest)
        {
            // Reserve at least 'min_column_distance' for every frozen column to the right of this one.
            max_width = (table->InnerClipRect.Max.x - (table->FreezeColumnsRequest - column->DisplayOrder) * min_column_distance) - column->MinX;
            max_width = max_width - table->OuterPaddingX - table->CellPaddingX - table->CellSpacingX2;
        }
    }
    else if ((table->Flags & ImGuiTableFlags_NoKeepColumnsVisible) == 0)
    {
        // If horizontal scrolling is disabled, we apply a final lossless shrinking of columns in order to make
        // sure they are all visible. Because of this we also know that all of the columns will always fit in
        // table->WorkRect and therefore in table->InnerRect (because ScrollX is off)
        // FIXME-TABLE: This is solved incorrectly but also quite a difficult problem to fix as we also want ClipRect width to match.
        // See "table_width_distrib" and "table_width_keep_visible" tests
        max_width = table->WorkRect.Max.x - (table->ColumnsEnabledCount - column->IndexWithinEnabledSet - 1) * min_column_distance - column->MinX;
        //max_width -= table->CellSpacingX1;
        max_width -= table->CellSpacingX2;
        max_width -= table->CellPaddingX * 2.0f;
        max_width -= table->OuterPaddingX;
    }
    return max_width;
}

// Note this is meant to be stored in column->WidthAuto, please generally use the WidthAuto field
// Ideal width from measured contents (body and optionally headers), clamped to table->MinColumnWidth.
float ImGui::TableGetColumnWidthAuto(ImGuiTable* table, ImGuiTableColumn* column)
{
    const float content_width_body = ImMax(column->ContentMaxXFrozen, column->ContentMaxXUnfrozen) - column->WorkMinX;
    const float content_width_headers = column->ContentMaxXHeadersIdeal - column->WorkMinX;
    float width_auto = content_width_body;
    if (!(column->Flags & ImGuiTableColumnFlags_NoHeaderWidth))
        width_auto = ImMax(width_auto, content_width_headers);

    // Non-resizable fixed columns preserve their requested width
    if ((column->Flags & ImGuiTableColumnFlags_WidthFixed) && column->InitStretchWeightOrWidth > 0.0f)
        if (!(table->Flags & ImGuiTableFlags_Resizable) || (column->Flags & ImGuiTableColumnFlags_NoResize))
            width_auto = column->InitStretchWeightOrWidth;

    return ImMax(width_auto, table->MinColumnWidth);
}

// 'width' = inner column width, without padding
void ImGui::TableSetColumnWidth(int column_n, float width)
{
    ImGuiContext& g = *GImGui;
    ImGuiTable* table = g.CurrentTable;
    IM_ASSERT(table != NULL && table->IsLayoutLocked == false);
    IM_ASSERT(column_n >= 0 && column_n < table->ColumnsCount);
    ImGuiTableColumn* column_0 = &table->Columns[column_n];
    float column_0_width = width;

    // Apply constraints early
    // Compare both requested and actual given width to avoid overwriting requested width when column is stuck (minimum size, bounded)
    IM_ASSERT(table->MinColumnWidth > 0.0f);
    const float min_width = table->MinColumnWidth;
    const float max_width = ImMax(min_width, TableGetMaxColumnWidth(table, column_n));
    column_0_width = ImClamp(column_0_width, min_width, max_width);
    if (column_0->WidthGiven == column_0_width || column_0->WidthRequest == column_0_width)
        return;

    //IMGUI_DEBUG_LOG("TableSetColumnWidth(%d, %.1f->%.1f)\n", column_0_idx, column_0->WidthGiven, column_0_width);
    ImGuiTableColumn* column_1 = (column_0->NextEnabledColumn != -1) ? &table->Columns[column_0->NextEnabledColumn] : NULL;

    // In this surprisingly not simple because of how we support mixing Fixed and multiple Stretch columns.
    // - All fixed: easy.
    // - All stretch: easy.
    // - One or more fixed + one stretch: easy.
    // - One or more fixed + more than one stretch: tricky.
    // Qt when manual resize is enabled only support a single _trailing_ stretch column.
    // When forwarding resize from Wn| to Fn+1| we need to be considerate of the _NoResize flag on Fn+1.
    // FIXME-TABLE: Find a way to rewrite all of this so interactions feel more consistent for the user.
    // Scenarios:
    // - F1 F2 F3  resize from F1| or F2|   --> ok: alter ->WidthRequested of Fixed column. Subsequent columns will be offset.
    // - F1 F2 F3  resize from F3|          --> ok: alter ->WidthRequested of Fixed column. If active, ScrollX extent can be altered.
    // - F1 F2 W3  resize from F1| or F2|   --> ok: alter ->WidthRequested of Fixed column. If active, ScrollX extent can be altered, but it doesn't make much sense as the Stretch column will always be minimal size.
    // - F1 F2 W3  resize from W3|          --> ok: no-op (disabled by Resize Rule 1)
    // - W1 W2 W3  resize from W1| or W2|   --> ok
    // - W1 W2 W3  resize from W3|          --> ok: no-op (disabled by Resize Rule 1)
    // - W1 F2 F3  resize from F3|          --> ok: no-op (disabled by Resize Rule 1)
    // - W1 F2     resize from F2|          --> ok: no-op (disabled by Resize Rule 1)
    // - W1 W2 F3  resize from W1| or W2|   --> ok
    // - W1 F2 W3  resize from W1| or F2|   --> ok
    // - F1 W2 F3  resize from W2|          --> ok
    // - F1 W3 F2  resize from W3|          --> ok
    // - W1 F2 F3  resize from W1|          --> ok: equivalent to resizing |F2. F3 will not move.
    // - W1 F2 F3  resize from F2|          --> ok
    // All resizes from a Wx columns are locking other columns.
    // Possible improvements:
    // - W1 W2 W3  resize W1|               --> to not be stuck, both W2 and W3 would stretch down. Seems possible to fix. Would be most beneficial to simplify resize of all-weighted columns.
    // - W3 F1 F2  resize W3|               --> to not be stuck past F1|, both F1 and F2 would need to stretch down, which would be lossy or ambiguous. Seems hard to fix.
    // [Resize Rule 1] Can't resize from right of right-most visible column if there is any Stretch column. Implemented in TableUpdateLayout().

    // If we have all Fixed columns OR resizing a Fixed column that doesn't come after a Stretch one, we can do an offsetting resize.
    // This is the preferred resize path
    if (column_0->Flags & ImGuiTableColumnFlags_WidthFixed)
        if (!column_1 || table->LeftMostStretchedColumn == -1 || table->Columns[table->LeftMostStretchedColumn].DisplayOrder >= column_0->DisplayOrder)
        {
            column_0->WidthRequest = column_0_width;
            table->IsSettingsDirty = true;
            return;
        }

    // We can also use previous column if there's no next one (this is used when doing an auto-fit on the right-most stretch column)
    if (column_1 == NULL)
        column_1 = (column_0->PrevEnabledColumn != -1) ? &table->Columns[column_0->PrevEnabledColumn] : NULL;
    if (column_1 == NULL)
        return;

    // Resizing from right-side of a Stretch column before a Fixed column forward sizing to left-side of fixed column.
    // (old_a + old_b == new_a + new_b) --> (new_a == old_a + old_b - new_b)
    float column_1_width = ImMax(column_1->WidthRequest - (column_0_width - column_0->WidthRequest), min_width);
    column_0_width = column_0->WidthRequest + column_1->WidthRequest - column_1_width;
    IM_ASSERT(column_0_width > 0.0f && column_1_width > 0.0f);
    column_0->WidthRequest = column_0_width;
    column_1->WidthRequest = column_1_width;
    if ((column_0->Flags | column_1->Flags) & ImGuiTableColumnFlags_WidthStretch)
        TableUpdateColumnsWeightFromWidth(table);
    table->IsSettingsDirty = true;
}

// Disable clipping then auto-fit, will take 2 frames
// (we don't take a shortcut for unclipped columns to reduce inconsistencies when e.g. resizing multiple columns)
void ImGui::TableSetColumnWidthAutoSingle(ImGuiTable* table, int column_n)
{
    // Single auto width uses auto-fit
    ImGuiTableColumn* column = &table->Columns[column_n];
    if (!column->IsEnabled)
        return;
    column->CannotSkipItemsQueue = (1 << 0);
    table->AutoFitSingleColumn = (ImGuiTableColumnIdx)column_n;
}

// Queue an auto-fit for every enabled column (and for hidden stretch columns, whose weight can't be reset while hidden).
void ImGui::TableSetColumnWidthAutoAll(ImGuiTable* table)
{
    for (int column_n = 0; column_n < table->ColumnsCount; column_n++)
    {
        ImGuiTableColumn* column = &table->Columns[column_n];
        if (!column->IsEnabled && !(column->Flags & ImGuiTableColumnFlags_WidthStretch)) // Cannot reset weight of hidden stretch column
            continue;
        column->CannotSkipItemsQueue = (1 << 0);
        column->AutoFitQueue = (1 << 1);
    }
}

// Recompute StretchWeight of all enabled stretch columns from their current WidthRequest,
// preserving the total weight so the distribution matches the widths the user just set.
void ImGui::TableUpdateColumnsWeightFromWidth(ImGuiTable* table)
{
    IM_ASSERT(table->LeftMostStretchedColumn != -1 && table->RightMostStretchedColumn != -1);

    // Measure existing quantity
    float visible_weight = 0.0f;
    float visible_width = 0.0f;
    for (int column_n = 0; column_n < table->ColumnsCount; column_n++)
    {
        ImGuiTableColumn* column = &table->Columns[column_n];
        if (!column->IsEnabled || !(column->Flags & ImGuiTableColumnFlags_WidthStretch))
            continue;
        IM_ASSERT(column->StretchWeight > 0.0f);
        visible_weight += column->StretchWeight;
        visible_width += column->WidthRequest;
    }
    IM_ASSERT(visible_weight > 0.0f && visible_width > 0.0f);

    // Apply new weights
    for (int column_n = 0; column_n < table->ColumnsCount; column_n++)
    {
        ImGuiTableColumn* column = &table->Columns[column_n];
        if (!column->IsEnabled || !(column->Flags & ImGuiTableColumnFlags_WidthStretch))
            continue;
        column->StretchWeight = (column->WidthRequest / visible_width) * visible_weight;
        IM_ASSERT(column->StretchWeight > 0.0f);
    }
}

//-------------------------------------------------------------------------
// [SECTION] Tables: Drawing
//-------------------------------------------------------------------------
// - TablePushBackgroundChannel() [Internal]
// - TablePopBackgroundChannel() [Internal]
// - TableSetupDrawChannels() [Internal]
// - TableMergeDrawChannels() [Internal]
// - TableDrawBorders() [Internal]
//-------------------------------------------------------------------------

// Bg2 is used by Selectable (and possibly other widgets) to render to the background.
// Unlike our Bg0/1 channel which we uses for RowBg/CellBg/Borders and where we guarantee all shapes to be CPU-clipped,
// the Bg2 channel being widgets-facing will rely on regular ClipRect.
void ImGui::TablePushBackgroundChannel() { ImGuiContext& g = *GImGui; ImGuiWindow* window = g.CurrentWindow; ImGuiTable* table = g.CurrentTable; // Optimization: avoid SetCurrentChannel() + PushClipRect() table->HostBackupInnerClipRect = window->ClipRect; SetWindowClipRectBeforeSetChannel(window, table->Bg2ClipRectForDrawCmd); table->DrawSplitter.SetCurrentChannel(window->DrawList, table->Bg2DrawChannelCurrent); } void ImGui::TablePopBackgroundChannel() { ImGuiContext& g = *GImGui; ImGuiWindow* window = g.CurrentWindow; ImGuiTable* table = g.CurrentTable; ImGuiTableColumn* column = &table->Columns[table->CurrentColumn]; // Optimization: avoid PopClipRect() + SetCurrentChannel() SetWindowClipRectBeforeSetChannel(window, table->HostBackupInnerClipRect); table->DrawSplitter.SetCurrentChannel(window->DrawList, column->DrawChannelCurrent); } // Allocate draw channels. Called by TableUpdateLayout() // - We allocate them following storage order instead of display order so reordering columns won't needlessly // increase overall dormant memory cost. // - We isolate headers draw commands in their own channels instead of just altering clip rects. // This is in order to facilitate merging of draw commands. // - After crossing FreezeRowsCount, all columns see their current draw channel changed to a second set of channels. // - We only use the dummy draw channel so we can push a null clipping rectangle into it without affecting other // channels, while simplifying per-row/per-cell overhead. It will be empty and discarded when merged. // - We allocate 1 or 2 background draw channels. This is because we know TablePushBackgroundChannel() is only used for // horizontal spanning. If we allowed vertical spanning we'd need one background draw channel per merge group (1-4). 
// Draw channel allocation (before merging):
// - NoClip                       --> 2+D+1 channels: bg0/1 + bg2 + foreground (same clip rect == always 1 draw call)
// - Clip                         --> 2+D+N channels
// - FreezeRows                   --> 2+D+N*2 (unless scrolling value is zero)
// - FreezeRows || FreezeColunns  --> 3+D+N*2 (unless scrolling value is zero)
// Where D is 1 if any column is clipped or hidden (dummy channel) otherwise 0.
void ImGui::TableSetupDrawChannels(ImGuiTable* table)
{
    // Channel layout: [0] = Bg0/Bg1, [1] = Bg2 frozen, then per-column channels (frozen set first,
    // unfrozen set after when row freezing is active), optionally followed by one dummy channel last.
    const int freeze_row_multiplier = (table->FreezeRowsCount > 0) ? 2 : 1;
    const int channels_for_row = (table->Flags & ImGuiTableFlags_NoClip) ? 1 : table->ColumnsEnabledCount;
    const int channels_for_bg = 1 + 1 * freeze_row_multiplier;
    const int channels_for_dummy = (table->ColumnsEnabledCount < table->ColumnsCount || table->VisibleMaskByIndex != table->EnabledMaskByIndex) ? +1 : 0;
    const int channels_total = channels_for_bg + (channels_for_row * freeze_row_multiplier) + channels_for_dummy;
    table->DrawSplitter.Split(table->InnerWindow->DrawList, channels_total);
    table->DummyDrawChannel = (ImGuiTableDrawChannelIdx)((channels_for_dummy > 0) ? channels_total - 1 : -1);
    table->Bg2DrawChannelCurrent = TABLE_DRAW_CHANNEL_BG2_FROZEN;
    table->Bg2DrawChannelUnfrozen = (ImGuiTableDrawChannelIdx)((table->FreezeRowsCount > 0) ? 2 + channels_for_row : TABLE_DRAW_CHANNEL_BG2_FROZEN);

    // Assign each column its frozen/unfrozen channel pair; clipped/hidden columns all share the dummy channel.
    int draw_channel_current = 2;
    for (int column_n = 0; column_n < table->ColumnsCount; column_n++)
    {
        ImGuiTableColumn* column = &table->Columns[column_n];
        if (column->IsVisibleX && column->IsVisibleY)
        {
            column->DrawChannelFrozen = (ImGuiTableDrawChannelIdx)(draw_channel_current);
            column->DrawChannelUnfrozen = (ImGuiTableDrawChannelIdx)(draw_channel_current + (table->FreezeRowsCount > 0 ? channels_for_row + 1 : 0));
            if (!(table->Flags & ImGuiTableFlags_NoClip))
                draw_channel_current++;
        }
        else
        {
            column->DrawChannelFrozen = column->DrawChannelUnfrozen = table->DummyDrawChannel;
        }
        column->DrawChannelCurrent = column->DrawChannelFrozen;
    }

    // Initial draw cmd starts with a BgClipRect that matches the one of its host, to facilitate merge draw commands by default.
    // All our cell highlight are manually clipped with BgClipRect. When unfreezing it will be made smaller to fit scrolling rect.
    // (This technically isn't part of setting up draw channels, but is reasonably related to be done here)
    table->BgClipRect = table->InnerClipRect;
    table->Bg0ClipRectForDrawCmd = table->OuterWindow->ClipRect;
    table->Bg2ClipRectForDrawCmd = table->HostClipRect;
    IM_ASSERT(table->BgClipRect.Min.y <= table->BgClipRect.Max.y);
}

// This function reorder draw channels based on matching clip rectangle, to facilitate merging them. Called by EndTable().
// For simplicity we call it TableMergeDrawChannels() but in fact it only reorder channels + overwrite ClipRect,
// actual merging is done by table->DrawSplitter.Merge() which is called right after TableMergeDrawChannels().
//
// Columns where the contents didn't stray off their local clip rectangle can be merged. To achieve
// this we merge their clip rect and make them contiguous in the channel list, so they can be merged
// by the call to DrawSplitter.Merge() following to the call to this function.
// We reorder draw commands by arranging them into a maximum of 4 distinct groups:
//
//   1 group:               2 groups:              2 groups:              4 groups:
//   [ 0. ] no freeze       [ 0. ] row freeze      [ 01 ] col freeze      [ 01 ] row+col freeze
//   [ .. ]  or no scroll   [ 2. ]  and v-scroll   [ .. ]  and h-scroll   [ 23 ]  and v+h-scroll
//
// Each column itself can use 1 channel (row freeze disabled) or 2 channels (row freeze enabled).
// When the contents of a column didn't stray off its limit, we move its channels into the corresponding group // based on its position (within frozen rows/columns groups or not). // At the end of the operation our 1-4 groups will each have a ImDrawCmd using the same ClipRect. // This function assume that each column are pointing to a distinct draw channel, // otherwise merge_group->ChannelsCount will not match set bit count of merge_group->ChannelsMask. // // Column channels will not be merged into one of the 1-4 groups in the following cases: // - The contents stray off its clipping rectangle (we only compare the MaxX value, not the MinX value). // Direct ImDrawList calls won't be taken into account by default, if you use them make sure the ImGui:: bounds // matches, by e.g. calling SetCursorScreenPos(). // - The channel uses more than one draw command itself. We drop all our attempt at merging stuff here.. // we could do better but it's going to be rare and probably not worth the hassle. // Columns for which the draw channel(s) haven't been merged with other will use their own ImDrawCmd. // // This function is particularly tricky to understand.. take a breath. void ImGui::TableMergeDrawChannels(ImGuiTable* table) { ImGuiContext& g = *GImGui; ImDrawListSplitter* splitter = &table->DrawSplitter; const bool has_freeze_v = (table->FreezeRowsCount > 0); const bool has_freeze_h = (table->FreezeColumnsCount > 0); IM_ASSERT(splitter->_Current == 0); // Track which groups we are going to attempt to merge, and which channels goes into each group. struct MergeGroup { ImRect ClipRect; int ChannelsCount; ImBitArray<IMGUI_TABLE_MAX_DRAW_CHANNELS> ChannelsMask; }; int merge_group_mask = 0x00; MergeGroup merge_groups[4]; memset(merge_groups, 0, sizeof(merge_groups)); // 1. 
Scan channels and take note of those which can be merged for (int column_n = 0; column_n < table->ColumnsCount; column_n++) { if ((table->VisibleMaskByIndex & ((ImU64)1 << column_n)) == 0) continue; ImGuiTableColumn* column = &table->Columns[column_n]; const int merge_group_sub_count = has_freeze_v ? 2 : 1; for (int merge_group_sub_n = 0; merge_group_sub_n < merge_group_sub_count; merge_group_sub_n++) { const int channel_no = (merge_group_sub_n == 0) ? column->DrawChannelFrozen : column->DrawChannelUnfrozen; // Don't attempt to merge if there are multiple draw calls within the column ImDrawChannel* src_channel = &splitter->_Channels[channel_no]; if (src_channel->_CmdBuffer.Size > 0 && src_channel->_CmdBuffer.back().ElemCount == 0) src_channel->_CmdBuffer.pop_back(); if (src_channel->_CmdBuffer.Size != 1) continue; // Find out the width of this merge group and check if it will fit in our column // (note that we assume that rendering didn't stray on the left direction. we should need a CursorMinPos to detect it) if (!(column->Flags & ImGuiTableColumnFlags_NoClip)) { float content_max_x; if (!has_freeze_v) content_max_x = ImMax(column->ContentMaxXUnfrozen, column->ContentMaxXHeadersUsed); // No row freeze else if (merge_group_sub_n == 0) content_max_x = ImMax(column->ContentMaxXFrozen, column->ContentMaxXHeadersUsed); // Row freeze: use width before freeze else content_max_x = column->ContentMaxXUnfrozen; // Row freeze: use width after freeze if (content_max_x > column->ClipRect.Max.x) continue; } const int merge_group_n = (has_freeze_h && column_n < table->FreezeColumnsCount ? 0 : 1) + (has_freeze_v && merge_group_sub_n == 0 ? 
0 : 2); IM_ASSERT(channel_no < IMGUI_TABLE_MAX_DRAW_CHANNELS); MergeGroup* merge_group = &merge_groups[merge_group_n]; if (merge_group->ChannelsCount == 0) merge_group->ClipRect = ImRect(+FLT_MAX, +FLT_MAX, -FLT_MAX, -FLT_MAX); merge_group->ChannelsMask.SetBit(channel_no); merge_group->ChannelsCount++; merge_group->ClipRect.Add(src_channel->_CmdBuffer[0].ClipRect); merge_group_mask |= (1 << merge_group_n); } // Invalidate current draw channel // (we don't clear DrawChannelFrozen/DrawChannelUnfrozen solely to facilitate debugging/later inspection of data) column->DrawChannelCurrent = (ImGuiTableDrawChannelIdx)-1; } // [DEBUG] Display merge groups #if 0 if (g.IO.KeyShift) for (int merge_group_n = 0; merge_group_n < IM_ARRAYSIZE(merge_groups); merge_group_n++) { MergeGroup* merge_group = &merge_groups[merge_group_n]; if (merge_group->ChannelsCount == 0) continue; char buf[32]; ImFormatString(buf, 32, "MG%d:%d", merge_group_n, merge_group->ChannelsCount); ImVec2 text_pos = merge_group->ClipRect.Min + ImVec2(4, 4); ImVec2 text_size = CalcTextSize(buf, NULL); GetForegroundDrawList()->AddRectFilled(text_pos, text_pos + text_size, IM_COL32(0, 0, 0, 255)); GetForegroundDrawList()->AddText(text_pos, IM_COL32(255, 255, 0, 255), buf, NULL); GetForegroundDrawList()->AddRect(merge_group->ClipRect.Min, merge_group->ClipRect.Max, IM_COL32(255, 255, 0, 255)); } #endif // 2. Rewrite channel list in our preferred order if (merge_group_mask != 0) { // We skip channel 0 (Bg0/Bg1) and 1 (Bg2 frozen) from the shuffling since they won't move - see channels allocation in TableSetupDrawChannels(). 
const int LEADING_DRAW_CHANNELS = 2; g.DrawChannelsTempMergeBuffer.resize(splitter->_Count - LEADING_DRAW_CHANNELS); // Use shared temporary storage so the allocation gets amortized ImDrawChannel* dst_tmp = g.DrawChannelsTempMergeBuffer.Data; ImBitArray<IMGUI_TABLE_MAX_DRAW_CHANNELS> remaining_mask; // We need 132-bit of storage remaining_mask.ClearAllBits(); remaining_mask.SetBitRange(LEADING_DRAW_CHANNELS, splitter->_Count); remaining_mask.ClearBit(table->Bg2DrawChannelUnfrozen); IM_ASSERT(has_freeze_v == false || table->Bg2DrawChannelUnfrozen != TABLE_DRAW_CHANNEL_BG2_FROZEN); int remaining_count = splitter->_Count - (has_freeze_v ? LEADING_DRAW_CHANNELS + 1 : LEADING_DRAW_CHANNELS); //ImRect host_rect = (table->InnerWindow == table->OuterWindow) ? table->InnerClipRect : table->HostClipRect; ImRect host_rect = table->HostClipRect; for (int merge_group_n = 0; merge_group_n < IM_ARRAYSIZE(merge_groups); merge_group_n++) { if (int merge_channels_count = merge_groups[merge_group_n].ChannelsCount) { MergeGroup* merge_group = &merge_groups[merge_group_n]; ImRect merge_clip_rect = merge_group->ClipRect; // Extend outer-most clip limits to match those of host, so draw calls can be merged even if // outer-most columns have some outer padding offsetting them from their parent ClipRect. // The principal cases this is dealing with are: // - On a same-window table (not scrolling = single group), all fitting columns ClipRect -> will extend and match host ClipRect -> will merge // - Columns can use padding and have left-most ClipRect.Min.x and right-most ClipRect.Max.x != from host ClipRect -> will extend and match host ClipRect -> will merge // FIXME-TABLE FIXME-WORKRECT: We are wasting a merge opportunity on tables without scrolling if column doesn't fit // within host clip rect, solely because of the half-padding difference between window->WorkRect and window->InnerClipRect. 
if ((merge_group_n & 1) == 0 || !has_freeze_h) merge_clip_rect.Min.x = ImMin(merge_clip_rect.Min.x, host_rect.Min.x); if ((merge_group_n & 2) == 0 || !has_freeze_v) merge_clip_rect.Min.y = ImMin(merge_clip_rect.Min.y, host_rect.Min.y); if ((merge_group_n & 1) != 0) merge_clip_rect.Max.x = ImMax(merge_clip_rect.Max.x, host_rect.Max.x); if ((merge_group_n & 2) != 0 && (table->Flags & ImGuiTableFlags_NoHostExtendY) == 0) merge_clip_rect.Max.y = ImMax(merge_clip_rect.Max.y, host_rect.Max.y); #if 0 GetOverlayDrawList()->AddRect(merge_group->ClipRect.Min, merge_group->ClipRect.Max, IM_COL32(255, 0, 0, 200), 0.0f, ~0, 1.0f); GetOverlayDrawList()->AddLine(merge_group->ClipRect.Min, merge_clip_rect.Min, IM_COL32(255, 100, 0, 200)); GetOverlayDrawList()->AddLine(merge_group->ClipRect.Max, merge_clip_rect.Max, IM_COL32(255, 100, 0, 200)); #endif remaining_count -= merge_group->ChannelsCount; for (int n = 0; n < IM_ARRAYSIZE(remaining_mask.Storage); n++) remaining_mask.Storage[n] &= ~merge_group->ChannelsMask.Storage[n]; for (int n = 0; n < splitter->_Count && merge_channels_count != 0; n++) { // Copy + overwrite new clip rect if (!merge_group->ChannelsMask.TestBit(n)) continue; merge_group->ChannelsMask.ClearBit(n); merge_channels_count--; ImDrawChannel* channel = &splitter->_Channels[n]; IM_ASSERT(channel->_CmdBuffer.Size == 1 && merge_clip_rect.Contains(ImRect(channel->_CmdBuffer[0].ClipRect))); channel->_CmdBuffer[0].ClipRect = merge_clip_rect.ToVec4(); memcpy(dst_tmp++, channel, sizeof(ImDrawChannel)); } } // Make sure Bg2DrawChannelUnfrozen appears in the middle of our groups (whereas Bg0/Bg1 and Bg2 frozen are fixed to 0 and 1) if (merge_group_n == 1 && has_freeze_v) memcpy(dst_tmp++, &splitter->_Channels[table->Bg2DrawChannelUnfrozen], sizeof(ImDrawChannel)); } // Append unmergeable channels that we didn't reorder at the end of the list for (int n = 0; n < splitter->_Count && remaining_count != 0; n++) { if (!remaining_mask.TestBit(n)) continue; ImDrawChannel* channel 
= &splitter->_Channels[n];
                memcpy(dst_tmp++, channel, sizeof(ImDrawChannel));
                remaining_count--;
            }
            IM_ASSERT(dst_tmp == g.DrawChannelsTempMergeBuffer.Data + g.DrawChannelsTempMergeBuffer.Size);
            memcpy(splitter->_Channels.Data + LEADING_DRAW_CHANNELS, g.DrawChannelsTempMergeBuffer.Data, (splitter->_Count - LEADING_DRAW_CHANNELS) * sizeof(ImDrawChannel));
        }
    }
}

// Draw the inner vertical/horizontal borders and the outer border of the table.
// Output goes into the BG0 draw channel, clipped by Bg0ClipRectForDrawCmd, so borders render under cell contents.
// FIXME-TABLE: This is a mess, need to redesign how we render borders (as some are also done in TableEndRow)
void ImGui::TableDrawBorders(ImGuiTable* table)
{
    ImGuiWindow* inner_window = table->InnerWindow;
    // Early out when the whole table is scrolled/clipped out of its outer window.
    if (!table->OuterWindow->ClipRect.Overlaps(table->OuterRect))
        return;

    ImDrawList* inner_drawlist = inner_window->DrawList;
    table->DrawSplitter.SetCurrentChannel(inner_drawlist, TABLE_DRAW_CHANNEL_BG0);
    inner_drawlist->PushClipRect(table->Bg0ClipRectForDrawCmd.Min, table->Bg0ClipRectForDrawCmd.Max, false);

    // Draw inner border and resizing feedback
    const float border_size = TABLE_BORDER_SIZE;
    const float draw_y1 = table->InnerRect.Min.y;
    const float draw_y2_body = table->InnerRect.Max.y;
    // Headers-only height: when rows are frozen the header row starts at InnerRect.Min.y, otherwise at WorkRect.Min.y.
    const float draw_y2_head = table->IsUsingHeaders ? ImMin(table->InnerRect.Max.y, (table->FreezeRowsCount >= 1 ? table->InnerRect.Min.y : table->WorkRect.Min.y) + table->LastFirstRowHeight) : draw_y1;
    if (table->Flags & ImGuiTableFlags_BordersInnerV)
    {
        for (int order_n = 0; order_n < table->ColumnsCount; order_n++)
        {
            if (!(table->EnabledMaskByDisplayOrder & ((ImU64)1 << order_n)))
                continue;

            const int column_n = table->DisplayOrderToIndex[order_n];
            ImGuiTableColumn* column = &table->Columns[column_n];
            const bool is_hovered = (table->HoveredColumnBorder == column_n);
            const bool is_resized = (table->ResizedColumn == column_n) && (table->InstanceInteracted == table->InstanceCurrent);
            const bool is_resizable = (column->Flags & (ImGuiTableColumnFlags_NoResize | ImGuiTableColumnFlags_NoDirectResize_)) == 0;
            const bool is_frozen_separator = (table->FreezeColumnsCount != -1 && table->FreezeColumnsCount == order_n + 1);
            if (column->MaxX > table->InnerClipRect.Max.x && !is_resized)
                continue;

            // Decide whether right-most column is visible
            if (column->NextEnabledColumn == -1 && !is_resizable)
                if ((table->Flags & ImGuiTableFlags_SizingMask_) != ImGuiTableFlags_SizingFixedSame || (table->Flags & ImGuiTableFlags_NoHostExtendX))
                    continue;
            if (column->MaxX <= column->ClipRect.Min.x) // FIXME-TABLE FIXME-STYLE: Assume BorderSize==1, this is problematic if we want to increase the border size..
                continue;

            // Draw in outer window so right-most column won't be clipped
            // Always draw full height border when being resized/hovered, or on the delimitation of frozen column scrolling.
            ImU32 col;
            float draw_y2;
            if (is_hovered || is_resized || is_frozen_separator)
            {
                draw_y2 = draw_y2_body;
                col = is_resized ? GetColorU32(ImGuiCol_SeparatorActive) : is_hovered ? GetColorU32(ImGuiCol_SeparatorHovered) : table->BorderColorStrong;
            }
            else
            {
                // NoBordersInBody/UntilResize restrict the vertical border to the header section only.
                draw_y2 = (table->Flags & (ImGuiTableFlags_NoBordersInBody | ImGuiTableFlags_NoBordersInBodyUntilResize)) ? draw_y2_head : draw_y2_body;
                col = (table->Flags & (ImGuiTableFlags_NoBordersInBody | ImGuiTableFlags_NoBordersInBodyUntilResize)) ? table->BorderColorStrong : table->BorderColorLight;
            }

            if (draw_y2 > draw_y1)
                inner_drawlist->AddLine(ImVec2(column->MaxX, draw_y1), ImVec2(column->MaxX, draw_y2), col, border_size);
        }
    }

    // Draw outer border
    // FIXME: could use AddRect or explicit VLine/HLine helper?
    if (table->Flags & ImGuiTableFlags_BordersOuter)
    {
        // Display outer border offset by 1 which is a simple way to display it without adding an extra draw call
        // (Without the offset, in outer_window it would be rendered behind cells, because child windows are above their
        // parent. In inner_window, it won't reach out over scrollbars. Another weird solution would be to display part
        // of it in inner window, and the part that's over scrollbars in the outer window..)
        // Either solution currently won't allow us to use a larger border size: the border would clipped.
        const ImRect outer_border = table->OuterRect;
        const ImU32 outer_col = table->BorderColorStrong;
        if ((table->Flags & ImGuiTableFlags_BordersOuter) == ImGuiTableFlags_BordersOuter)
        {
            // Both V and H outer borders requested: one rect covers all four edges.
            inner_drawlist->AddRect(outer_border.Min, outer_border.Max, outer_col, 0.0f, ~0, border_size);
        }
        else if (table->Flags & ImGuiTableFlags_BordersOuterV)
        {
            inner_drawlist->AddLine(outer_border.Min, ImVec2(outer_border.Min.x, outer_border.Max.y), outer_col, border_size);
            inner_drawlist->AddLine(ImVec2(outer_border.Max.x, outer_border.Min.y), outer_border.Max, outer_col, border_size);
        }
        else if (table->Flags & ImGuiTableFlags_BordersOuterH)
        {
            inner_drawlist->AddLine(outer_border.Min, ImVec2(outer_border.Max.x, outer_border.Min.y), outer_col, border_size);
            inner_drawlist->AddLine(ImVec2(outer_border.Min.x, outer_border.Max.y), outer_border.Max, outer_col, border_size);
        }
    }
    if ((table->Flags & ImGuiTableFlags_BordersInnerH) && table->RowPosY2 < table->OuterRect.Max.y)
    {
        // Draw bottom-most row border
        const float border_y = table->RowPosY2;
        if (border_y >= table->BgClipRect.Min.y && border_y < table->BgClipRect.Max.y)
            inner_drawlist->AddLine(ImVec2(table->BorderX1, border_y), ImVec2(table->BorderX2, border_y), table->BorderColorLight, border_size);
    }

    inner_drawlist->PopClipRect();
}

//-------------------------------------------------------------------------
// [SECTION] Tables: Sorting
//-------------------------------------------------------------------------
// - TableGetSortSpecs()
// - TableFixColumnSortDirection() [Internal]
// - TableGetColumnNextSortDirection() [Internal]
// - TableSetColumnSortDirection() [Internal]
// - TableSortSpecsSanitize() [Internal]
// - TableSortSpecsBuild() [Internal]
//-------------------------------------------------------------------------

// Return NULL if no sort specs (most often when ImGuiTableFlags_Sortable is not set)
// You can sort your data again when 'SpecsChanged == true'. It will be true with sorting specs have changed since
// last call, or the first time.
// Lifetime: don't hold on this pointer over multiple frames or past any subsequent call to BeginTable()!
ImGuiTableSortSpecs* ImGui::TableGetSortSpecs()
{
    ImGuiContext& g = *GImGui;
    ImGuiTable* table = g.CurrentTable;
    IM_ASSERT(table != NULL);

    if (!(table->Flags & ImGuiTableFlags_Sortable))
        return NULL;

    // Require layout (in case TableHeadersRow() hasn't been called) as it may alter IsSortSpecsDirty in some paths.
    if (!table->IsLayoutLocked)
        TableUpdateLayout(table);

    if (table->IsSortSpecsDirty)
        TableSortSpecsBuild(table);

    return &table->SortSpecs;
}

// Return the n-th allowed sort direction for a column. Directions are packed 2 bits each in SortDirectionsAvailList.
static inline ImGuiSortDirection TableGetColumnAvailSortDirection(ImGuiTableColumn* column, int n)
{
    IM_ASSERT(n < column->SortDirectionsAvailCount);
    return (column->SortDirectionsAvailList >> (n << 1)) & 0x03;
}

// Fix sort direction if currently set on a value which is unavailable (e.g.
activating NoSortAscending/NoSortDescending) void ImGui::TableFixColumnSortDirection(ImGuiTable* table, ImGuiTableColumn* column) { if (column->SortOrder == -1 || (column->SortDirectionsAvailMask & (1 << column->SortDirection)) != 0) return; column->SortDirection = (ImU8)TableGetColumnAvailSortDirection(column, 0); table->IsSortSpecsDirty = true; } // Calculate next sort direction that would be set after clicking the column // - If the PreferSortDescending flag is set, we will default to a Descending direction on the first click. // - Note that the PreferSortAscending flag is never checked, it is essentially the default and therefore a no-op. IM_STATIC_ASSERT(ImGuiSortDirection_None == 0 && ImGuiSortDirection_Ascending == 1 && ImGuiSortDirection_Descending == 2); ImGuiSortDirection ImGui::TableGetColumnNextSortDirection(ImGuiTableColumn* column) { IM_ASSERT(column->SortDirectionsAvailCount > 0); if (column->SortOrder == -1) return TableGetColumnAvailSortDirection(column, 0); for (int n = 0; n < 3; n++) if (column->SortDirection == TableGetColumnAvailSortDirection(column, n)) return TableGetColumnAvailSortDirection(column, (n + 1) % column->SortDirectionsAvailCount); IM_ASSERT(0); return ImGuiSortDirection_None; } // Note that the NoSortAscending/NoSortDescending flags are processed in TableSortSpecsSanitize(), and they may change/revert // the value of SortDirection. We could technically also do it here but it would be unnecessary and duplicate code. 
// Set the sort direction of 'column_n'. With 'append_to_sort_specs' (multi-sort), the column is appended after the
// highest existing SortOrder; otherwise it becomes the sole sorted column (all others reset to -1).
void ImGui::TableSetColumnSortDirection(int column_n, ImGuiSortDirection sort_direction, bool append_to_sort_specs)
{
    ImGuiContext& g = *GImGui;
    ImGuiTable* table = g.CurrentTable;

    if (!(table->Flags & ImGuiTableFlags_SortMulti))
        append_to_sort_specs = false;
    if (!(table->Flags & ImGuiTableFlags_SortTristate))
        IM_ASSERT(sort_direction != ImGuiSortDirection_None);

    // When appending, new SortOrder is one past the current maximum.
    ImGuiTableColumnIdx sort_order_max = 0;
    if (append_to_sort_specs)
        for (int other_column_n = 0; other_column_n < table->ColumnsCount; other_column_n++)
            sort_order_max = ImMax(sort_order_max, table->Columns[other_column_n].SortOrder);

    ImGuiTableColumn* column = &table->Columns[column_n];
    column->SortDirection = (ImU8)sort_direction;
    if (column->SortDirection == ImGuiSortDirection_None)
        column->SortOrder = -1;
    else if (column->SortOrder == -1 || !append_to_sort_specs)
        column->SortOrder = append_to_sort_specs ? sort_order_max + 1 : 0;

    for (int other_column_n = 0; other_column_n < table->ColumnsCount; other_column_n++)
    {
        ImGuiTableColumn* other_column = &table->Columns[other_column_n];
        if (other_column != column && !append_to_sort_specs)
            other_column->SortOrder = -1;
        TableFixColumnSortDirection(table, other_column);
    }
    table->IsSettingsDirty = true;
    table->IsSortSpecsDirty = true;
}

// Repair the per-column SortOrder fields so they form a contiguous 0..N-1 sequence, enforce single-sort when
// ImGuiTableFlags_SortMulti is not set, and apply a fallback default sort. Writes table->SortSpecsCount.
void ImGui::TableSortSpecsSanitize(ImGuiTable* table)
{
    IM_ASSERT(table->Flags & ImGuiTableFlags_Sortable);

    // Clear SortOrder from hidden column and verify that there's no gap or duplicate.
    int sort_order_count = 0;
    ImU64 sort_order_mask = 0x00;
    for (int column_n = 0; column_n < table->ColumnsCount; column_n++)
    {
        ImGuiTableColumn* column = &table->Columns[column_n];
        if (column->SortOrder != -1 && !column->IsEnabled)
            column->SortOrder = -1;
        if (column->SortOrder == -1)
            continue;
        sort_order_count++;
        sort_order_mask |= ((ImU64)1 << column->SortOrder);
        IM_ASSERT(sort_order_count < (int)sizeof(sort_order_mask) * 8);
    }

    // If the set of used orders is exactly {0..count-1}, mask+1 is a power of two equal to 1<<count.
    const bool need_fix_linearize = ((ImU64)1 << sort_order_count) != (sort_order_mask + 1);
    const bool need_fix_single_sort_order = (sort_order_count > 1) && !(table->Flags & ImGuiTableFlags_SortMulti);
    if (need_fix_linearize || need_fix_single_sort_order)
    {
        ImU64 fixed_mask = 0x00;
        for (int sort_n = 0; sort_n < sort_order_count; sort_n++)
        {
            // Fix: Rewrite sort order fields if needed so they have no gap or duplicate.
            // (e.g. SortOrder 0 disappeared, SortOrder 1..2 exists --> rewrite them as SortOrder 0..1)
            // Selection sort: repeatedly pick the not-yet-fixed column with the smallest SortOrder.
            int column_with_smallest_sort_order = -1;
            for (int column_n = 0; column_n < table->ColumnsCount; column_n++)
                if ((fixed_mask & ((ImU64)1 << (ImU64)column_n)) == 0 && table->Columns[column_n].SortOrder != -1)
                    if (column_with_smallest_sort_order == -1 || table->Columns[column_n].SortOrder < table->Columns[column_with_smallest_sort_order].SortOrder)
                        column_with_smallest_sort_order = column_n;
            IM_ASSERT(column_with_smallest_sort_order != -1);
            fixed_mask |= ((ImU64)1 << column_with_smallest_sort_order);
            table->Columns[column_with_smallest_sort_order].SortOrder = (ImGuiTableColumnIdx)sort_n;

            // Fix: Make sure only one column has a SortOrder if ImGuiTableFlags_MultiSortable is not set.
            if (need_fix_single_sort_order)
            {
                sort_order_count = 1;
                for (int column_n = 0; column_n < table->ColumnsCount; column_n++)
                    if (column_n != column_with_smallest_sort_order)
                        table->Columns[column_n].SortOrder = -1;
                break;
            }
        }
    }

    // Fallback default sort order (if no column had the ImGuiTableColumnFlags_DefaultSort flag)
    if (sort_order_count == 0 && !(table->Flags & ImGuiTableFlags_SortTristate))
        for (int column_n = 0; column_n < table->ColumnsCount; column_n++)
        {
            ImGuiTableColumn* column = &table->Columns[column_n];
            if (column->IsEnabled && !(column->Flags & ImGuiTableColumnFlags_NoSort))
            {
                sort_order_count = 1;
                column->SortOrder = 0;
                column->SortDirection = (ImU8)TableGetColumnAvailSortDirection(column, 0);
                break;
            }
        }

    table->SortSpecsCount = (ImGuiTableColumnIdx)sort_order_count;
}

// Sanitize sort state then serialize it into the user-facing table->SortSpecs array.
// Single-column sort uses the inline SortSpecsSingle storage; multi-sort uses the SortSpecsMulti buffer.
void ImGui::TableSortSpecsBuild(ImGuiTable* table)
{
    IM_ASSERT(table->IsSortSpecsDirty);
    TableSortSpecsSanitize(table);

    // Write output
    table->SortSpecsMulti.resize(table->SortSpecsCount <= 1 ? 0 : table->SortSpecsCount);
    ImGuiTableColumnSortSpecs* sort_specs = (table->SortSpecsCount == 0) ? NULL : (table->SortSpecsCount == 1) ? &table->SortSpecsSingle : table->SortSpecsMulti.Data;
    for (int column_n = 0; column_n < table->ColumnsCount; column_n++)
    {
        ImGuiTableColumn* column = &table->Columns[column_n];
        if (column->SortOrder == -1)
            continue;
        IM_ASSERT(column->SortOrder < table->SortSpecsCount);
        // Specs are stored indexed by SortOrder (most-significant sort first).
        ImGuiTableColumnSortSpecs* sort_spec = &sort_specs[column->SortOrder];
        sort_spec->ColumnUserID = column->UserID;
        sort_spec->ColumnIndex = (ImGuiTableColumnIdx)column_n;
        sort_spec->SortOrder = (ImGuiTableColumnIdx)column->SortOrder;
        sort_spec->SortDirection = column->SortDirection;
    }
    table->SortSpecs.Specs = sort_specs;
    table->SortSpecs.SpecsCount = table->SortSpecsCount;
    table->SortSpecs.SpecsDirty = true; // Mark as dirty for user
    table->IsSortSpecsDirty = false; // Mark as not dirty for us
}

//-------------------------------------------------------------------------
// [SECTION] Tables: Headers
//-------------------------------------------------------------------------
// - TableGetHeaderRowHeight() [Internal]
// - TableHeadersRow()
// - TableHeader()
//-------------------------------------------------------------------------

// Return the height a TableHeadersRow() would use: the tallest enabled column label plus vertical cell padding.
float ImGui::TableGetHeaderRowHeight()
{
    // Caring for a minor edge case:
    // Calculate row height, for the unlikely case that some labels may be taller than others.
    // If we didn't do that, uneven header height would highlight but smaller one before the tallest wouldn't catch input for all height.
    // In your custom header row you may omit this all together and just call TableNextRow() without a height...
    float row_height = GetTextLineHeight();
    int columns_count = TableGetColumnCount();
    for (int column_n = 0; column_n < columns_count; column_n++)
        if (TableGetColumnFlags(column_n) & ImGuiTableColumnFlags_IsEnabled)
            row_height = ImMax(row_height, CalcTextSize(TableGetColumnName(column_n)).y);
    row_height += GetStyle().CellPadding.y * 2.0f;
    return row_height;
}

// [Public] This is a helper to output TableHeader() calls based on the column names declared in TableSetupColumn().
// The intent is that advanced users willing to create customized headers would not need to use this helper
// and can create their own! For example: TableHeader() may be preceded by Checkbox() or other custom widgets.
// See 'Demo->Tables->Custom headers' for a demonstration of implementing a custom version of this.
// This code is constructed to not make much use of internal functions, as it is intended to be a template to copy.
// FIXME-TABLE: TableOpenContextMenu() and TableGetHeaderRowHeight() are not public.
void ImGui::TableHeadersRow()
{
    ImGuiContext& g = *GImGui;
    ImGuiTable* table = g.CurrentTable;
    IM_ASSERT(table != NULL && "Need to call TableHeadersRow() after BeginTable()!");

    // Layout if not already done (this is automatically done by TableNextRow, we do it here solely to facilitate stepping in debugger as it is frequent to step in TableUpdateLayout)
    if (!table->IsLayoutLocked)
        TableUpdateLayout(table);

    // Open row
    const float row_y1 = GetCursorScreenPos().y;
    const float row_height = TableGetHeaderRowHeight();
    TableNextRow(ImGuiTableRowFlags_Headers, row_height);
    if (table->HostSkipItems) // Merely an optimization, you may skip in your own code.
        return;

    const int columns_count = TableGetColumnCount();
    for (int column_n = 0; column_n < columns_count; column_n++)
    {
        if (!TableSetColumnIndex(column_n))
            continue;

        // Push an id to allow unnamed labels (generally accidental, but let's behave nicely with them)
        // - in your own code you may omit the PushID/PopID all-together, provided you know they won't collide
        // - table->InstanceCurrent is only >0 when we use multiple BeginTable/EndTable calls with same identifier.
        const char* name = TableGetColumnName(column_n);
        PushID(table->InstanceCurrent * table->ColumnsCount + column_n);
        TableHeader(name);
        PopID();
    }

    // Allow opening popup from the right-most section after the last column.
    ImVec2 mouse_pos = ImGui::GetMousePos();
    if (IsMouseReleased(1) && TableGetHoveredColumn() == columns_count)
        if (mouse_pos.y >= row_y1 && mouse_pos.y < row_y1 + row_height)
            TableOpenContextMenu(-1); // Will open a non-column-specific popup.
}

// Emit a column header (text + optional sort order)
// We cpu-clip text here so that all columns headers can be merged into a same draw call.
// Note that because of how we cpu-clip and display sorting indicators, you _cannot_ use SameLine() after a TableHeader()
void ImGui::TableHeader(const char* label)
{
    ImGuiContext& g = *GImGui;
    ImGuiWindow* window = g.CurrentWindow;
    if (window->SkipItems)
        return;

    ImGuiTable* table = g.CurrentTable;
    IM_ASSERT(table != NULL && "Need to call TableHeader() after BeginTable()!");
    IM_ASSERT(table->CurrentColumn != -1);
    const int column_n = table->CurrentColumn;
    ImGuiTableColumn* column = &table->Columns[column_n];

    // Label
    if (label == NULL)
        label = "";
    const char* label_end = FindRenderedTextEnd(label);
    ImVec2 label_size = CalcTextSize(label, label_end, true);
    ImVec2 label_pos = window->DC.CursorPos;

    // If we already got a row height, let's use that.
    // FIXME-TABLE: Padding problem if the correct outer-padding CellBgRect strays off our ClipRect?
    ImRect cell_r = TableGetCellBgRect(table, column_n);
    float label_height = ImMax(label_size.y, table->RowMinHeight - table->CellPaddingY * 2.0f);

    // Calculate ideal size for sort order arrow
    float w_arrow = 0.0f;
    float w_sort_text = 0.0f;
    char sort_order_suf[4] = "";
    const float ARROW_SCALE = 0.65f;
    if ((table->Flags & ImGuiTableFlags_Sortable) && !(column->Flags & ImGuiTableColumnFlags_NoSort))
    {
        w_arrow = ImFloor(g.FontSize * ARROW_SCALE + g.Style.FramePadding.x);
        if (column->SortOrder > 0)
        {
            // Multi-sort: display the 1-based sort rank ("2", "3"...) next to the arrow.
            ImFormatString(sort_order_suf, IM_ARRAYSIZE(sort_order_suf), "%d", column->SortOrder + 1);
            w_sort_text = g.Style.ItemInnerSpacing.x + CalcTextSize(sort_order_suf).x;
        }
    }

    // We feed our unclipped width to the column without writing on CursorMaxPos, so that column is still considered for merging.
    float max_pos_x = label_pos.x + label_size.x + w_sort_text + w_arrow;
    column->ContentMaxXHeadersUsed = ImMax(column->ContentMaxXHeadersUsed, column->WorkMaxX);
    column->ContentMaxXHeadersIdeal = ImMax(column->ContentMaxXHeadersIdeal, max_pos_x);

    // Keep header highlighted when context menu is open.
    const bool selected = (table->IsContextPopupOpen && table->ContextPopupColumn == column_n && table->InstanceInteracted == table->InstanceCurrent);
    ImGuiID id = window->GetID(label);
    ImRect bb(cell_r.Min.x, cell_r.Min.y, cell_r.Max.x, ImMax(cell_r.Max.y, cell_r.Min.y + label_height + g.Style.CellPadding.y * 2.0f));
    ItemSize(ImVec2(0.0f, label_height)); // Don't declare unclipped width, it'll be fed ContentMaxPosHeadersIdeal
    if (!ItemAdd(bb, id))
        return;

    //GetForegroundDrawList()->AddRect(cell_r.Min, cell_r.Max, IM_COL32(255, 0, 0, 255)); // [DEBUG]
    //GetForegroundDrawList()->AddRect(bb.Min, bb.Max, IM_COL32(255, 0, 0, 255)); // [DEBUG]

    // Using AllowItemOverlap mode because we cover the whole cell, and we want user to be able to submit subsequent items.
    bool hovered, held;
    bool pressed = ButtonBehavior(bb, id, &hovered, &held, ImGuiButtonFlags_AllowItemOverlap);
    if (g.ActiveId != id)
        SetItemAllowOverlap();
    if (held || hovered || selected)
    {
        const ImU32 col = GetColorU32(held ? ImGuiCol_HeaderActive : hovered ? ImGuiCol_HeaderHovered : ImGuiCol_Header);
        //RenderFrame(bb.Min, bb.Max, col, false, 0.0f);
        TableSetBgColor(ImGuiTableBgTarget_CellBg, col, table->CurrentColumn);
        RenderNavHighlight(bb, id, ImGuiNavHighlightFlags_TypeThin | ImGuiNavHighlightFlags_NoRounding);
    }
    else
    {
        // Submit single cell bg color in the case we didn't submit a full header row
        if ((table->RowFlags & ImGuiTableRowFlags_Headers) == 0)
            TableSetBgColor(ImGuiTableBgTarget_CellBg, GetColorU32(ImGuiCol_TableHeaderBg), table->CurrentColumn);
    }
    if (held)
        table->HeldHeaderColumn = (ImGuiTableColumnIdx)column_n;
    window->DC.CursorPos.y -= g.Style.ItemSpacing.y * 0.5f;

    // Drag and drop to re-order columns.
    // FIXME-TABLE: Scroll request while reordering a column and it lands out of the scrolling zone.
    if (held && (table->Flags & ImGuiTableFlags_Reorderable) && IsMouseDragging(0) && !g.DragDropActive)
    {
        // While moving a column it will jump on the other side of the mouse, so we also test for MouseDelta.x
        table->ReorderColumn = (ImGuiTableColumnIdx)column_n;
        table->InstanceInteracted = table->InstanceCurrent;

        // We don't reorder: through the frozen<>unfrozen line, or through a column that is marked with ImGuiTableColumnFlags_NoReorder.
        if (g.IO.MouseDelta.x < 0.0f && g.IO.MousePos.x < cell_r.Min.x)
            if (ImGuiTableColumn* prev_column = (column->PrevEnabledColumn != -1) ? &table->Columns[column->PrevEnabledColumn] : NULL)
                if (!((column->Flags | prev_column->Flags) & ImGuiTableColumnFlags_NoReorder))
                    if ((column->IndexWithinEnabledSet < table->FreezeColumnsRequest) == (prev_column->IndexWithinEnabledSet < table->FreezeColumnsRequest))
                        table->ReorderColumnDir = -1;
        if (g.IO.MouseDelta.x > 0.0f && g.IO.MousePos.x > cell_r.Max.x)
            if (ImGuiTableColumn* next_column = (column->NextEnabledColumn != -1) ? &table->Columns[column->NextEnabledColumn] : NULL)
                if (!((column->Flags | next_column->Flags) & ImGuiTableColumnFlags_NoReorder))
                    if ((column->IndexWithinEnabledSet < table->FreezeColumnsRequest) == (next_column->IndexWithinEnabledSet < table->FreezeColumnsRequest))
                        table->ReorderColumnDir = +1;
    }

    // Sort order arrow
    const float ellipsis_max = cell_r.Max.x - w_arrow - w_sort_text;
    if ((table->Flags & ImGuiTableFlags_Sortable) && !(column->Flags & ImGuiTableColumnFlags_NoSort))
    {
        if (column->SortOrder != -1)
        {
            // Right-align the (optional) sort rank text and the direction arrow inside the cell.
            float x = ImMax(cell_r.Min.x, cell_r.Max.x - w_arrow - w_sort_text);
            float y = label_pos.y;
            if (column->SortOrder > 0)
            {
                PushStyleColor(ImGuiCol_Text, GetColorU32(ImGuiCol_Text, 0.70f));
                RenderText(ImVec2(x + g.Style.ItemInnerSpacing.x, y), sort_order_suf);
                PopStyleColor();
                x += w_sort_text;
            }
            RenderArrow(window->DrawList, ImVec2(x, y), GetColorU32(ImGuiCol_Text), column->SortDirection == ImGuiSortDirection_Ascending ? ImGuiDir_Up : ImGuiDir_Down, ARROW_SCALE);
        }

        // Handle clicking on column header to adjust Sort Order
        if (pressed && table->ReorderColumn != column_n)
        {
            ImGuiSortDirection sort_direction = TableGetColumnNextSortDirection(column);
            TableSetColumnSortDirection(column_n, sort_direction, g.IO.KeyShift);
        }
    }

    // Render clipped label. Clipping here ensures that in the majority of situations, all our header cells will
    // be merged into a single draw call.
    //window->DrawList->AddCircleFilled(ImVec2(ellipsis_max, label_pos.y), 40, IM_COL32_WHITE);
    RenderTextEllipsis(window->DrawList, label_pos, ImVec2(ellipsis_max, label_pos.y + label_height + g.Style.FramePadding.y), ellipsis_max, ellipsis_max, label, label_end, &label_size);

    const bool text_clipped = label_size.x > (ellipsis_max - label_pos.x);
    if (text_clipped && hovered && g.HoveredIdNotActiveTimer > g.TooltipSlowDelay)
        SetTooltip("%.*s", (int)(label_end - label), label);

    // We don't use BeginPopupContextItem() because we want the popup to stay up even after the column is hidden
    if (IsMouseReleased(1) && IsItemHovered())
        TableOpenContextMenu(column_n);
}

//-------------------------------------------------------------------------
// [SECTION] Tables: Context Menu
//-------------------------------------------------------------------------
// - TableOpenContextMenu() [Internal]
// - TableDrawContextMenu() [Internal]
//-------------------------------------------------------------------------

// Use -1 to open menu not specific to a given column.
// Open the table context menu popup, targeting 'column_n' (-1 for a non-column-specific menu).
// Only opens when the table has at least one of the Resizable/Reorderable/Hideable features enabled.
void ImGui::TableOpenContextMenu(int column_n)
{
    ImGuiContext& g = *GImGui;
    ImGuiTable* table = g.CurrentTable;
    if (column_n == -1 && table->CurrentColumn != -1)   // When called within a column automatically use this one (for consistency)
        column_n = table->CurrentColumn;
    if (column_n == table->ColumnsCount)                // To facilitate using with TableGetHoveredColumn()
        column_n = -1;
    IM_ASSERT(column_n >= -1 && column_n < table->ColumnsCount);
    if (table->Flags & (ImGuiTableFlags_Resizable | ImGuiTableFlags_Reorderable | ImGuiTableFlags_Hideable))
    {
        table->IsContextPopupOpen = true;
        table->ContextPopupColumn = (ImGuiTableColumnIdx)column_n;
        table->InstanceInteracted = table->InstanceCurrent;
        // Popup id is derived from the table id so each table gets its own context menu.
        const ImGuiID context_menu_id = ImHashStr("##ContextMenu", 0, table->ID);
        OpenPopupEx(context_menu_id, ImGuiPopupFlags_None);
    }
}

// Output context menu into current window (generally a popup)
// FIXME-TABLE: Ideally this should be writable by the user. Full programmatic access to that data?
void ImGui::TableDrawContextMenu(ImGuiTable* table)
{
    ImGuiContext& g = *GImGui;
    ImGuiWindow* window = g.CurrentWindow;
    if (window->SkipItems)
        return;

    // Sections are emitted in order (Sizing, Ordering, Hiding), separated lazily via 'want_separator'.
    bool want_separator = false;
    const int column_n = (table->ContextPopupColumn >= 0 && table->ContextPopupColumn < table->ColumnsCount) ? table->ContextPopupColumn : -1;
    ImGuiTableColumn* column = (column_n != -1) ? &table->Columns[column_n] : NULL;

    // Sizing
    if (table->Flags & ImGuiTableFlags_Resizable)
    {
        if (column != NULL)
        {
            const bool can_resize = !(column->Flags & ImGuiTableColumnFlags_NoResize) && column->IsEnabled;
            if (MenuItem("Size column to fit###SizeOne", NULL, false, can_resize))
                TableSetColumnWidthAutoSingle(table, column_n);
        }

        const char* size_all_desc;
        if (table->ColumnsEnabledFixedCount == table->ColumnsEnabledCount && (table->Flags & ImGuiTableFlags_SizingMask_) != ImGuiTableFlags_SizingFixedSame)
            size_all_desc = "Size all columns to fit###SizeAll";        // All fixed
        else
            size_all_desc = "Size all columns to default###SizeAll";    // All stretch or mixed
        if (MenuItem(size_all_desc, NULL))
            TableSetColumnWidthAutoAll(table);
        want_separator = true;
    }

    // Ordering
    if (table->Flags & ImGuiTableFlags_Reorderable)
    {
        if (MenuItem("Reset order", NULL, false, !table->IsDefaultDisplayOrder))
            table->IsResetDisplayOrderRequest = true;
        want_separator = true;
    }

    // Reset all (should work but seems unnecessary/noisy to expose?)
    //if (MenuItem("Reset all"))
    //    table->IsResetAllRequest = true;

    // Sorting
    // (modify TableOpenContextMenu() to add _Sortable flag if enabling this)
#if 0
    if ((table->Flags & ImGuiTableFlags_Sortable) && column != NULL && (column->Flags & ImGuiTableColumnFlags_NoSort) == 0)
    {
        if (want_separator)
            Separator();
        want_separator = true;

        bool append_to_sort_specs = g.IO.KeyShift;
        if (MenuItem("Sort in Ascending Order", NULL, column->SortOrder != -1 && column->SortDirection == ImGuiSortDirection_Ascending, (column->Flags & ImGuiTableColumnFlags_NoSortAscending) == 0))
            TableSetColumnSortDirection(table, column_n, ImGuiSortDirection_Ascending, append_to_sort_specs);
        if (MenuItem("Sort in Descending Order", NULL, column->SortOrder != -1 && column->SortDirection == ImGuiSortDirection_Descending, (column->Flags & ImGuiTableColumnFlags_NoSortDescending) == 0))
            TableSetColumnSortDirection(table, column_n, ImGuiSortDirection_Descending, append_to_sort_specs);
    }
#endif

    // Hiding / Visibility
    if (table->Flags & ImGuiTableFlags_Hideable)
    {
        if (want_separator)
            Separator();
        want_separator = true;

        // Keep the popup open while toggling visibility so multiple columns can be changed in one go.
        PushItemFlag(ImGuiItemFlags_SelectableDontClosePopup, true);
        for (int other_column_n = 0; other_column_n < table->ColumnsCount; other_column_n++)
        {
            ImGuiTableColumn* other_column = &table->Columns[other_column_n];
            const char* name = TableGetColumnName(table, other_column_n);
            if (name == NULL || name[0] == 0)
                name = "<Unknown>";

            // Make sure we can't hide the last active column
            bool menu_item_active = (other_column->Flags & ImGuiTableColumnFlags_NoHide) ? false : true;
            if (other_column->IsEnabled && table->ColumnsEnabledCount <= 1)
                menu_item_active = false;
            if (MenuItem(name, NULL, other_column->IsEnabled, menu_item_active))
                other_column->IsEnabledNextFrame = !other_column->IsEnabled;
        }
        PopItemFlag();
    }
}

//-------------------------------------------------------------------------
// [SECTION] Tables: Settings (.ini data)
//-------------------------------------------------------------------------
// FIXME: The binding/finding/creating flow are too confusing.
//-------------------------------------------------------------------------
// - TableSettingsInit() [Internal]
// - TableSettingsCalcChunkSize() [Internal]
// - TableSettingsCreate() [Internal]
// - TableSettingsFindByID() [Internal]
// - TableGetBoundSettings() [Internal]
// - TableResetSettings()
// - TableSaveSettings() [Internal]
// - TableLoadSettings() [Internal]
// - TableSettingsHandler_ClearAll() [Internal]
// - TableSettingsHandler_ApplyAll() [Internal]
// - TableSettingsHandler_ReadOpen() [Internal]
// - TableSettingsHandler_ReadLine() [Internal]
// - TableSettingsHandler_WriteAll() [Internal]
// - TableSettingsInstallHandler() [Internal]
//-------------------------------------------------------------------------
// [Init] 1: TableSettingsHandler_ReadXXXX()   Load and parse .ini file into TableSettings.
// [Main] 2: TableLoadSettings()               When table is created, bind Table to TableSettings, serialize TableSettings data into Table.
// [Main] 3: TableSaveSettings()               When table properties are modified, serialize Table data into bound or new TableSettings, mark .ini as dirty.
// [Main] 4: TableSettingsHandler_WriteAll()   When .ini file is dirty (which can come from other source), save TableSettings into .ini file.
//-------------------------------------------------------------------------

// Clear and initialize empty settings instance
// Placement-new both the header and every column slot so a recycled chunk starts from a clean state.
static void TableSettingsInit(ImGuiTableSettings* settings, ImGuiID id, int columns_count, int columns_count_max)
{
    IM_PLACEMENT_NEW(settings) ImGuiTableSettings();
    ImGuiTableColumnSettings* settings_column = settings->GetColumnSettings();
    for (int n = 0; n < columns_count_max; n++, settings_column++)
        IM_PLACEMENT_NEW(settings_column) ImGuiTableColumnSettings();
    settings->ID = id;
    settings->ColumnsCount = (ImGuiTableColumnIdx)columns_count;
    settings->ColumnsCountMax = (ImGuiTableColumnIdx)columns_count_max;
    settings->WantApply = true;
}

// Size in bytes of one settings chunk: fixed header followed by one ImGuiTableColumnSettings per column.
static size_t TableSettingsCalcChunkSize(int columns_count)
{
    return sizeof(ImGuiTableSettings) + (size_t)columns_count * sizeof(ImGuiTableColumnSettings);
}

// Allocate and initialize a new settings chunk in the context's settings stream.
ImGuiTableSettings* ImGui::TableSettingsCreate(ImGuiID id, int columns_count)
{
    ImGuiContext& g = *GImGui;
    ImGuiTableSettings* settings = g.SettingsTables.alloc_chunk(TableSettingsCalcChunkSize(columns_count));
    TableSettingsInit(settings, id, columns_count, columns_count);
    return settings;
}

// Find existing settings
ImGuiTableSettings* ImGui::TableSettingsFindByID(ImGuiID id)
{
    // FIXME-OPT: Might want to store a lookup map for this?
    ImGuiContext& g = *GImGui;
    for (ImGuiTableSettings* settings = g.SettingsTables.begin(); settings != NULL; settings = g.SettingsTables.next_chunk(settings))
        if (settings->ID == id)
            return settings;
    return NULL;
}

// Get settings for a given table, NULL if none
ImGuiTableSettings* ImGui::TableGetBoundSettings(ImGuiTable* table)
{
    if (table->SettingsOffset != -1)
    {
        ImGuiContext& g = *GImGui;
        ImGuiTableSettings* settings = g.SettingsTables.ptr_from_offset(table->SettingsOffset);
        IM_ASSERT(settings->ID == table->ID);
        if (settings->ColumnsCountMax >= table->ColumnsCount)
            return settings; // OK
        settings->ID = 0; // Invalidate storage, we won't fit because of a count change
    }
    return NULL;
}

// Restore initial state of table (with or without saved settings)
void ImGui::TableResetSettings(ImGuiTable* table)
{
    table->IsInitializing = table->IsSettingsDirty = true;
    table->IsResetAllRequest = false;
    table->IsSettingsRequestLoad = false;              // Don't reload from ini
    table->SettingsLoadedFlags = ImGuiTableFlags_None; // Mark as nothing loaded so our initialized data becomes authoritative
}

// Serialize live table state into its bound settings chunk (creating/binding one if needed) and mark the .ini dirty.
void ImGui::TableSaveSettings(ImGuiTable* table)
{
    table->IsSettingsDirty = false;
    if (table->Flags & ImGuiTableFlags_NoSavedSettings)
        return;

    // Bind or create settings data
    ImGuiContext& g = *GImGui;
    ImGuiTableSettings* settings = TableGetBoundSettings(table);
    if (settings == NULL)
    {
        settings = TableSettingsCreate(table->ID, table->ColumnsCount);
        table->SettingsOffset = g.SettingsTables.offset_from_ptr(settings);
    }
    settings->ColumnsCount = (ImGuiTableColumnIdx)table->ColumnsCount;

    // Serialize ImGuiTable/ImGuiTableColumn into ImGuiTableSettings/ImGuiTableColumnSettings
    IM_ASSERT(settings->ID == table->ID);
    IM_ASSERT(settings->ColumnsCount == table->ColumnsCount && settings->ColumnsCountMax >= settings->ColumnsCount);
    ImGuiTableColumn* column = table->Columns.Data;
    ImGuiTableColumnSettings* column_settings = settings->GetColumnSettings();

    bool save_ref_scale = false;
    settings->SaveFlags =
ImGuiTableFlags_None;
    for (int n = 0; n < table->ColumnsCount; n++, column++, column_settings++)
    {
        const float width_or_weight = (column->Flags & ImGuiTableColumnFlags_WidthStretch) ? column->StretchWeight : column->WidthRequest;
        column_settings->WidthOrWeight = width_or_weight;
        column_settings->Index = (ImGuiTableColumnIdx)n;
        column_settings->DisplayOrder = column->DisplayOrder;
        column_settings->SortOrder = column->SortOrder;
        column_settings->SortDirection = column->SortDirection;
        column_settings->IsEnabled = column->IsEnabled;
        column_settings->IsStretch = (column->Flags & ImGuiTableColumnFlags_WidthStretch) ? 1 : 0;
        if ((column->Flags & ImGuiTableColumnFlags_WidthStretch) == 0)
            save_ref_scale = true; // At least one fixed-width column -> RefScale worth saving below

        // We skip saving some data in the .ini file when they are unnecessary to restore our state.
        // Note that fixed width where initial width was derived from auto-fit will always be saved as InitStretchWeightOrWidth will be 0.0f.
        // FIXME-TABLE: We don't have logic to easily compare SortOrder to DefaultSortOrder yet so it's always saved when present.
        if (width_or_weight != column->InitStretchWeightOrWidth)
            settings->SaveFlags |= ImGuiTableFlags_Resizable;
        if (column->DisplayOrder != n)
            settings->SaveFlags |= ImGuiTableFlags_Reorderable;
        if (column->SortOrder != -1)
            settings->SaveFlags |= ImGuiTableFlags_Sortable;
        if (column->IsEnabled != ((column->Flags & ImGuiTableColumnFlags_DefaultHide) == 0))
            settings->SaveFlags |= ImGuiTableFlags_Hideable;
    }
    // Only persist categories of data the table's own flags actually enable.
    settings->SaveFlags &= table->Flags;
    settings->RefScale = save_ref_scale ? table->RefScale : 0.0f;

    MarkIniSettingsDirty();
}

// Deserialize a bound/found settings chunk back into the live table (inverse of TableSaveSettings()).
void ImGui::TableLoadSettings(ImGuiTable* table)
{
    ImGuiContext& g = *GImGui;
    table->IsSettingsRequestLoad = false;
    if (table->Flags & ImGuiTableFlags_NoSavedSettings)
        return;

    // Bind settings
    ImGuiTableSettings* settings;
    if (table->SettingsOffset == -1)
    {
        settings = TableSettingsFindByID(table->ID);
        if (settings == NULL)
            return;
        if (settings->ColumnsCount != table->ColumnsCount) // Allow settings if columns count changed. We could otherwise decide to return...
            table->IsSettingsDirty = true;
        table->SettingsOffset = g.SettingsTables.offset_from_ptr(settings);
    }
    else
    {
        settings = TableGetBoundSettings(table);
    }

    table->SettingsLoadedFlags = settings->SaveFlags;
    table->RefScale = settings->RefScale;

    // Serialize ImGuiTableSettings/ImGuiTableColumnSettings into ImGuiTable/ImGuiTableColumn
    ImGuiTableColumnSettings* column_settings = settings->GetColumnSettings();
    ImU64 display_order_mask = 0; // One bit per used display order slot, validated below
    for (int data_n = 0; data_n < settings->ColumnsCount; data_n++, column_settings++)
    {
        int column_n = column_settings->Index;
        if (column_n < 0 || column_n >= table->ColumnsCount)
            continue; // Stored index out of range for current table: ignore that record
        ImGuiTableColumn* column = &table->Columns[column_n];
        if (settings->SaveFlags & ImGuiTableFlags_Resizable)
        {
            if (column_settings->IsStretch)
                column->StretchWeight = column_settings->WidthOrWeight;
            else
                column->WidthRequest = column_settings->WidthOrWeight;
            column->AutoFitQueue = 0x00;
        }
        if (settings->SaveFlags & ImGuiTableFlags_Reorderable)
            column->DisplayOrder = column_settings->DisplayOrder;
        else
            column->DisplayOrder = (ImGuiTableColumnIdx)column_n;
        display_order_mask |= (ImU64)1 << column->DisplayOrder;
        column->IsEnabled = column->IsEnabledNextFrame = column_settings->IsEnabled;
        column->SortOrder = column_settings->SortOrder;
        column->SortDirection = column_settings->SortDirection;
    }

    // Validate and fix invalid display order data
    const ImU64 expected_display_order_mask = (settings->ColumnsCount == 64) ?
~0 : ((ImU64)1 << settings->ColumnsCount) - 1;
    if (display_order_mask != expected_display_order_mask)
        for (int column_n = 0; column_n < table->ColumnsCount; column_n++)
            table->Columns[column_n].DisplayOrder = (ImGuiTableColumnIdx)column_n; // Duplicate/missing orders: fall back to natural order

    // Rebuild index
    for (int column_n = 0; column_n < table->ColumnsCount; column_n++)
        table->DisplayOrderToIndex[table->Columns[column_n].DisplayOrder] = (ImGuiTableColumnIdx)column_n;
}

// .ini handler: forget all stored table settings and unbind every live table.
static void TableSettingsHandler_ClearAll(ImGuiContext* ctx, ImGuiSettingsHandler*)
{
    ImGuiContext& g = *ctx;
    for (int i = 0; i != g.Tables.GetSize(); i++)
        g.Tables.GetByIndex(i)->SettingsOffset = -1;
    g.SettingsTables.clear();
}

// Apply to existing windows (if any)
static void TableSettingsHandler_ApplyAll(ImGuiContext* ctx, ImGuiSettingsHandler*)
{
    ImGuiContext& g = *ctx;
    for (int i = 0; i != g.Tables.GetSize(); i++)
    {
        ImGuiTable* table = g.Tables.GetByIndex(i);
        table->IsSettingsRequestLoad = true; // Table will re-bind and reload its settings
        table->SettingsOffset = -1;
    }
}

// .ini handler: parse a "[Table][0x...,N]" section header and return the settings chunk the following lines go into.
static void* TableSettingsHandler_ReadOpen(ImGuiContext*, ImGuiSettingsHandler*, const char* name)
{
    ImGuiID id = 0;
    int columns_count = 0;
    if (sscanf(name, "0x%08X,%d", &id, &columns_count) < 2)
        return NULL;

    if (ImGuiTableSettings* settings = ImGui::TableSettingsFindByID(id))
    {
        if (settings->ColumnsCountMax >= columns_count)
        {
            TableSettingsInit(settings, id, columns_count, settings->ColumnsCountMax); // Recycle
            return settings;
        }
        settings->ID = 0; // Invalidate storage, we won't fit because of a count change
    }
    return ImGui::TableSettingsCreate(id, columns_count);
}

// .ini handler: parse one line of a table section into the chunk returned by ReadOpen.
static void TableSettingsHandler_ReadLine(ImGuiContext*, ImGuiSettingsHandler*, void* entry, const char* line)
{
    // "Column 0 UserID=0x42AD2D21 Width=100 Visible=1 Order=0 Sort=0v"
    ImGuiTableSettings* settings = (ImGuiTableSettings*)entry;
    float f = 0.0f;
    int column_n = 0, r = 0, n = 0;

    if (sscanf(line, "RefScale=%f", &f) == 1)
    {
        settings->RefScale = f;
        return;
    }

    if (sscanf(line, "Column %d%n", &column_n, &r) == 1)
    {
        if (column_n < 0 || column_n >= settings->ColumnsCount)
            return;
        line = ImStrSkipBlank(line + r);
        char c = 0;
        ImGuiTableColumnSettings* column = settings->GetColumnSettings() + column_n;
        column->Index = (ImGuiTableColumnIdx)column_n;
        // Every field is optional; sscanf's %n + ImStrSkipBlank() advance the cursor past whichever fields are present.
        if (sscanf(line, "UserID=0x%08X%n", (ImU32*)&n, &r)==1) { line = ImStrSkipBlank(line + r); column->UserID = (ImGuiID)n; }
        if (sscanf(line, "Width=%d%n", &n, &r) == 1)           { line = ImStrSkipBlank(line + r); column->WidthOrWeight = (float)n; column->IsStretch = 0; settings->SaveFlags |= ImGuiTableFlags_Resizable; }
        if (sscanf(line, "Weight=%f%n", &f, &r) == 1)          { line = ImStrSkipBlank(line + r); column->WidthOrWeight = f; column->IsStretch = 1; settings->SaveFlags |= ImGuiTableFlags_Resizable; }
        if (sscanf(line, "Visible=%d%n", &n, &r) == 1)         { line = ImStrSkipBlank(line + r); column->IsEnabled = (ImU8)n; settings->SaveFlags |= ImGuiTableFlags_Hideable; }
        if (sscanf(line, "Order=%d%n", &n, &r) == 1)           { line = ImStrSkipBlank(line + r); column->DisplayOrder = (ImGuiTableColumnIdx)n; settings->SaveFlags |= ImGuiTableFlags_Reorderable; }
        if (sscanf(line, "Sort=%d%c%n", &n, &c, &r) == 2)      { line = ImStrSkipBlank(line + r); column->SortOrder = (ImGuiTableColumnIdx)n; column->SortDirection = (c == '^') ? ImGuiSortDirection_Descending : ImGuiSortDirection_Ascending; settings->SaveFlags |= ImGuiTableFlags_Sortable; }
    }
}

// .ini handler: write every live settings chunk out as text (inverse of ReadOpen/ReadLine).
static void TableSettingsHandler_WriteAll(ImGuiContext* ctx, ImGuiSettingsHandler* handler, ImGuiTextBuffer* buf)
{
    ImGuiContext& g = *ctx;
    for (ImGuiTableSettings* settings = g.SettingsTables.begin(); settings != NULL; settings = g.SettingsTables.next_chunk(settings))
    {
        if (settings->ID == 0) // Skip ditched settings
            continue;

        // TableSaveSettings() may clear some of those flags when we establish that the data can be stripped
        // (e.g. Order was unchanged)
        const bool save_size    = (settings->SaveFlags & ImGuiTableFlags_Resizable) != 0;
        const bool save_visible = (settings->SaveFlags & ImGuiTableFlags_Hideable) != 0;
        const bool save_order   = (settings->SaveFlags & ImGuiTableFlags_Reorderable) != 0;
        const bool save_sort    = (settings->SaveFlags & ImGuiTableFlags_Sortable) != 0;
        if (!save_size && !save_visible && !save_order && !save_sort)
            continue;

        buf->reserve(buf->size() + 30 + settings->ColumnsCount * 50); // ballpark reserve
        buf->appendf("[%s][0x%08X,%d]\n", handler->TypeName, settings->ID, settings->ColumnsCount);
        if (settings->RefScale != 0.0f)
            buf->appendf("RefScale=%g\n", settings->RefScale);
        ImGuiTableColumnSettings* column = settings->GetColumnSettings();
        for (int column_n = 0; column_n < settings->ColumnsCount; column_n++, column++)
        {
            // "Column 0 UserID=0x42AD2D21 Width=100 Visible=1 Order=0 Sort=0v"
            buf->appendf("Column %-2d", column_n);
            if (column->UserID != 0)
                buf->appendf(" UserID=%08X", column->UserID);
            if (save_size && column->IsStretch)
                buf->appendf(" Weight=%.4f", column->WidthOrWeight);
            if (save_size && !column->IsStretch)
                buf->appendf(" Width=%d", (int)column->WidthOrWeight);
            if (save_visible)
                buf->appendf(" Visible=%d", column->IsEnabled);
            if (save_order)
                buf->appendf(" Order=%d", column->DisplayOrder);
            if (save_sort && column->SortOrder != -1)
                buf->appendf(" Sort=%d%c", column->SortOrder, (column->SortDirection == ImGuiSortDirection_Ascending) ? 'v' : '^');
            buf->append("\n");
        }
        buf->append("\n");
    }
}

// Register the "Table" .ini settings handler into the context.
void ImGui::TableSettingsInstallHandler(ImGuiContext* context)
{
    ImGuiContext& g = *context;
    ImGuiSettingsHandler ini_handler;
    ini_handler.TypeName = "Table";
    ini_handler.TypeHash = ImHashStr("Table");
    ini_handler.ClearAllFn = TableSettingsHandler_ClearAll;
    ini_handler.ReadOpenFn = TableSettingsHandler_ReadOpen;
    ini_handler.ReadLineFn = TableSettingsHandler_ReadLine;
    ini_handler.ApplyAllFn = TableSettingsHandler_ApplyAll;
    ini_handler.WriteAllFn = TableSettingsHandler_WriteAll;
    g.SettingsHandlers.push_back(ini_handler);
}

//-------------------------------------------------------------------------
// [SECTION] Tables: Garbage Collection
//-------------------------------------------------------------------------
// - TableRemove() [Internal]
// - TableGcCompactTransientBuffers() [Internal]
// - TableGcCompactSettings() [Internal]
//-------------------------------------------------------------------------

// Remove Table (currently only used by TestEngine)
void ImGui::TableRemove(ImGuiTable* table)
{
    //IMGUI_DEBUG_LOG("TableRemove() id=0x%08X\n", table->ID);
    ImGuiContext& g = *GImGui;
    int table_idx = g.Tables.GetIndex(table);
    //memset(table->RawData.Data, 0, table->RawData.size_in_bytes());
    //memset(table, 0, sizeof(ImGuiTable));
    g.Tables.Remove(table->ID, table);
    g.TablesLastTimeActive[table_idx] = -1.0f;
}

// Free up/compact internal Table buffers for when it gets unused
void ImGui::TableGcCompactTransientBuffers(ImGuiTable* table)
{
    //IMGUI_DEBUG_LOG("TableGcCompactTransientBuffers() id=0x%08X\n", table->ID);
    ImGuiContext& g = *GImGui;
    IM_ASSERT(table->MemoryCompacted == false);
    table->DrawSplitter.ClearFreeMemory();
    table->SortSpecsMulti.clear();
    table->SortSpecs.Specs = NULL; // Invalidated; IsSortSpecsDirty below requests a rebuild
    table->IsSortSpecsDirty = true;
    table->ColumnsNames.clear();
    table->MemoryCompacted = true;
    for (int n = 0; n < table->ColumnsCount; n++)
        table->Columns[n].NameOffset = -1; // ColumnsNames cleared above: per-column name offsets no longer valid
    g.TablesLastTimeActive[g.Tables.GetIndex(table)] = -1.0f;
}

// Compact and remove unused settings data (currently only used by TestEngine)
void ImGui::TableGcCompactSettings()
{
    ImGuiContext& g = *GImGui;
    int required_memory = 0;
    for (ImGuiTableSettings* settings = g.SettingsTables.begin(); settings != NULL; settings = g.SettingsTables.next_chunk(settings))
        if (settings->ID != 0)
            required_memory += (int)TableSettingsCalcChunkSize(settings->ColumnsCount);
    if (required_memory == g.SettingsTables.Buf.Size)
        return; // No ditched chunks: nothing to compact

    // Rebuild the chunk stream keeping only live (ID != 0) settings.
    ImChunkStream<ImGuiTableSettings> new_chunk_stream;
    new_chunk_stream.Buf.reserve(required_memory);
    for (ImGuiTableSettings* settings = g.SettingsTables.begin(); settings != NULL; settings = g.SettingsTables.next_chunk(settings))
        if (settings->ID != 0)
            memcpy(new_chunk_stream.alloc_chunk(TableSettingsCalcChunkSize(settings->ColumnsCount)), settings, TableSettingsCalcChunkSize(settings->ColumnsCount));
    g.SettingsTables.swap(new_chunk_stream);
}

//-------------------------------------------------------------------------
// [SECTION] Tables: Debugging
//-------------------------------------------------------------------------
// - DebugNodeTable() [Internal]
//-------------------------------------------------------------------------

#ifndef IMGUI_DISABLE_METRICS_WINDOW

// Short human-readable name for the table's sizing policy flags (Metrics window display).
static const char* DebugNodeTableGetSizingPolicyDesc(ImGuiTableFlags sizing_policy)
{
    sizing_policy &= ImGuiTableFlags_SizingMask_;
    if (sizing_policy == ImGuiTableFlags_SizingFixedFit)    { return "FixedFit"; }
    if (sizing_policy == ImGuiTableFlags_SizingFixedSame)   { return "FixedSame"; }
    if (sizing_policy == ImGuiTableFlags_SizingStretchProp) { return "StretchProp"; }
    if (sizing_policy == ImGuiTableFlags_SizingStretchSame) { return "StretchSame"; }
    return "N/A";
}

// Display table internals in the Metrics/Debugger window.
void ImGui::DebugNodeTable(ImGuiTable* table)
{
    char buf[512];
    char* p = buf;
    const char* buf_end = buf + IM_ARRAYSIZE(buf);
    const bool is_active = (table->LastFrameActive >= ImGui::GetFrameCount() - 2); // Note that fully clipped early out scrolling tables will appear as inactive here.
ImFormatString(p, buf_end - p, "Table 0x%08X (%d columns, in '%s')%s", table->ID, table->ColumnsCount, table->OuterWindow->Name, is_active ? "" : " *Inactive*");
    if (!is_active) { PushStyleColor(ImGuiCol_Text, GetStyleColorVec4(ImGuiCol_TextDisabled)); }
    bool open = TreeNode(table, "%s", buf);
    if (!is_active) { PopStyleColor(); }
    if (IsItemHovered())
        GetForegroundDrawList()->AddRect(table->OuterRect.Min, table->OuterRect.Max, IM_COL32(255, 255, 0, 255));
    if (IsItemVisible() && table->HoveredColumnBody != -1)
        GetForegroundDrawList()->AddRect(GetItemRectMin(), GetItemRectMax(), IM_COL32(255, 255, 0, 255));
    if (!open)
        return;
    bool clear_settings = SmallButton("Clear settings");
    BulletText("OuterRect: Pos: (%.1f,%.1f) Size: (%.1f,%.1f) Sizing: '%s'", table->OuterRect.Min.x, table->OuterRect.Min.y, table->OuterRect.GetWidth(), table->OuterRect.GetHeight(), DebugNodeTableGetSizingPolicyDesc(table->Flags));
    BulletText("ColumnsGivenWidth: %.1f, ColumnsAutoFitWidth: %.1f, InnerWidth: %.1f%s", table->ColumnsGivenWidth, table->ColumnsAutoFitWidth, table->InnerWidth, table->InnerWidth == 0.0f ? " (auto)" : "");
    BulletText("CellPaddingX: %.1f, CellSpacingX: %.1f/%.1f, OuterPaddingX: %.1f", table->CellPaddingX, table->CellSpacingX1, table->CellSpacingX2, table->OuterPaddingX);
    BulletText("HoveredColumnBody: %d, HoveredColumnBorder: %d", table->HoveredColumnBody, table->HoveredColumnBorder);
    BulletText("ResizedColumn: %d, ReorderColumn: %d, HeldHeaderColumn: %d", table->ResizedColumn, table->ReorderColumn, table->HeldHeaderColumn);
    //BulletText("BgDrawChannels: %d/%d", 0, table->BgDrawChannelUnfrozen);

    // Total of stretch weights, used below to display each stretch column's weight as a percentage.
    float sum_weights = 0.0f;
    for (int n = 0; n < table->ColumnsCount; n++)
        if (table->Columns[n].Flags & ImGuiTableColumnFlags_WidthStretch)
            sum_weights += table->Columns[n].StretchWeight;
    for (int n = 0; n < table->ColumnsCount; n++)
    {
        ImGuiTableColumn* column = &table->Columns[n];
        const char* name = TableGetColumnName(table, n);
        ImFormatString(buf, IM_ARRAYSIZE(buf),
            "Column %d order %d '%s': offset %+.2f to %+.2f%s\n"
            "Enabled: %d, VisibleX/Y: %d/%d, RequestOutput: %d, SkipItems: %d, DrawChannels: %d,%d\n"
            "WidthGiven: %.1f, Request/Auto: %.1f/%.1f, StretchWeight: %.3f (%.1f%%)\n"
            "MinX: %.1f, MaxX: %.1f (%+.1f), ClipRect: %.1f to %.1f (+%.1f)\n"
            "ContentWidth: %.1f,%.1f, HeadersUsed/Ideal %.1f/%.1f\n"
            "Sort: %d%s, UserID: 0x%08X, Flags: 0x%04X: %s%s%s..",
            n, column->DisplayOrder, name, column->MinX - table->WorkRect.Min.x, column->MaxX - table->WorkRect.Min.x, (n < table->FreezeColumnsRequest) ? " (Frozen)" : "",
            column->IsEnabled, column->IsVisibleX, column->IsVisibleY, column->IsRequestOutput, column->IsSkipItems, column->DrawChannelFrozen, column->DrawChannelUnfrozen,
            column->WidthGiven, column->WidthRequest, column->WidthAuto, column->StretchWeight, column->StretchWeight > 0.0f ?
(column->StretchWeight / sum_weights) * 100.0f : 0.0f,
            column->MinX, column->MaxX, column->MaxX - column->MinX, column->ClipRect.Min.x, column->ClipRect.Max.x, column->ClipRect.Max.x - column->ClipRect.Min.x,
            column->ContentMaxXFrozen - column->WorkMinX, column->ContentMaxXUnfrozen - column->WorkMinX, column->ContentMaxXHeadersUsed - column->WorkMinX, column->ContentMaxXHeadersIdeal - column->WorkMinX,
            column->SortOrder, (column->SortDirection == ImGuiSortDirection_Ascending) ? " (Asc)" : (column->SortDirection == ImGuiSortDirection_Descending) ? " (Des)" : "",
            column->UserID, column->Flags,
            (column->Flags & ImGuiTableColumnFlags_WidthStretch) ? "WidthStretch " : "",
            (column->Flags & ImGuiTableColumnFlags_WidthFixed) ? "WidthFixed " : "",
            (column->Flags & ImGuiTableColumnFlags_NoResize) ? "NoResize " : "");
        Bullet();
        Selectable(buf);
        if (IsItemHovered())
        {
            // Highlight the hovered column's region inside the actual table
            ImRect r(column->MinX, table->OuterRect.Min.y, column->MaxX, table->OuterRect.Max.y);
            GetForegroundDrawList()->AddRect(r.Min, r.Max, IM_COL32(255, 255, 0, 255));
        }
    }
    if (ImGuiTableSettings* settings = TableGetBoundSettings(table))
        DebugNodeTableSettings(settings);
    if (clear_settings)
        table->IsResetAllRequest = true;
    TreePop();
}

// Display a table settings chunk in the Metrics/Debugger window.
void ImGui::DebugNodeTableSettings(ImGuiTableSettings* settings)
{
    if (!TreeNode((void*)(intptr_t)settings->ID, "Settings 0x%08X (%d columns)", settings->ID, settings->ColumnsCount))
        return;
    BulletText("SaveFlags: 0x%08X", settings->SaveFlags);
    BulletText("ColumnsCount: %d (max %d)", settings->ColumnsCount, settings->ColumnsCountMax);
    for (int n = 0; n < settings->ColumnsCount; n++)
    {
        ImGuiTableColumnSettings* column_settings = &settings->GetColumnSettings()[n];
        ImGuiSortDirection sort_dir = (column_settings->SortOrder != -1) ? (ImGuiSortDirection)column_settings->SortDirection : ImGuiSortDirection_None;
        BulletText("Column %d Order %d SortOrder %d %s Vis %d %s %7.3f UserID 0x%08X", n,
            column_settings->DisplayOrder, column_settings->SortOrder,
            (sort_dir == ImGuiSortDirection_Ascending) ? "Asc" : (sort_dir == ImGuiSortDirection_Descending) ? "Des" : "---",
            column_settings->IsEnabled, column_settings->IsStretch ? "Weight" : "Width ", column_settings->WidthOrWeight, column_settings->UserID);
    }
    TreePop();
}

#else // #ifndef IMGUI_DISABLE_METRICS_WINDOW

void ImGui::DebugNodeTable(ImGuiTable*) {}
void ImGui::DebugNodeTableSettings(ImGuiTableSettings*) {}

#endif

//-------------------------------------------------------------------------
// [SECTION] Columns, BeginColumns, EndColumns, etc.
// (This is a legacy API, prefer using BeginTable/EndTable!)
//-------------------------------------------------------------------------
// FIXME: sizing is lossy when columns width is very small (default width may turn negative etc.)
//-------------------------------------------------------------------------
// - SetWindowClipRectBeforeSetChannel() [Internal]
// - GetColumnIndex()
// - GetColumnsCount()
// - GetColumnOffset()
// - GetColumnWidth()
// - SetColumnOffset()
// - SetColumnWidth()
// - PushColumnClipRect() [Internal]
// - PushColumnsBackground() [Internal]
// - PopColumnsBackground() [Internal]
// - FindOrCreateColumns() [Internal]
// - GetColumnsID() [Internal]
// - BeginColumns()
// - NextColumn()
// - EndColumns()
// - Columns()
//-------------------------------------------------------------------------

// [Internal] Small optimization to avoid calls to PopClipRect/SetCurrentChannel/PushClipRect in sequences,
// they would meddle many times with the underlying ImDrawCmd.
// Instead, we do a preemptive overwrite of clipping rectangle _without_ altering the command-buffer and let
// the subsequent single call to SetCurrentChannel() does it things once.
void ImGui::SetWindowClipRectBeforeSetChannel(ImGuiWindow* window, const ImRect& clip_rect)
{
    // Overwrite the window clip rect, the pending draw command header, and the top of the draw list's
    // clip rect stack, without emitting any PushClipRect/PopClipRect command.
    ImVec4 clip_rect_vec4 = clip_rect.ToVec4();
    window->ClipRect = clip_rect;
    window->DrawList->_CmdHeader.ClipRect = clip_rect_vec4;
    window->DrawList->_ClipRectStack.Data[window->DrawList->_ClipRectStack.Size - 1] = clip_rect_vec4;
}

// Current column index, or 0 when no columns set is active.
int ImGui::GetColumnIndex()
{
    ImGuiWindow* window = GetCurrentWindowRead();
    return window->DC.CurrentColumns ? window->DC.CurrentColumns->Current : 0;
}

// Number of columns in the current set, or 1 when no columns set is active.
int ImGui::GetColumnsCount()
{
    ImGuiWindow* window = GetCurrentWindowRead();
    return window->DC.CurrentColumns ? window->DC.CurrentColumns->Count : 1;
}

// Convert a normalized [0..1] column position into an offset within the columns range.
float ImGui::GetColumnOffsetFromNorm(const ImGuiOldColumns* columns, float offset_norm)
{
    return offset_norm * (columns->OffMaxX - columns->OffMinX);
}

// Inverse of GetColumnOffsetFromNorm().
float ImGui::GetColumnNormFromOffset(const ImGuiOldColumns* columns, float offset)
{
    return offset / (columns->OffMaxX - columns->OffMinX);
}

// Half-width of the invisible hit rectangle used to grab a column border.
static const float COLUMNS_HIT_RECT_HALF_WIDTH = 4.0f;

static float GetDraggedColumnOffset(ImGuiOldColumns* columns, int column_index)
{
    // Active (dragged) column always follow mouse. The reason we need this is that dragging a column to the right edge of an auto-resizing
    // window creates a feedback loop because we store normalized positions. So while dragging we enforce absolute positioning.
    ImGuiContext& g = *GImGui;
    ImGuiWindow* window = g.CurrentWindow;
    IM_ASSERT(column_index > 0); // We are not supposed to drag column 0.
    IM_ASSERT(g.ActiveId == columns->ID + ImGuiID(column_index));

    float x = g.IO.MousePos.x - g.ActiveIdClickOffset.x + COLUMNS_HIT_RECT_HALF_WIDTH - window->Pos.x;
    x = ImMax(x, ImGui::GetColumnOffset(column_index - 1) + g.Style.ColumnsMinSpacing);
    if ((columns->Flags & ImGuiOldColumnFlags_NoPreserveWidths))
        x = ImMin(x, ImGui::GetColumnOffset(column_index + 1) - g.Style.ColumnsMinSpacing);

    return x;
}

// Offset of a column border from the left of the columns region. column_index < 0 means current column.
float ImGui::GetColumnOffset(int column_index)
{
    ImGuiWindow* window = GetCurrentWindowRead();
    ImGuiOldColumns* columns = window->DC.CurrentColumns;
    if (columns == NULL)
        return 0.0f;

    if (column_index < 0)
        column_index = columns->Current;
    IM_ASSERT(column_index < columns->Columns.Size);

    const float t = columns->Columns[column_index].OffsetNorm;
    const float x_offset = ImLerp(columns->OffMinX, columns->OffMaxX, t);
    return x_offset;
}

// Column width; 'before_resize' reads the pre-resize snapshot of normalized offsets.
static float GetColumnWidthEx(ImGuiOldColumns* columns, int column_index, bool before_resize = false)
{
    if (column_index < 0)
        column_index = columns->Current;

    float offset_norm;
    if (before_resize)
        offset_norm = columns->Columns[column_index + 1].OffsetNormBeforeResize - columns->Columns[column_index].OffsetNormBeforeResize;
    else
        offset_norm = columns->Columns[column_index + 1].OffsetNorm - columns->Columns[column_index].OffsetNorm;
    return ImGui::GetColumnOffsetFromNorm(columns, offset_norm);
}

// Column width; falls back to available content width when no columns set is active.
float ImGui::GetColumnWidth(int column_index)
{
    ImGuiContext& g = *GImGui;
    ImGuiWindow* window = g.CurrentWindow;
    ImGuiOldColumns* columns = window->DC.CurrentColumns;
    if (columns == NULL)
        return GetContentRegionAvail().x;

    if (column_index < 0)
        column_index = columns->Current;
    return GetColumnOffsetFromNorm(columns, columns->Columns[column_index + 1].OffsetNorm - columns->Columns[column_index].OffsetNorm);
}

void ImGui::SetColumnOffset(int column_index, float offset)
{
    ImGuiContext& g = *GImGui;
    ImGuiWindow* window = g.CurrentWindow;
    ImGuiOldColumns* columns = window->DC.CurrentColumns;
    IM_ASSERT(columns != NULL);

    if (column_index < 0)
        column_index =
columns->Current;
    IM_ASSERT(column_index < columns->Columns.Size);

    // Unless NoPreserveWidths is set, moving a border also moves the next border so the column keeps its width.
    const bool preserve_width = !(columns->Flags & ImGuiOldColumnFlags_NoPreserveWidths) && (column_index < columns->Count - 1);
    const float width = preserve_width ? GetColumnWidthEx(columns, column_index, columns->IsBeingResized) : 0.0f;

    if (!(columns->Flags & ImGuiOldColumnFlags_NoForceWithinWindow))
        offset = ImMin(offset, columns->OffMaxX - g.Style.ColumnsMinSpacing * (columns->Count - column_index));
    columns->Columns[column_index].OffsetNorm = GetColumnNormFromOffset(columns, offset - columns->OffMinX);

    if (preserve_width)
        SetColumnOffset(column_index + 1, offset + ImMax(g.Style.ColumnsMinSpacing, width));
}

// Set column width by moving the next border (column_index < 0 means current column).
void ImGui::SetColumnWidth(int column_index, float width)
{
    ImGuiWindow* window = GetCurrentWindowRead();
    ImGuiOldColumns* columns = window->DC.CurrentColumns;
    IM_ASSERT(columns != NULL);

    if (column_index < 0)
        column_index = columns->Current;
    SetColumnOffset(column_index + 1, GetColumnOffset(column_index) + width);
}

// Push the clipping rectangle of one column of the current set.
void ImGui::PushColumnClipRect(int column_index)
{
    ImGuiWindow* window = GetCurrentWindowRead();
    ImGuiOldColumns* columns = window->DC.CurrentColumns;
    if (column_index < 0)
        column_index = columns->Current;

    ImGuiOldColumnData* column = &columns->Columns[column_index];
    PushClipRect(column->ClipRect.Min, column->ClipRect.Max, false);
}

// Get into the columns background draw command (which is generally the same draw command as before we called BeginColumns)
void ImGui::PushColumnsBackground()
{
    ImGuiWindow* window = GetCurrentWindowRead();
    ImGuiOldColumns* columns = window->DC.CurrentColumns;
    if (columns->Count == 1)
        return;

    // Optimization: avoid SetCurrentChannel() + PushClipRect()
    columns->HostBackupClipRect = window->ClipRect;
    SetWindowClipRectBeforeSetChannel(window, columns->HostInitialClipRect);
    columns->Splitter.SetCurrentChannel(window->DrawList, 0);
}

// Leave the background draw command, restoring the per-column channel and clip rect (inverse of PushColumnsBackground()).
void ImGui::PopColumnsBackground()
{
    ImGuiWindow* window = GetCurrentWindowRead();
    ImGuiOldColumns* columns = window->DC.CurrentColumns;
    if (columns->Count == 1)
        return;

    // Optimization: avoid PopClipRect() + SetCurrentChannel()
    SetWindowClipRectBeforeSetChannel(window, columns->HostBackupClipRect);
    columns->Splitter.SetCurrentChannel(window->DrawList, columns->Current + 1);
}

// Return the window's columns set for a given id, creating a new one on first use.
ImGuiOldColumns* ImGui::FindOrCreateColumns(ImGuiWindow* window, ImGuiID id)
{
    // We have few columns per window so for now we don't need bother much with turning this into a faster lookup.
    for (int n = 0; n < window->ColumnsStorage.Size; n++)
        if (window->ColumnsStorage[n].ID == id)
            return &window->ColumnsStorage[n];

    window->ColumnsStorage.push_back(ImGuiOldColumns());
    ImGuiOldColumns* columns = &window->ColumnsStorage.back();
    columns->ID = id;
    return columns;
}

ImGuiID ImGui::GetColumnsID(const char* str_id, int columns_count)
{
    ImGuiWindow* window = GetCurrentWindow();

    // Differentiate column ID with an arbitrary prefix for cases where users name their columns set the same as another widget.
    // In addition, when an identifier isn't explicitly provided we include the number of columns in the hash to make it uniquer.
    PushID(0x11223347 + (str_id ? 0 : columns_count));
    ImGuiID id = window->GetID(str_id ?
str_id : "columns");
    PopID();
    return id;
}

void ImGui::BeginColumns(const char* str_id, int columns_count, ImGuiOldColumnFlags flags)
{
    ImGuiContext& g = *GImGui;
    ImGuiWindow* window = GetCurrentWindow();

    IM_ASSERT(columns_count >= 1);
    IM_ASSERT(window->DC.CurrentColumns == NULL); // Nested columns are currently not supported

    // Acquire storage for the columns set
    ImGuiID id = GetColumnsID(str_id, columns_count);
    ImGuiOldColumns* columns = FindOrCreateColumns(window, id);
    IM_ASSERT(columns->ID == id);
    columns->Current = 0;
    columns->Count = columns_count;
    columns->Flags = flags;
    window->DC.CurrentColumns = columns;

    // Backup host window state so EndColumns() can restore it
    columns->HostCursorPosY = window->DC.CursorPos.y;
    columns->HostCursorMaxPosX = window->DC.CursorMaxPos.x;
    columns->HostInitialClipRect = window->ClipRect;
    columns->HostBackupParentWorkRect = window->ParentWorkRect;
    window->ParentWorkRect = window->WorkRect;

    // Set state for first column
    // We aim so that the right-most column will have the same clipping width as other after being clipped by parent ClipRect
    const float column_padding = g.Style.ItemSpacing.x;
    const float half_clip_extend_x = ImFloor(ImMax(window->WindowPadding.x * 0.5f, window->WindowBorderSize));
    const float max_1 = window->WorkRect.Max.x + column_padding - ImMax(column_padding - window->WindowPadding.x, 0.0f);
    const float max_2 = window->WorkRect.Max.x + half_clip_extend_x;
    columns->OffMinX = window->DC.Indent.x - column_padding + ImMax(column_padding - window->WindowPadding.x, 0.0f);
    columns->OffMaxX = ImMax(ImMin(max_1, max_2) - window->Pos.x, columns->OffMinX + 1.0f);
    columns->LineMinY = columns->LineMaxY = window->DC.CursorPos.y;

    // Clear data if columns count changed
    if (columns->Columns.Size != 0 && columns->Columns.Size != columns_count + 1)
        columns->Columns.resize(0);

    // Initialize default widths
    columns->IsFirstFrame = (columns->Columns.Size == 0);
    if (columns->Columns.Size == 0)
    {
        columns->Columns.reserve(columns_count + 1);
        for (int n = 0; n < columns_count + 1; n++)
        {
            // Borders evenly spaced by default (count+1 borders for count columns)
            ImGuiOldColumnData column;
            column.OffsetNorm = n / (float)columns_count;
            columns->Columns.push_back(column);
        }
    }

    for (int n = 0; n < columns_count; n++)
    {
        // Compute clipping rectangle
        ImGuiOldColumnData* column = &columns->Columns[n];
        float clip_x1 = IM_ROUND(window->Pos.x + GetColumnOffset(n));
        float clip_x2 = IM_ROUND(window->Pos.x + GetColumnOffset(n + 1) - 1.0f);
        column->ClipRect = ImRect(clip_x1, -FLT_MAX, clip_x2, +FLT_MAX);
        column->ClipRect.ClipWithFull(window->ClipRect);
    }

    if (columns->Count > 1)
    {
        // One draw channel per column plus one background channel
        columns->Splitter.Split(window->DrawList, 1 + columns->Count);
        columns->Splitter.SetCurrentChannel(window->DrawList, 1);
        PushColumnClipRect(0);
    }

    // We don't generally store Indent.x inside ColumnsOffset because it may be manipulated by the user.
    float offset_0 = GetColumnOffset(columns->Current);
    float offset_1 = GetColumnOffset(columns->Current + 1);
    float width = offset_1 - offset_0;
    PushItemWidth(width * 0.65f);
    window->DC.ColumnsOffset.x = ImMax(column_padding - window->WindowPadding.x, 0.0f);
    window->DC.CursorPos.x = IM_FLOOR(window->Pos.x + window->DC.Indent.x + window->DC.ColumnsOffset.x);
    window->WorkRect.Max.x = window->Pos.x + offset_1 - column_padding;
}

void ImGui::NextColumn()
{
    ImGuiWindow* window = GetCurrentWindow();
    if (window->SkipItems || window->DC.CurrentColumns == NULL)
        return;

    ImGuiContext& g = *GImGui;
    ImGuiOldColumns* columns = window->DC.CurrentColumns;

    if (columns->Count == 1)
    {
        // Degenerate single-column set: just reset the cursor to the line start
        window->DC.CursorPos.x = IM_FLOOR(window->Pos.x + window->DC.Indent.x + window->DC.ColumnsOffset.x);
        IM_ASSERT(columns->Current == 0);
        return;
    }

    // Next column
    if (++columns->Current == columns->Count)
        columns->Current = 0;

    PopItemWidth();

    // Optimization: avoid PopClipRect() + SetCurrentChannel() + PushClipRect()
    // (which would needlessly attempt to update commands in the wrong channel, then pop or overwrite them),
    ImGuiOldColumnData* column = &columns->Columns[columns->Current];
    SetWindowClipRectBeforeSetChannel(window, column->ClipRect);
columns->Splitter.SetCurrentChannel(window->DrawList, columns->Current + 1); const float column_padding = g.Style.ItemSpacing.x; columns->LineMaxY = ImMax(columns->LineMaxY, window->DC.CursorPos.y); if (columns->Current > 0) { // Columns 1+ ignore IndentX (by canceling it out) // FIXME-COLUMNS: Unnecessary, could be locked? window->DC.ColumnsOffset.x = GetColumnOffset(columns->Current) - window->DC.Indent.x + column_padding; } else { // New row/line: column 0 honor IndentX. window->DC.ColumnsOffset.x = ImMax(column_padding - window->WindowPadding.x, 0.0f); columns->LineMinY = columns->LineMaxY; } window->DC.CursorPos.x = IM_FLOOR(window->Pos.x + window->DC.Indent.x + window->DC.ColumnsOffset.x); window->DC.CursorPos.y = columns->LineMinY; window->DC.CurrLineSize = ImVec2(0.0f, 0.0f); window->DC.CurrLineTextBaseOffset = 0.0f; // FIXME-COLUMNS: Share code with BeginColumns() - move code on columns setup. float offset_0 = GetColumnOffset(columns->Current); float offset_1 = GetColumnOffset(columns->Current + 1); float width = offset_1 - offset_0; PushItemWidth(width * 0.65f); window->WorkRect.Max.x = window->Pos.x + offset_1 - column_padding; } void ImGui::EndColumns() { ImGuiContext& g = *GImGui; ImGuiWindow* window = GetCurrentWindow(); ImGuiOldColumns* columns = window->DC.CurrentColumns; IM_ASSERT(columns != NULL); PopItemWidth(); if (columns->Count > 1) { PopClipRect(); columns->Splitter.Merge(window->DrawList); } const ImGuiOldColumnFlags flags = columns->Flags; columns->LineMaxY = ImMax(columns->LineMaxY, window->DC.CursorPos.y); window->DC.CursorPos.y = columns->LineMaxY; if (!(flags & ImGuiOldColumnFlags_GrowParentContentsSize)) window->DC.CursorMaxPos.x = columns->HostCursorMaxPosX; // Restore cursor max pos, as columns don't grow parent // Draw columns borders and handle resize // The IsBeingResized flag ensure we preserve pre-resize columns width so back-and-forth are not lossy bool is_being_resized = false; if (!(flags & ImGuiOldColumnFlags_NoBorder) && 
!window->SkipItems) { // We clip Y boundaries CPU side because very long triangles are mishandled by some GPU drivers. const float y1 = ImMax(columns->HostCursorPosY, window->ClipRect.Min.y); const float y2 = ImMin(window->DC.CursorPos.y, window->ClipRect.Max.y); int dragging_column = -1; for (int n = 1; n < columns->Count; n++) { ImGuiOldColumnData* column = &columns->Columns[n]; float x = window->Pos.x + GetColumnOffset(n); const ImGuiID column_id = columns->ID + ImGuiID(n); const float column_hit_hw = COLUMNS_HIT_RECT_HALF_WIDTH; const ImRect column_hit_rect(ImVec2(x - column_hit_hw, y1), ImVec2(x + column_hit_hw, y2)); KeepAliveID(column_id); if (IsClippedEx(column_hit_rect, column_id, false)) continue; bool hovered = false, held = false; if (!(flags & ImGuiOldColumnFlags_NoResize)) { ButtonBehavior(column_hit_rect, column_id, &hovered, &held); if (hovered || held) g.MouseCursor = ImGuiMouseCursor_ResizeEW; if (held && !(column->Flags & ImGuiOldColumnFlags_NoResize)) dragging_column = n; } // Draw column const ImU32 col = GetColorU32(held ? ImGuiCol_SeparatorActive : hovered ? ImGuiCol_SeparatorHovered : ImGuiCol_Separator); const float xi = IM_FLOOR(x); window->DrawList->AddLine(ImVec2(xi, y1 + 1.0f), ImVec2(xi, y2), col); } // Apply dragging after drawing the column lines, so our rendered lines are in sync with how items were displayed during the frame. 
if (dragging_column != -1) { if (!columns->IsBeingResized) for (int n = 0; n < columns->Count + 1; n++) columns->Columns[n].OffsetNormBeforeResize = columns->Columns[n].OffsetNorm; columns->IsBeingResized = is_being_resized = true; float x = GetDraggedColumnOffset(columns, dragging_column); SetColumnOffset(dragging_column, x); } } columns->IsBeingResized = is_being_resized; window->WorkRect = window->ParentWorkRect; window->ParentWorkRect = columns->HostBackupParentWorkRect; window->DC.CurrentColumns = NULL; window->DC.ColumnsOffset.x = 0.0f; window->DC.CursorPos.x = IM_FLOOR(window->Pos.x + window->DC.Indent.x + window->DC.ColumnsOffset.x); } void ImGui::Columns(int columns_count, const char* id, bool border) { ImGuiWindow* window = GetCurrentWindow(); IM_ASSERT(columns_count >= 1); ImGuiOldColumnFlags flags = (border ? 0 : ImGuiOldColumnFlags_NoBorder); //flags |= ImGuiOldColumnFlags_NoPreserveWidths; // NB: Legacy behavior ImGuiOldColumns* columns = window->DC.CurrentColumns; if (columns != NULL && columns->Count == columns_count && columns->Flags == flags) return; if (columns != NULL) EndColumns(); if (columns_count != 1) BeginColumns(id, columns_count, flags); } //------------------------------------------------------------------------- #endif // #ifndef IMGUI_DISABLE
/*=========================================================================
 *
 *  Copyright Insight Software Consortium
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *         http://www.apache.org/licenses/LICENSE-2.0.txt
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 *=========================================================================*/
#ifndef itkLaplacianSharpeningImageFilter_hxx
#define itkLaplacianSharpeningImageFilter_hxx

#include "itkLaplacianSharpeningImageFilter.h"
#include "itkNeighborhoodOperatorImageFilter.h"
#include "itkLaplacianOperator.h"
#include "itkProgressAccumulator.h"
#include "itkMinimumMaximumImageCalculator.h"
#include "itkImageRegionIterator.h"

namespace itk
{
// Print this filter's member state (standard ITK PrintSelf protocol).
template< typename TInputImage, typename TOutputImage >
void
LaplacianSharpeningImageFilter< TInputImage, TOutputImage >
::PrintSelf(std::ostream & os, Indent indent) const
{
  Superclass::PrintSelf(os, indent);
  os << indent << "UseImageSpacing = " << m_UseImageSpacing << std::endl;
}

// Pipeline negotiation: enlarge the input requested region by the Laplacian
// kernel radius so neighborhood access near the region border stays in bounds.
template< typename TInputImage, typename TOutputImage >
void
LaplacianSharpeningImageFilter< TInputImage, TOutputImage >
::GenerateInputRequestedRegion()
{
  // call the superclass' implementation of this method. This should
  // copy the output requested region to the input requested region
  Superclass::GenerateInputRequestedRegion();

  // get pointers to the input and output
  InputImagePointer inputPtr =
    const_cast< TInputImage * >( this->GetInput() );

  if ( !inputPtr )
    {
    return;
    }

  // Build an operator so that we can determine the kernel size
  LaplacianOperator< RealType, ImageDimension > oper;
  oper.CreateOperator();

  // get a copy of the input requested region (should equal the output
  // requested region)
  typename TInputImage::RegionType inputRequestedRegion;
  inputRequestedRegion = inputPtr->GetRequestedRegion();

  // pad the input requested region by the operator radius
  inputRequestedRegion.PadByRadius( oper.GetRadius() );

  // crop the input requested region at the input's largest possible region
  if ( inputRequestedRegion.Crop( inputPtr->GetLargestPossibleRegion() ) )
    {
    inputPtr->SetRequestedRegion(inputRequestedRegion);
    return;
    }
  else
    {
    // Couldn't crop the region (requested region is outside the largest
    // possible region).  Throw an exception.

    // store what we tried to request (prior to trying to crop)
    inputPtr->SetRequestedRegion(inputRequestedRegion);

    // build an exception
    InvalidRequestedRegionError e(__FILE__, __LINE__);
    e.SetLocation(ITK_LOCATION);
    e.SetDescription("Requested region is (at least partially) outside the largest possible region.");
    e.SetDataObject(inputPtr);
    throw e;
    }
}

// Main work: run a Laplacian over the input (mini-pipeline), rescale the
// Laplacian response to the input's dynamic range, subtract it from the input
// (edge enhancement), then shift the result so its mean matches the input's
// mean, clamping to the input's min/max before casting to the output type.
template< typename TInputImage, typename TOutputImage >
void
LaplacianSharpeningImageFilter< TInputImage, TOutputImage >
::GenerateData()
{
  // Create the Laplacian operator
  LaplacianOperator< RealType, ImageDimension > oper;
  double s[ImageDimension];
  for ( unsigned i = 0; i < ImageDimension; i++ )
    {
    if ( this->GetInput()->GetSpacing()[i] == 0.0 )
      {
      itkExceptionMacro(<< "Image spacing cannot be zero");
      }
    else
      {
      // Derivative scaling is the reciprocal of the physical spacing.
      s[i] = 1.0 / this->GetInput()->GetSpacing()[i];
      }
    }
  oper.SetDerivativeScalings(s);
  oper.CreateOperator();

  // do calculations in floating point
  typedef Image< RealType, ImageDimension >                    RealImageType;
  typedef NeighborhoodOperatorImageFilter< InputImageType, RealImageType > NOIF;
  ZeroFluxNeumannBoundaryCondition< InputImageType > nbc;

  typename NOIF::Pointer filter = NOIF::New();
  filter->OverrideBoundaryCondition( static_cast< typename NOIF::ImageBoundaryConditionPointerType >( &nbc ) );

  // Create a process accumulator for tracking the progress of this minipipeline
  ProgressAccumulator::Pointer progress = ProgressAccumulator::New();
  progress->SetMiniPipelineFilter(this);

  // Register the filter with the with progress accumulator using
  // equal weight proportion
  progress->RegisterInternalFilter(filter, 0.8f);

  //
  // set up the mini-pipline
  //
  filter->SetOperator(oper);
  filter->SetInput( this->GetInput() );
  filter->GetOutput()
  ->SetRequestedRegion( this->GetOutput()->GetRequestedRegion() );

  // execute the mini-pipeline
  filter->Update();

  // determine how the data will need to scaled to be properly combined
  typename MinimumMaximumImageCalculator< InputImageType >::Pointer inputCalculator =
    MinimumMaximumImageCalculator< InputImageType >::New();
  typename MinimumMaximumImageCalculator< RealImageType >::Pointer filteredCalculator =
    MinimumMaximumImageCalculator< RealImageType >::New();

  inputCalculator->SetImage( this->GetInput() );
  inputCalculator->SetRegion( this->GetOutput()->GetRequestedRegion() );
  inputCalculator->Compute();

  filteredCalculator->SetImage( filter->GetOutput() );
  filteredCalculator->SetRegion( this->GetOutput()->GetRequestedRegion() );
  filteredCalculator->Compute();

  RealType inputShift, inputScale, filteredShift, filteredScale;
  inputShift = static_cast< RealType >( inputCalculator->GetMinimum() );
  inputScale = static_cast< RealType >( inputCalculator->GetMaximum() )
               - static_cast< RealType >( inputCalculator->GetMinimum() );

  filteredShift = filteredCalculator->GetMinimum(); // no need to cast
  filteredScale = filteredCalculator->GetMaximum()
                  - filteredCalculator->GetMinimum();

  ImageRegionIterator< RealImageType >
  it( filter->GetOutput(), filter->GetOutput()->GetRequestedRegion() );
  ImageRegionConstIterator< InputImageType >
  inIt( this->GetInput(), this->GetOutput()->GetRequestedRegion() );

  // combine the input and laplacian images
  RealType value, invalue;
  RealType inputSum = 0.0;
  RealType enhancedSum = 0.0;
  while ( !it.IsAtEnd() )
    {
    value = it.Get(); // laplacian value

    // rescale to [0,1]
    value = ( value - filteredShift ) / filteredScale;

    // rescale to the input dynamic range
    value = value * inputScale + inputShift;

    // combine the input and laplacian image (note that we subtract
    // the laplacian due to the signs in our laplacian kernel).
    invalue = static_cast< RealType >( inIt.Get() );
    value = invalue - value;
    it.Set(value);

    // running sums feed the mean-matching correction below
    inputSum += invalue;
    enhancedSum += value;
    ++it;
    ++inIt;
    }
  RealType inputMean = inputSum / static_cast< RealType >( this->GetOutput()->GetRequestedRegion()
                                                           .GetNumberOfPixels() );
  RealType enhancedMean = enhancedSum / static_cast< RealType >( this->GetOutput()->GetRequestedRegion()
                                                                 .GetNumberOfPixels() );

  // update progress
  this->UpdateProgress(0.9);

  // copy and cast the output
  typename TOutputImage::Pointer output = this->GetOutput();
  output->SetBufferedRegion( output->GetRequestedRegion() );
  output->Allocate();

  RealType inputMinimum = inputCalculator->GetMinimum();
  RealType inputMaximum = inputCalculator->GetMaximum();
  OutputPixelType castInputMinimum =
    static_cast< OutputPixelType >( inputMinimum );
  OutputPixelType castInputMaximum =
    static_cast< OutputPixelType >( inputMaximum );

  ImageRegionIterator< OutputImageType > outIt =
    ImageRegionIterator< OutputImageType >( output, output->GetRequestedRegion() );

  outIt.GoToBegin();
  it.GoToBegin();
  while ( !outIt.IsAtEnd() )
    {
    value = it.Get();

    // adjust value to make the mean intensities before and after match
    value = value - enhancedMean + inputMean;

    // clamp into the input's dynamic range before the (possibly narrowing) cast
    if ( value < inputMinimum )
      {
      outIt.Set(castInputMinimum);
      }
    else if ( value > inputMaximum )
      {
      outIt.Set(castInputMaximum);
      }
    else
      {
      outIt.Set( static_cast< OutputPixelType >( value ) );
      }
    ++outIt;
    ++it;
    }

  // update progress
  this->UpdateProgress(1.0);
}
} // end namespace itk

#endif
/******************************************************************************* * Copyright 2018 BGx Graphics Framework. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. ******************************************************************************/ /* * author abijosh@gmail.com */ #include "BillboardParticleBatch.hpp"
// See https://github.com/intel/llvm-test-suite/issues/867 for detailed status // UNSUPPORTED: hip // RUN: %clangxx -fsycl -fsycl-unnamed-lambda -fsycl-targets=%sycl_triple %s -o %t.out -Xsycl-target-backend=nvptx64-nvidia-cuda --cuda-gpu-arch=sm_70 // RUN: %HOST_RUN_PLACEHOLDER %t.out // RUN: %GPU_RUN_PLACEHOLDER %t.out // RUN: %CPU_RUN_PLACEHOLDER %t.out // RUN: %ACC_RUN_PLACEHOLDER %t.out #include "sub.h" int main() { sub_test_all<access::address_space::global_space>(); }
/*
 *  yosys -- Yosys Open SYnthesis Suite
 *
 *  Copyright (C) 2012  Clifford Wolf <clifford@clifford.at>
 *
 *  Permission to use, copy, modify, and/or distribute this software for any
 *  purpose with or without fee is hereby granted, provided that the above
 *  copyright notice and this permission notice appear in all copies.
 *
 *  THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 *  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 *  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 *  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 *  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 *  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

#include "kernel/yosys.h"
#include "kernel/sigtools.h"

USING_YOSYS_NAMESPACE
PRIVATE_NAMESPACE_BEGIN

// Absorb an adjacent GP_INV into a GP_DFF* cell by rewriting the cell's type
// string and ports. invert_input=true merges an inverter feeding D (so the
// cell's INIT value and async set/reset polarity are complemented); false
// merges an inverter on the Q output (toggling the trailing "I" type suffix
// and swapping the Q/nQ port).
void invert_gp_dff(Cell *cell, bool invert_input)
{
	string cell_type = cell->type.str();
	// Decode the type-name suffix letters: I = inverted output, R = reset, S = set.
	bool cell_type_i = cell_type.find('I') != string::npos;
	bool cell_type_r = cell_type.find('R') != string::npos;
	bool cell_type_s = cell_type.find('S') != string::npos;

	if (!invert_input)
	{
		// Output inversion: complement the initial value...
		Const initval = cell->getParam("\\INIT");
		if (GetSize(initval) >= 1) {
			if (initval.bits[0] == State::S0)
				initval.bits[0] = State::S1;
			else if (initval.bits[0] == State::S1)
				initval.bits[0] = State::S0;
			cell->setParam("\\INIT", initval);
		}

		if (cell_type_r && cell_type_s)
		{
			// Combined set/reset cell: flip which async input wins (SRMODE).
			Const srmode = cell->getParam("\\SRMODE");
			if (GetSize(srmode) >= 1) {
				if (srmode.bits[0] == State::S0)
					srmode.bits[0] = State::S1;
				else if (srmode.bits[0] == State::S1)
					srmode.bits[0] = State::S0;
				cell->setParam("\\SRMODE", srmode);
			}
		}
		else
		{
			// Single async input: an inverted output turns a reset into a set
			// (and vice versa), so move the control signal to the other port.
			if (cell_type_r) {
				cell->setPort("\\nSET", cell->getPort("\\nRST"));
				cell->unsetPort("\\nRST");
				cell_type_r = false;
				cell_type_s = true;
			} else
			if (cell_type_s) {
				cell->setPort("\\nRST", cell->getPort("\\nSET"));
				cell->unsetPort("\\nSET");
				cell_type_r = true;
				cell_type_s = false;
			}
		}
	}

	// Toggle the output-inversion flag by swapping the Q / nQ port.
	if (cell_type_i) {
		cell->setPort("\\Q", cell->getPort("\\nQ"));
		cell->unsetPort("\\nQ");
		cell_type_i = false;
	} else {
		cell->setPort("\\nQ", cell->getPort("\\Q"));
		cell->unsetPort("\\Q");
		cell_type_i = true;
	}

	// Rebuild the type name from the (possibly changed) suffix flags.
	cell->type = stringf("\\GP_DFF%s%s%s", cell_type_s ? "S" : "", cell_type_r ? "R" : "", cell_type_i ? "I" : "");
	log("Merged %s inverter into cell %s.%s: %s -> %s\n", invert_input ? "input" : "output",
			log_id(cell->module), log_id(cell), cell_type.c_str()+1, log_id(cell->type));
}

struct Greenpak4DffInvPass : public Pass {
	Greenpak4DffInvPass() : Pass("greenpak4_dffinv", "merge greenpak4 inverters and DFFs") { }
	virtual void help()
	{
		log("\n");
		log("    greenpak4_dffinv [options] [selection]\n");
		log("\n");
		log("Merge GP_INV cells with GP_DFF* cells.\n");
		log("\n");
	}
	virtual void execute(std::vector<std::string> args, RTLIL::Design *design)
	{
		log_header(design, "Executing GREENPAK4_DFFINV pass (merge synchronous set/reset into FF cells).\n");

		size_t argidx;
		for (argidx = 1; argidx < args.size(); argidx++)
		{
			// if (args[argidx] == "-singleton") {
			// 	singleton_mode = true;
			// 	continue;
			// }
			break;
		}
		extra_args(args, argidx, design);

		// All GP_DFF type-name variants this pass can rewrite.
		pool<IdString> gp_dff_types;
		gp_dff_types.insert("\\GP_DFF");
		gp_dff_types.insert("\\GP_DFFI");
		gp_dff_types.insert("\\GP_DFFR");
		gp_dff_types.insert("\\GP_DFFRI");
		gp_dff_types.insert("\\GP_DFFS");
		gp_dff_types.insert("\\GP_DFFSI");
		gp_dff_types.insert("\\GP_DFFSR");
		gp_dff_types.insert("\\GP_DFFSRI");

		for (auto module : design->selected_modules())
		{
			SigMap sigmap(module);

			// sig_use_cnt counts readers of each bit, so an output-side inverter
			// is only absorbed when the FF's Q drives nothing else.
			dict<SigBit, int> sig_use_cnt;
			dict<SigBit, SigBit> inv_in2out, inv_out2in;
			dict<SigBit, Cell*> inv_in2cell;
			pool<Cell*> dff_cells;

			// Module output ports count as uses.
			for (auto wire : module->wires()) {
				if (!wire->port_output)
					continue;
				for (auto bit : sigmap(wire))
					sig_use_cnt[bit]++;
			}

			// Cell inputs (or any connection of a cell with unknown direction) count as uses.
			for (auto cell : module->cells())
			for (auto &conn : cell->connections())
				if (cell->input(conn.first) || !cell->known())
					for (auto bit : sigmap(conn.second))
						sig_use_cnt[bit]++;

			// Index the FF cells and the inverter input/output relationships.
			for (auto cell : module->selected_cells())
			{
				if (gp_dff_types.count(cell->type)) {
					dff_cells.insert(cell);
					continue;
				}

				if (cell->type == "\\GP_INV") {
					SigBit in_bit = sigmap(cell->getPort("\\IN"));
					SigBit out_bit = sigmap(cell->getPort("\\OUT"));
					inv_in2out[in_bit] = out_bit;
					inv_out2in[out_bit] = in_bit;
					inv_in2cell[in_bit] = cell;
					continue;
				}
			}

			for (auto cell : dff_cells)
			{
				SigBit d_bit = sigmap(cell->getPort("\\D"));
				SigBit q_bit = sigmap(cell->hasPort("\\Q") ? cell->getPort("\\Q") : cell->getPort("\\nQ"));

				// Walk back through any chain of inverters feeding D; the
				// inverter cells themselves stay (they may have other fanout).
				while (inv_out2in.count(d_bit))
				{
					sig_use_cnt[d_bit]--;
					invert_gp_dff(cell, true);
					d_bit = inv_out2in.at(d_bit);
					cell->setPort("\\D", d_bit);
					sig_use_cnt[d_bit]++;
				}

				// Absorb output-side inverters only while Q has exactly one
				// reader (the inverter); the inverter cell is then removed.
				while (inv_in2out.count(q_bit) && sig_use_cnt[q_bit] == 1)
				{
					SigBit new_q_bit = inv_in2out.at(q_bit);
					module->remove(inv_in2cell.at(q_bit));
					sig_use_cnt.erase(q_bit);
					inv_in2out.erase(q_bit);
					inv_out2in.erase(new_q_bit);
					inv_in2cell.erase(q_bit);
					invert_gp_dff(cell, false);
					if (cell->hasPort("\\Q"))
						cell->setPort("\\Q", new_q_bit);
					else
						cell->setPort("\\nQ", new_q_bit);
				}
			}
		}
	}
} Greenpak4DffInvPass;

PRIVATE_NAMESPACE_END
#ifndef RBX_BUILTIN_STATICSCOPE_HPP #define RBX_BUILTIN_STATICSCOPE_HPP #include "builtin/object.hpp" #include "type_info.hpp" namespace rubinius { class Module; class ConstantScope : public Object { public: const static object_type type = ConstantScopeType; private: Module* module_; // slot // This is used like the ruby_class MRI variable. It lets // manipulate this aspect of the class lexical enclosure // without having to change module also. Module* current_module_; // slot ConstantScope* parent_; // slot public: /* accessors */ attr_accessor(module, Module); attr_accessor(current_module, Module); attr_accessor(parent, ConstantScope); /* interface */ static void init(STATE); static void bootstrap_methods(STATE); static ConstantScope* create(STATE); // Rubinius.primitive :constant_scope_of_sender static ConstantScope* of_sender(STATE, CallFrame* calling_environment); // Rubinius.primitive :constant_scope_const_set Object* const_set(STATE, Object* name, Object* value); // The module to use when adding and removing methods Module* for_method_definition(); // Rubinius.primitive :constant_scope_cvar_defined Object* cvar_defined(STATE, Symbol* name); // Rubinius.primitive :constant_scope_cvar_get Object* cvar_get(STATE, Symbol* name); // Rubinius.primitive :constant_scope_cvar_set Object* cvar_set(STATE, Symbol* name, Object* value); // Rubinius.primitive :constant_scope_cvar_get_or_set Object* cvar_get_or_set(STATE, Symbol* name, Object* value); bool top_level_p(STATE) { return parent_->nil_p(); } class Info : public TypeInfo { public: BASIC_TYPEINFO(TypeInfo) }; }; } #endif
#include <core/compiler/ignored_warnings.h>
// COMPILE TIME CHECKS
#include <core/compiler/cpp_version_check.h> // C++11 and later supported
#include <core/compiler/os_version_check.h>  // Linux and Windows supported
// RUNTIME CHECKS
#include <core/memory/cpu_memory.h>          // To see if cache line we are running on
                                             // matches the compiled one
#include <memory>
#include <core/self_process.h>
#include <core/single_instance.h>
#include <core/logger/logger.h>
#include <core/file_utility.h>
#include <server/server_configuration.h>
#include <server/server_offline.h>
#include <server/server_fix.h>
#include <server/server_error.h>
#include <server/server_constants.h>

using namespace std;

// Process entry point: validates the runtime environment (cache-line size,
// single-instance, admin rights), loads the server configuration, starts the
// logger, then runs either the FIX or the offline order-entry server.
// All failures are funneled through onError(...) with a ServerError code.
int main ()
{
    // Initial checks
    // Refuse to run if the CPU's cache line size differs from the one the
    // binary was compiled for (alignment assumptions would be wrong).
    if (core::getCPUCacheLineSize() != CACHE_LINE_SIZE)
    {
        auto message = core::format("This executable compiled for cache line size %d , but you are running on a CPU with a cache line of %d", CACHE_LINE_SIZE, core::getCPUCacheLineSize());
        onError(ServerError::NON_SUPPORTED_EXECUTION, message);
    }

    // Single instance protection
    core::SingleInstance singleInstance;
    if (singleInstance() == false )
    {
        onError(ServerError::ALREADY_RUNNING);
    }

    if (core::SelfProcess::amIAdmin() == false)
    {
        // Mainly needed for ability to set thread priorities
        core::consoleOutputWithColor(core::ConsoleColor::FG_RED, " WARNING : Program didn`t start with admin/root rights. Therefore will not be able to modify thread priorities.\n");
    }

    // Load configuration file
    ServerConfiguration serverConfiguration;

    try
    {
        // Set current working directory as current executable`s directory
        core::SelfProcess::setCurrentWorkingDirectory(core::SelfProcess::getCurrentExecutableDirectory());

        serverConfiguration.load(server_constants::CONFIGURATION_FILE);

        // Set process priority
        core::SelfProcess::setPriority(core::SelfProcess::getProcessPriorityFromString(serverConfiguration.getProcessPriority()));

        // Start logger if enabled
        core::Logger::getInstance()->initialise(serverConfiguration.getLoggerConfiguration());
        core::Logger::getInstance()->start();

        // NOTE: LOG_INFO is a macro; no trailing semicolon by project convention.
        LOG_INFO("Main thread", "starting")

        unique_ptr<ServerBase> server;

        if (serverConfiguration.getOrderEntryMode() == "FIX")
        {
            // FIX SERVER MODE
            server.reset(new ServerFix(serverConfiguration));
        }
        else
        {
            // OFFLINE ORDER ENTRY MODE
            server.reset(new ServerOffline(serverConfiguration));
        }

        server->run();
    }
    catch (const std::invalid_argument & e)
    {
        onError(ServerError::RUNTIME_ERROR, e.what());
    }
    catch (const std::runtime_error & e)
    {
        onError(ServerError::RUNTIME_ERROR, e.what());
    }
    catch (const std::bad_alloc & )
    {
        onError(ServerError::INSUFFICIENT_MEMORY);
    }
    catch (...)
    {
        onError(ServerError::UNKNOWN_PROBLEM);
    }

    // Application exit
    LOG_INFO("Main thread", "Ending")
    core::Logger::getInstance()->shutdown();
    return 0;
}
//===--- FrontendAction.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "clang/Frontend/FrontendAction.h"
#include "clang/AST/ASTContext.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Frontend/ASTUnit.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Sema/ParseAST.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;

FrontendAction::FrontendAction() : Instance(0), CurrentTimer(0) {}

FrontendAction::~FrontendAction() {}

// Record the file (and, for AST inputs, the owning ASTUnit) being processed.
void FrontendAction::setCurrentFile(llvm::StringRef Value, ASTUnit *AST) {
  CurrentFile = Value;
  CurrentASTUnit.reset(AST);
}

// Prepare the CompilerInstance to process one input file. Two paths:
// AST files share objects with the loaded ASTUnit; normal sources create
// the AST context/consumer here. On any failure, the `failure:` label
// unwinds the partially-initialized state (the caller will NOT call
// EndSourceFile()). Returns true on success.
bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
                                     llvm::StringRef Filename,
                                     bool IsAST) {
  assert(!Instance && "Already processing a source file!");
  assert(!Filename.empty() && "Unexpected empty filename!");
  setCurrentFile(Filename);
  setCompilerInstance(&CI);

  // AST files follow a very different path, since they share objects via the
  // AST unit.
  if (IsAST) {
    assert(!usesPreprocessorOnly() &&
           "Attempt to pass AST file to preprocessor only action!");
    assert(hasASTSupport() && "This action does not have AST support!");

    std::string Error;
    ASTUnit *AST = ASTUnit::LoadFromPCHFile(Filename, &Error);
    if (!AST) {
      CI.getDiagnostics().Report(diag::err_fe_invalid_ast_file) << Error;
      goto failure;
    }

    setCurrentFile(Filename, AST);

    // Set the shared objects, these are reset when we finish processing the
    // file, otherwise the CompilerInstance will happily destroy them.
    CI.setFileManager(&AST->getFileManager());
    CI.setSourceManager(&AST->getSourceManager());
    CI.setPreprocessor(&AST->getPreprocessor());
    CI.setASTContext(&AST->getASTContext());

    // Initialize the action.
    if (!BeginSourceFileAction(CI, Filename))
      goto failure;

    /// Create the AST consumer.
    CI.setASTConsumer(CreateASTConsumer(CI, Filename));
    if (!CI.hasASTConsumer())
      goto failure;

    return true;
  }

  // Inform the diagnostic client we are processing a source file.
  CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(),
                                           &CI.getPreprocessor());

  // Initialize the action.
  if (!BeginSourceFileAction(CI, Filename))
    goto failure;

  /// Create the AST context and consumer unless this is a preprocessor only
  /// action.
  if (!usesPreprocessorOnly()) {
    CI.createASTContext();
    CI.setASTConsumer(CreateASTConsumer(CI, Filename));
    if (!CI.hasASTConsumer())
      goto failure;

    /// Use PCH?
    if (!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) {
      assert(hasPCHSupport() && "This action does not have PCH support!");
      CI.createPCHExternalASTSource(
        CI.getPreprocessorOpts().ImplicitPCHInclude);
      if (!CI.getASTContext().getExternalSource())
        goto failure;
    }
  }

  // Initialize builtin info as long as we aren't using an external AST
  // source.
  if (!CI.hasASTContext() || !CI.getASTContext().getExternalSource()) {
    Preprocessor &PP = CI.getPreprocessor();
    PP.getBuiltinInfo().InitializeBuiltins(PP.getIdentifierTable(),
                                           PP.getLangOptions().NoBuiltin);
  }

  return true;

  // If we failed, reset state since the client will not end up calling the
  // matching EndSourceFile().
  failure:
  if (isCurrentFileAST()) {
    // take*() releases ownership back to the ASTUnit so CI's destructor
    // doesn't double-free the shared objects set above.
    CI.takeASTContext();
    CI.takePreprocessor();
    CI.takeSourceManager();
    CI.takeFileManager();
  }

  CI.getDiagnosticClient().EndSourceFile();
  setCurrentFile("");
  setCompilerInstance(0);
  return false;
}

// Run the action over the current file, timing it with CurrentTimer.
void FrontendAction::Execute() {
  CompilerInstance &CI = getCompilerInstance();

  // Initialize the main file entry. This needs to be delayed until after PCH
  // has loaded.
  if (isCurrentFileAST()) {
    // Set the main file ID to an empty file.
    //
    // FIXME: We probably shouldn't need this, but for now this is the
    // simplest way to reuse the logic in ParseAST.
    const char *EmptyStr = "";
    llvm::MemoryBuffer *SB =
      llvm::MemoryBuffer::getMemBuffer(EmptyStr, EmptyStr, "<dummy input>");
    CI.getSourceManager().createMainFileIDForMemBuffer(SB);
  } else {
    if (!CI.InitializeSourceManager(getCurrentFile()))
      return;
  }

  llvm::TimeRegion Timer(CurrentTimer);
  ExecuteAction();
}

// Tear down per-file state: finalize the action, release or destroy the
// consumer/context (destruction order matters: consumer first), print stats,
// clean up output files, and detach from the CompilerInstance.
void FrontendAction::EndSourceFile() {
  CompilerInstance &CI = getCompilerInstance();

  // Finalize the action.
  EndSourceFileAction();

  // Release the consumer and the AST, in that order since the consumer may
  // perform actions in its destructor which require the context.
  //
  // FIXME: There is more per-file stuff we could just drop here?
  if (CI.getFrontendOpts().DisableFree) {
    CI.takeASTConsumer();
    if (!isCurrentFileAST())
      CI.takeASTContext();
  } else {
    CI.setASTConsumer(0);
    if (!isCurrentFileAST())
      CI.setASTContext(0);
  }

  if (CI.getFrontendOpts().ShowStats) {
    llvm::errs() << "\nSTATISTICS FOR '" << getCurrentFile() << "':\n";
    CI.getPreprocessor().PrintStats();
    CI.getPreprocessor().getIdentifierTable().PrintStats();
    CI.getPreprocessor().getHeaderSearchInfo().PrintStats();
    CI.getSourceManager().PrintStats();
    llvm::errs() << "\n";
  }

  // Cleanup the output streams, and erase the output files if we encountered
  // an error.
  CI.ClearOutputFiles(/*EraseFiles=*/CI.getDiagnostics().getNumErrors());

  // Inform the diagnostic client we are done with this source file.
  CI.getDiagnosticClient().EndSourceFile();

  if (isCurrentFileAST()) {
    // Hand shared objects back to the ASTUnit (mirrors the failure path in
    // BeginSourceFile).
    CI.takeASTContext();
    CI.takePreprocessor();
    CI.takeSourceManager();
    CI.takeFileManager();
  }

  setCompilerInstance(0);
  setCurrentFile("");
}

//===----------------------------------------------------------------------===//
// Utility Actions
//===----------------------------------------------------------------------===//

// Default AST action: set up optional code completion and parse the file.
void ASTFrontendAction::ExecuteAction() {
  CompilerInstance &CI = getCompilerInstance();

  // FIXME: Move the truncation aspect of this into Sema, we delayed this till
  // here so the source manager would be initialized.
  if (hasCodeCompletionSupport() &&
      !CI.getFrontendOpts().CodeCompletionAt.FileName.empty())
    CI.createCodeCompletionConsumer();

  // Use a code completion consumer?
  CodeCompleteConsumer *CompletionConsumer = 0;
  if (CI.hasCodeCompletionConsumer())
    CompletionConsumer = &CI.getCodeCompletionConsumer();

  ParseAST(CI.getPreprocessor(), &CI.getASTConsumer(), CI.getASTContext(),
           CI.getFrontendOpts().ShowStats,
           usesCompleteTranslationUnit(),
           CompletionConsumer);
}

// Preprocessor-only actions never build an AST; reaching this is a logic error.
ASTConsumer *
PreprocessorFrontendAction::CreateASTConsumer(CompilerInstance &CI,
                                              llvm::StringRef InFile) {
  llvm::llvm_unreachable("Invalid CreateASTConsumer on preprocessor action!");
}
/*++

Copyright (C) Microsoft Corporation, 1998 - 1999

Module Name:

    misc.cxx

Abstract:

    misc helper functions.

Author:

    Johnson Apacible    (JohnsonA)  30-Jan-1998

--*/

#include <NTDSpchx.h>
#pragma  hdrstop

#include "ldapsvr.hxx"

extern "C" {
#include "mdlocal.h"
#undef new
#undef delete
}

#define  FILENO FILENO_LDAP_MISC

// Global state for paged-search cookies ("blobs"):
//  - PagedBlobListHead chains every live blob (oldest first) for global eviction;
//  - each blob is also chained on its owning LDAP_CONN's m_CookieList;
//  - PagedBlobLock guards both lists and the bookkeeping counters below.
LIST_ENTRY PagedBlobListHead;
LONG CurrentPageStorageSetSize = 0;
LONG PageBlobAllocs = 0;
LONG LdapBlobId = 1;
CRITICAL_SECTION PagedBlobLock;

VOID
ZapPagedBlobs(
        VOID
        );

PLDAP_PAGED_BLOB
AllocatePagedBlob(
        IN DWORD Size,
        IN PVOID Blob,
        IN PLDAP_CONN LdapConn
        )
/*++

Routine Description:

    Allocate storage to store paged blobs.

Arguments:

    Size - length in bytes of blob.
    Blob - Actual blob.
    LdapConn - Connection to store blob on.

Return Value:

    Pointer to blob. NULL if failure.

--*/
{
    DWORD allocSize;
    LIST_ENTRY *pTmp;
    PLDAP_PAGED_BLOB pPaged;
    PLDAP_PAGED_BLOB pTmpPaged = NULL;

    // LDAP_PAGED_BLOB already contains the first byte of Blob[], hence -1.
    allocSize = sizeof(LDAP_PAGED_BLOB) + Size - 1;
    pPaged = (PLDAP_PAGED_BLOB)LdapAlloc(allocSize);

    IF_DEBUG(SEARCH) {
        DPRINT2(0,"Allocated paged blob %x [size %d]\n", pPaged, Size);
    }

    if ( pPaged != NULL ) {

        pPaged->Signature = LDAP_PAGED_SIGNATURE;
        pPaged->LdapConn = LdapConn;
        pPaged->BlobSize = Size;
        CopyMemory(pPaged->Blob, Blob, Size);
        // InterlockedExchangeAdd returns the old value, giving a unique id.
        pPaged->BlobId = InterlockedExchangeAdd(&LdapBlobId, 1);

        //
        // insert into global list
        //

        InterlockedIncrement(&PageBlobAllocs);

        ACQUIRE_LOCK(&PagedBlobLock);

        if (LDAP_COOKIES_PER_CONN <= LdapConn->m_CookieCount) {
            // Per-connection quota reached:
            // Remove the oldest paged blob from the LDAP_CONN list.
            pTmp = RemoveHeadList( &LdapConn->m_CookieList );
            pTmpPaged = CONTAINING_RECORD(pTmp, LDAP_PAGED_BLOB, ConnEntry);
            pTmpPaged->LdapConn = NULL;
            LdapConn->m_CookieCount--;
            // Remove it from the global list.
            RemoveEntryList( &pTmpPaged->ListEntry );
        }

        // Insert the new one in the global list.
        InsertTailList(&PagedBlobListHead, &pPaged->ListEntry);
        // ...and in the LDAP_CONN list.
        InsertTailList(&LdapConn->m_CookieList, &pPaged->ConnEntry);
        LdapConn->m_CookieCount++;
        CurrentPageStorageSetSize += Size;

        RELEASE_LOCK(&PagedBlobLock);

        IF_DEBUG(WARNING) {
            if (pTmpPaged) {
                DPRINT(0, "LDAP_CONN paged blob overflow!\n");
            }
        }

        // Go ahead and free any overflowed paged blobs now that
        // we are outside of the lock.  (FreePagedBlob tolerates NULL.)
        FreePagedBlob(pTmpPaged);
    }

    Assert( LDAP_COOKIES_PER_CONN >= LdapConn->m_CookieCount );

    //
    // if we've exceeded our limit, try to delete older blobs
    //

    if ( (DWORD)CurrentPageStorageSetSize > LdapMaxResultSet ) {
        ZapPagedBlobs( );
    }

    return pPaged;

} // AllocatePagedBlob

VOID
FreePagedBlob(
        IN PLDAP_PAGED_BLOB Blob
        )
/*++

Routine Description:

    Free paged blobs.  Safe to call with NULL.  The blob must already be
    unlinked from both lists by the caller.

Arguments:

    Blob - Actual blob.

Return Value:

    None.

--*/
{
    if ( Blob != NULL ) {

        IF_DEBUG(SEARCH) {
            DPRINT2(0,"Freed paged blob %x [size %d]\n",Blob,Blob->BlobSize);
        }
        LdapFree(Blob);
        InterlockedDecrement(&PageBlobAllocs);
        Assert(PageBlobAllocs >= 0);
    }
} // FreePagedBlob

VOID
FreeAllPagedBlobs(
        IN PLDAP_CONN LdapConn
        )
/*++

Routine Description:

    Free all the paged blobs on a particular connection.

Arguments:

    LdapConn - The connection whose blobs are to be freed.

Return Value:

    None.

--*/
{
    LIST_ENTRY *pTmp;
    PLDAP_PAGED_BLOB pPaged;

    ACQUIRE_LOCK(&PagedBlobLock);

    // Walk backwards (Blink) so the cursor is advanced before each unlink.
    pTmp = LdapConn->m_CookieList.Blink;
    while (pTmp != &LdapConn->m_CookieList) {
        pPaged = CONTAINING_RECORD(pTmp, LDAP_PAGED_BLOB, ConnEntry);
        pTmp = pPaged->ConnEntry.Blink;

        RemoveEntryList( &pPaged->ConnEntry );
        pPaged->LdapConn = NULL;
        LdapConn->m_CookieCount--;
        RemoveEntryList( &pPaged->ListEntry );
        CurrentPageStorageSetSize -= pPaged->BlobSize;
        Assert(CurrentPageStorageSetSize >= 0);
        FreePagedBlob( pPaged );
    }

    Assert( LdapConn->m_CookieCount == 0 );

    RELEASE_LOCK(&PagedBlobLock);

} // FreeAllPagedBlobs

PLDAP_PAGED_BLOB
ReleasePagedBlob(
        IN PLDAP_CONN LdapConn,
        IN DWORD BlobId,
        IN BOOL FreeBlob
        )
/*++

Routine Description:

    Remove paged blob from queue and decrement.

Arguments:

    LdapConn - connection blob is associated to.
    BlobId - The ID used to find the correct blob.
    FreeBlob - Should blob be freed

Return Value:

    Blob that was released

--*/
{
    PLDAP_PAGED_BLOB pPaged = NULL;
    PLDAP_PAGED_BLOB pTmpPaged = NULL;
    LIST_ENTRY *pTmp;

    ACQUIRE_LOCK(&PagedBlobLock);

    // See if the BlobId passed in exists on this connection.
    for (pTmp = LdapConn->m_CookieList.Blink;
         pTmp != &LdapConn->m_CookieList;
         pTmp = pTmp->Blink) {
        pTmpPaged = CONTAINING_RECORD(pTmp, LDAP_PAGED_BLOB, ConnEntry);
        if (pTmpPaged->BlobId == BlobId) {
            pPaged = pTmpPaged;
            break;
        }
    }

    if ( pPaged != NULL ) {

        Assert(LdapConn == pPaged->LdapConn);

        IF_DEBUG(SEARCH) {
            DPRINT2(0,"Released paged blob %x[id %d]\n",
                     pPaged, pPaged->BlobId);
        }

        CurrentPageStorageSetSize -= pPaged->BlobSize;
        Assert(CurrentPageStorageSetSize >= 0);
        RemoveEntryList(&pPaged->ListEntry);
        RemoveEntryList(&pPaged->ConnEntry);
        LdapConn->m_CookieCount--;
        pPaged->LdapConn = NULL;
        RELEASE_LOCK(&PagedBlobLock);

        if ( FreeBlob ) {
            FreePagedBlob(pPaged);
            pPaged = NULL;     // freed: caller must not touch it
        }
    } else {
        RELEASE_LOCK(&PagedBlobLock);
    }

    return pPaged;

} // ReleasePagedBlob

// Evict oldest blobs (global LRU order) until total storage drops below
// LdapMaxResultSet, but always keep at least 3 blobs alive.
VOID
ZapPagedBlobs(
        VOID
        )
{
    //
    // zap only if more than 3
    //

    IF_DEBUG(WARNING) {
        DPRINT2(0,"Zapping blobs, max paged storage exceeded [cur %d max %d]\n",
                 CurrentPageStorageSetSize, LdapMaxResultSet);
    }

    while ( ((DWORD)CurrentPageStorageSetSize > LdapMaxResultSet) &&
            (PageBlobAllocs > 3) ) {

        PLIST_ENTRY listEntry;
        PLDAP_PAGED_BLOB pPaged;

        ACQUIRE_LOCK( &PagedBlobLock );

        if ( PagedBlobListHead.Flink != &PagedBlobListHead ) {

            listEntry = RemoveHeadList(&PagedBlobListHead);
            pPaged = CONTAINING_RECORD(listEntry,
                                       LDAP_PAGED_BLOB,
                                       ListEntry
                                       );
            RemoveEntryList(&pPaged->ConnEntry);
            pPaged->LdapConn->m_CookieCount--;
            CurrentPageStorageSetSize -= pPaged->BlobSize;
            pPaged->LdapConn = NULL;
            FreePagedBlob(pPaged);
        } else {
            Assert(CurrentPageStorageSetSize == 0);
        }
        RELEASE_LOCK( &PagedBlobLock );
    }

    return;

} // ZapPagedBlobs

/*-------------------------------------------------------------------------*/
/*-------------------------------------------------------------------------*/
/* This function sets an ldap error for output */
//
// Formats an extended-error string of the form
//   "%08X: LdapErr: DSID-%08X, comment: <text>, data %x, v%x"
// into thread-state memory, logs it, and hands it back through pError.
// Returns `code` unchanged so callers can write `return DoSetLdapError(...)`.
// The DSID is suppressed (zeroed) when gulHideDSID == DSID_HIDE_ALL.
_enum1
DoSetLdapError (
        IN _enum1 code,
        IN DWORD Win32Err,
        IN DWORD CommentId,
        IN DWORD Data,
        IN DWORD Dsid,
        OUT LDAPString *pError
        )
{
    CHAR pTempBuff[1024];
    DWORD cbTempBuff = 0;
    PCHAR pString = NULL;
    // NOTE(review): pTHS and ok appear unused in this function body, but
    // THAlloc below may be a macro that references pTHS — left untouched.
    THSTATE *pTHS=pTHStls;
    BOOL ok;

    // Start from an empty result so every early exit yields {NULL, 0}.
    pError->value = NULL;
    pError->length = 0;

    // _snprintf does not guarantee termination; pin the last byte and only
    // let it write sizeof-1 characters.
    pTempBuff[sizeof(pTempBuff)-1] = '\0';
    _snprintf(pTempBuff, sizeof(pTempBuff) - 1,
              "%08X: LdapErr: DSID-%08X, comment: %s, data %x, v%x",
              Win32Err,
              (gulHideDSID == DSID_HIDE_ALL) ? 0 : Dsid,
              LdapComments[CommentId],
              Data,
              LdapBuildNo);

    cbTempBuff = strlen(pTempBuff);
    if ( cbTempBuff == 0 ) {
        goto exit;
    }

    // Thread-state allocation; freed with the THSTATE, not by the caller.
    pString = (PCHAR)THAlloc(cbTempBuff+1);
    if ( pString == NULL ) {
        cbTempBuff = 0;
        goto exit;
    }

    // NOTE(review): after this increment pError->length counts the trailing
    // NUL as well — presumably deliberate so clients receive a terminated
    // string; confirm against the BER encoder before changing.
    cbTempBuff++;
    strcpy(pString,pTempBuff);

    LogEvent(DS_EVENT_CAT_LDAP_INTERFACE,
             DS_EVENT_SEV_MINIMAL,
             DIRLOG_LDAP_EXT_ERROR,
             szInsertSz(pString),
             NULL,
             NULL);

    IF_DEBUG(ERR_NORMAL) {
        DPRINT1(0,"Returning extended error string %s\n", pString);
    }

exit:
    pError->value = (PUCHAR)pString;
    pError->length = cbTempBuff;
    return code;

} // DoSetLdapError


BOOL
IsContextExpired(
    IN PTimeStamp tsContextExpires,
    IN PTimeStamp tsLocal
    )
/*++

Routine Description:

    Check if context has expired based on the time given.

Arguments:

    tsContextExpires - expiration time for context
    tsLocal - Current local time. If not present, we need to get
        the time for the caller.

Return Value:

    TRUE, if time has expired
    FALSE, otherwise

--*/
{
    //
    // context has expired?
    //

    if (tsContextExpires->QuadPart <= tsLocal->QuadPart) {

        // yup
        return TRUE;
    }

    return FALSE;

} // IsContextExpired


// Compute the next ATQ idle timeout, in seconds, for a connection whose
// security context expires at tsContextExpires.
//
// Returns the time remaining until expiry, floored at 30 seconds (so the
// timeout thread gets a chance to clean up an already-expired context) and
// capped at DefaultIdleTime.
DWORD
GetNextAtqTimeout(
    IN PTimeStamp tsContextExpires,
    IN PTimeStamp tsLocal,
    IN DWORD DefaultIdleTime
    )
{
    DWORD timeout = 30;     // 30 seconds minimum
    LARGE_INTEGER liDiff;

    //
    // if the context had already expired, give the timeout thread 30 seconds to cleanup.
    //

    if ( tsContextExpires->QuadPart <= tsLocal->QuadPart ) {
        goto exit;
    }

    //
    // get the difference and convert to seconds
    // (timestamps are in 100-nanosecond units, hence the 10*1000*1000 divisor)
    //

    liDiff.QuadPart = tsContextExpires->QuadPart - tsLocal->QuadPart;
    liDiff.QuadPart /= (LONGLONG)(10*1000*1000);

    //
    // if the high part is not 0, then the default timeout always wins
    // (the remaining time doesn't fit in 32 bits; saturate, then the
    // DefaultIdleTime clamp below takes effect)
    //

    if ( liDiff.HighPart != 0 ) {
        timeout = 0xFFFFFFFF;
    } else {
        timeout = liDiff.LowPart;
    }

    if ( timeout > DefaultIdleTime ) {
        timeout = DefaultIdleTime;
    }

exit:
    return timeout;

} // GetNextAtqTimeout
// Copyright (c) 2012-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

// Unit tests for CBloomFilter (BIP 37 connection bloom filtering):
// element insertion/containment, wire serialization against fixed golden
// byte strings, and transaction matching via IsRelevantAndUpdate().

#include "bloom.h"

#include "base58.h"
#include "clientversion.h"
#include "key.h"
#include "merkleblock.h"
#include "random.h"
#include "serialize.h"
#include "streams.h"
#include "uint256.h"
#include "util.h"
#include "utilstrencodings.h"
#include "test/test_youlike.h"

#include <vector>

#include <boost/test/unit_test.hpp>
#include <boost/tuple/tuple.hpp>

BOOST_FIXTURE_TEST_SUITE(bloom_tests, BasicTestingSetup)

// Insert three 20-byte elements, verify containment (and one near-miss),
// then check the serialized filter against a golden byte string.
BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize)
{
    CBloomFilter filter(3, 0.01, 0, BLOOM_UPDATE_ALL);

    filter.insert(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8"));
    BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter doesn't contain just-inserted object!");
    // One bit different in first byte
    BOOST_CHECK_MESSAGE(!filter.contains(ParseHex("19108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter contains something it shouldn't!");

    filter.insert(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee"));
    BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")), "Bloom filter doesn't contain just-inserted object (2)!");

    filter.insert(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5"));
    BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "Bloom filter doesn't contain just-inserted object (3)!");

    // Serialize and compare byte-for-byte with the expected encoding.
    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << filter;

    std::vector<unsigned char> vch = ParseHex("03614e9b050000000000000001");
    std::vector<char> expected(vch.size());

    for (unsigned int i = 0; i < vch.size(); i++)
        expected[i] = (char)vch[i];

    BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end());

    // Serialization must not disturb the filter contents.
    BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter doesn't contain just-inserted object!");
    filter.clear();
    BOOST_CHECK_MESSAGE( !filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter should be empty!");
}

BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize_with_tweak)
{
    // Same test as bloom_create_insert_serialize, but with a non-zero
    // nTweak (2147483649 = 0x80000001), which changes the hash seeds and
    // therefore the expected serialization below.
    CBloomFilter filter(3, 0.01, 2147483649UL, BLOOM_UPDATE_ALL);

    filter.insert(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8"));
    BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter doesn't contain just-inserted object!");
    // One bit different in first byte
    BOOST_CHECK_MESSAGE(!filter.contains(ParseHex("19108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter contains something it shouldn't!");

    filter.insert(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee"));
    BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b5a2c786d9ef4658287ced5914b37a1b4aa32eee")), "Bloom filter doesn't contain just-inserted object (2)!");

    filter.insert(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5"));
    BOOST_CHECK_MESSAGE(filter.contains(ParseHex("b9300670b4c5366e95b2699e8b18bc75e5f729c5")), "Bloom filter doesn't contain just-inserted object (3)!");

    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << filter;

    std::vector<unsigned char> vch = ParseHex("03ce4299050000000100008001");
    std::vector<char> expected(vch.size());

    for (unsigned int i = 0; i < vch.size(); i++)
        expected[i] = (char)vch[i];

    BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end());
}

// Insert a public key and its hash160, then check the serialized filter
// against a golden byte string.
BOOST_AUTO_TEST_CASE(bloom_create_insert_key)
{
    // Fixed WIF-encoded private key; the derived pubkey is deterministic.
    std::string strSecret = std::string("7sQb6QHALg4XyHsJHsSNXnEHGhZfzTTUPJXJqaqK7CavQkiL9Ms");
    CBitcoinSecret vchSecret;
    BOOST_CHECK(vchSecret.SetString(strSecret));

    CKey key = vchSecret.GetKey();
    CPubKey pubkey = key.GetPubKey();
    std::vector<unsigned char> vchPubKey(pubkey.begin(), pubkey.end());

    CBloomFilter filter(2, 0.001, 0, BLOOM_UPDATE_ALL);
    filter.insert(vchPubKey);
    uint160 hash = pubkey.GetID();
    filter.insert(std::vector<unsigned char>(hash.begin(), hash.end()));

    CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
    stream << filter;

    std::vector<unsigned char> vch = ParseHex("038fc16b080000000000000001");
    std::vector<char> expected(vch.size());

    for (unsigned int i = 0; i < vch.size(); i++)
        expected[i] = (char)vch[i];

    BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end());
}

// Exercise IsRelevantAndUpdate() against a fixed mainnet transaction and a
// transaction spending it: tx hash, input signature, pubkey, output
// addresses, and outpoints (plus negative cases) are all probed.
BOOST_AUTO_TEST_CASE(bloom_match)
{
    // Random real transaction (b4749f017444b051c44dfd2720e88f314ff94f3dd6d56d40ef65854fcd7fff6b)
    CDataStream stream(ParseHex("01000000010b26e9b7735eb6aabdf358bab62f9816a21ba9ebdb719d5299e88607d722c190000000008b4830450220070aca44506c5cef3a16ed519d7c3c39f8aab192c4e1c90d065f37b8a4af6141022100a8e160b856c2d43d27d8fba71e5aef6405b8643ac4cb7cb3c462aced7f14711a0141046d11fee51b0e60666d5049a9101a72741df480b96ee26488a4d3466b95c9a40ac5eeef87e10a5cd336c19a84565f80fa6c547957b7700ff4dfbdefe76036c339ffffffff021bff3d11000000001976a91404943fdd508053c75000106d3bc6e2754dbcff1988ac2f15de00000000001976a914a266436d2965547608b9e15d9032a7b9d64fa43188ac00000000"), SER_DISK, CLIENT_VERSION);
    CTransaction tx(deserialize, stream);

    // and one which spends it (e2769b09e784f32f62ef849763d4f45b98e07ba658647343b915ff832b110436)
    unsigned char ch[] = {0x01, 0x00, 0x00, 0x00, 0x01, 0x6b, 0xff, 0x7f, 0xcd, 0x4f, 0x85, 0x65, 0xef, 0x40, 0x6d, 0xd5, 0xd6, 0x3d, 0x4f, 0xf9, 0x4f, 0x31, 0x8f, 0xe8, 0x20, 0x27, 0xfd, 0x4d, 0xc4, 0x51, 0xb0, 0x44, 0x74, 0x01, 0x9f, 0x74, 0xb4, 0x00, 0x00, 0x00, 0x00, 0x8c, 0x49, 0x30, 0x46, 0x02, 0x21, 0x00, 0xda, 0x0d, 0xc6, 0xae, 0xce, 0xfe, 0x1e, 0x06, 0xef, 0xdf, 0x05, 0x77, 0x37, 0x57, 0xde, 0xb1, 0x68, 0x82, 0x09, 0x30, 0xe3, 0xb0, 0xd0, 0x3f, 0x46, 0xf5, 0xfc, 0xf1, 0x50, 0xbf, 0x99, 0x0c, 0x02, 0x21, 0x00, 0xd2, 0x5b, 0x5c, 0x87, 0x04, 0x00, 0x76, 0xe4, 0xf2, 0x53, 0xf8, 0x26, 0x2e, 0x76, 0x3e, 0x2d, 0xd5, 0x1e, 0x7f, 0xf0, 0xbe, 0x15, 0x77, 0x27, 0xc4, 0xbc, 0x42, 0x80, 0x7f, 0x17, 0xbd, 0x39, 0x01, 0x41, 0x04, 0xe6, 0xc2, 0x6e, 0xf6, 0x7d, 0xc6, 0x10, 0xd2, 0xcd, 0x19, 0x24, 0x84, 0x78, 0x9a, 0x6c, 0xf9, 0xae, 0xa9, 0x93, 0x0b, 0x94, 0x4b, 0x7e, 0x2d, 0xb5, 0x34, 0x2b, 0x9d, 0x9e, 0x5b, 0x9f, 0xf7, 0x9a, 0xff, 0x9a, 0x2e, 0xe1, 0x97, 0x8d, 0xd7, 0xfd, 0x01, 0xdf, 0xc5, 0x22, 0xee, 0x02, 0x28, 0x3d, 0x3b, 0x06, 0xa9, 0xd0, 0x3a, 0xcf, 0x80, 0x96, 0x96, 0x8d, 0x7d, 0xbb, 0x0f, 0x91, 0x78, 0xff, 0xff, 0xff, 0xff, 0x02, 0x8b, 0xa7, 0x94, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x19, 0x76, 0xa9, 0x14, 0xba, 0xde, 0xec, 0xfd, 0xef, 0x05, 0x07, 0x24, 0x7f, 0xc8, 0xf7, 0x42, 0x41, 0xd7, 0x3b, 0xc0, 0x39, 0x97, 0x2d, 0x7b, 0x88, 0xac, 0x40, 0x94, 0xa8, 0x02, 0x00, 0x00, 0x00, 0x00, 0x19, 0x76, 0xa9, 0x14, 0xc1, 0x09, 0x32, 0x48, 0x3f, 0xec, 0x93, 0xed, 0x51, 0xf5, 0xfe, 0x95, 0xe7, 0x25, 0x59, 0xf2, 0xcc, 0x70, 0x43, 0xf9, 0x88, 0xac, 0x00, 0x00, 0x00, 0x00, 0x00};
    // The array carries one extra trailing 0x00 byte; -1 drops it.
    std::vector<unsigned char> vch(ch, ch + sizeof(ch) -1);
    CDataStream spendStream(vch, SER_DISK, CLIENT_VERSION);
    CTransaction spendingTx(deserialize, spendStream);

    CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(uint256S("0xb4749f017444b051c44dfd2720e88f314ff94f3dd6d56d40ef65854fcd7fff6b"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx), "Simple Bloom filter didn't match tx hash");

    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    // byte-reversed tx hash
    filter.insert(ParseHex("6bff7fcd4f8565ef406dd5d63d4ff94f318fe82027fd4dc451b04474019f74b4"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx), "Simple Bloom filter didn't match manually serialized tx hash");

    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(ParseHex("30450220070aca44506c5cef3a16ed519d7c3c39f8aab192c4e1c90d065f37b8a4af6141022100a8e160b856c2d43d27d8fba71e5aef6405b8643ac4cb7cb3c462aced7f14711a01"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx), "Simple Bloom filter didn't match input signature");

    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(ParseHex("046d11fee51b0e60666d5049a9101a72741df480b96ee26488a4d3466b95c9a40ac5eeef87e10a5cd336c19a84565f80fa6c547957b7700ff4dfbdefe76036c339"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx), "Simple Bloom filter didn't match input pub key");

    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(ParseHex("04943fdd508053c75000106d3bc6e2754dbcff19"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx), "Simple Bloom filter didn't match output address");
    // BLOOM_UPDATE_ALL means the matched outpoint was added to the filter,
    // so the spending transaction must now match too.
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(spendingTx), "Simple Bloom filter didn't add output");

    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(ParseHex("a266436d2965547608b9e15d9032a7b9d64fa431"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx), "Simple Bloom filter didn't match output address");

    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(COutPoint(uint256S("0x90c122d70786e899529d71dbeba91ba216982fb6ba58f3bdaab65e73b7e9260b"), 0));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx), "Simple Bloom filter didn't match COutPoint");

    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    COutPoint prevOutPoint(uint256S("0x90c122d70786e899529d71dbeba91ba216982fb6ba58f3bdaab65e73b7e9260b"), 0);
    {
        // Hand-serialize the outpoint (32-byte hash || little-endian index)
        // and insert the raw bytes instead of the COutPoint overload.
        std::vector<unsigned char> data(32 + sizeof(unsigned int));
        memcpy(&data[0], prevOutPoint.hash.begin(), 32);
        memcpy(&data[32], &prevOutPoint.n, sizeof(unsigned int));
        filter.insert(data);
    }
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(tx), "Simple Bloom filter didn't match manually serialized COutPoint");

    // Negative cases: near-miss hash, address, and outpoints must not match.
    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(uint256S("00000009e784f32f62ef849763d4f45b98e07ba658647343b915ff832b110436"));
    BOOST_CHECK_MESSAGE(!filter.IsRelevantAndUpdate(tx), "Simple Bloom filter matched random tx hash");

    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(ParseHex("0000006d2965547608b9e15d9032a7b9d64fa431"));
    BOOST_CHECK_MESSAGE(!filter.IsRelevantAndUpdate(tx), "Simple Bloom filter matched random address");

    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(COutPoint(uint256S("0x90c122d70786e899529d71dbeba91ba216982fb6ba58f3bdaab65e73b7e9260b"), 1));
    BOOST_CHECK_MESSAGE(!filter.IsRelevantAndUpdate(tx), "Simple Bloom filter matched COutPoint for an output we didn't care about");

    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(COutPoint(uint256S("0x000000d70786e899529d71dbeba91ba216982fb6ba58f3bdaab65e73b7e9260b"), 0));
    BOOST_CHECK_MESSAGE(!filter.IsRelevantAndUpdate(tx), "Simple Bloom filter matched COutPoint for an output we didn't care about");
}

// Matching of DIP2 special transactions (masternode provider txes): the
// filter must match on collateral outpoint, owner/voting key ids, payout
// scripts, and proTxHash embedded in the extra payload.
BOOST_AUTO_TEST_CASE(dip2_bloom_match)
{
    // ProRegTx from testnet (txid: 39a1339d9bf26de701345beecc5de75a690bc9533741a3dbe90f2fd88b8ed461)
    CDataStream stream(ParseHex("0300010001c02142f16969d106f4cb144e17975bf9261b8e331354879c6d006e71494b3460000000006b483045022100d6eb9e25fc3215e6a4b5180a1957319d6658103566f0274452bd50d0eddedff202200824a00f582a29c9e3694d34e5525275cabd08c488a0c55b4350c6bfa4fb2cf8012102a8d6433a8f799a13d3495f4aa5b99bd288adca5f59e0c9609f15a221220241c5feffffff0121c89a3b000000001976a914e54445646929fac8b7d6c71715913af44324978488ac00000000fd12010100000000009b9054ff7839b940277b8eb8211570b2f16850ef729ee635e24d722fbc4a46230100000000000000000000000000ffffc6c74af14e1f891cbc8a94fa7fea64ca9994870dca0f75bbd0750efda51589f86e30cc2305e7388c01ce0309c19a182cf37bced97c7da72236f660c0a395e765e6e06962ecff5a69d7de359c348a574176c210c37a25d4ffd917866fb0a300001976a914e54445646929fac8b7d6c71715913af44324978488ac26d5a99c01521d1fca6299de576bae82f901fb56e4b797a945e876ac69068f36411f9cdb72a01b273a53bd916d8c90dd584bd3a1c01dd84fec84eb046f66a4e3b30d39ea3215293a035166b5072349ebc08efd30c2c73ebd023d920f1db0aef91a4e"), SER_DISK, CLIENT_VERSION);
    CTransaction proregtx(deserialize, stream);

    // ProUpServTx from testnet (txid: 0c9627a054784f207db34c311b06e8bb797c4a120a236dbb02b51268dd186d39)
    stream =
CDataStream(ParseHex("030002000161d48e8bd82f0fe9dba3413753c90b695ae75dccee5b3401e76df29b9d33a139000000006a473044022036dfd41268e3a387b909d9d1cf61e4d5e97b6b37d5951e82c4ca4fed419b4b6a02203777d818319897a11cf4cd8b4c6495c5c3a5f96fcad6d3a0596431b1146b53c6012102a8d6433a8f799a13d3495f4aa5b99bd288adca5f59e0c9609f15a221220241c5feffffff01a1c69a3b000000001976a914e54445646929fac8b7d6c71715913af44324978488ac00000000b5010061d48e8bd82f0fe9dba3413753c90b695ae75dccee5b3401e76df29b9d33a13900000000000000000000ffffc6c74af14e1f000ccf39b295938f4021fc16b23e4eedbd64a6c9a5aaae6bc9ca085db4bc29ff911612dc4544fc470415cdd4be5659fee6aa7d84c3572c712f5904b2e5d38cb217762717d88e4e1a956a9e82ce132e2b9b144e26cb3ff1e53675ede2769d99f46796ea0b8cbf33b78c15fe9a437dc4d1131ce2af8fd2ed5b306b326f9fcffcd416"), SER_DISK, CLIENT_VERSION);
    CTransaction proupservtx(deserialize, stream);

    // ProUpRegTx from testnet (txid: 3037bf4e45d1a9d57891ea8baa72b38ad6323bd28945b11325bdd33ebea049b3)
    stream = CDataStream(ParseHex("0300030001396d18dd6812b502bb6d230a124a7c79bbe8061b314cb37d204f7854a027960c000000006a47304402203aefa0e1a08469050edc1e5da6e24f783162bebc15cc006f717097e56e07fa0b02207c7f47d113355b095c7edd2408d0554aa6b87187bf95652c516fc7747720fb19012102a8d6433a8f799a13d3495f4aa5b99bd288adca5f59e0c9609f15a221220241c5feffffff01f2c49a3b000000001976a914e54445646929fac8b7d6c71715913af44324978488ac00000000e4010061d48e8bd82f0fe9dba3413753c90b695ae75dccee5b3401e76df29b9d33a13900000efda51589f86e30cc2305e7388c01ce0309c19a182cf37bced97c7da72236f660c0a395e765e6e06962ecff5a69d7de359c348a574176c210c37a25d4ffd917866fb0a31976a914e54445646929fac8b7d6c71715913af44324978488aca8141db635af714f32c7415b298937e171c6bb8a3c8627852ae2b69917ece86a411fa51861025bb5ff4ba4ad3be6090c4ba76b1671d70799ed6882c57bcfeaf27cbd558068e9b5fc1553abce8822fcee63e8f6fbb06ad5b753d47e794bfacadbde3f"), SER_DISK, CLIENT_VERSION);
    CTransaction proupregtx(deserialize, stream);

    // ProUpRevTx from testnet (txid: 70c41a5fea07a80c1297771c43eb58b8f52222b971f61f5c7c2030ec0baf7f9a)
    stream = CDataStream(ParseHex("0300040001b349a0be3ed3bd2513b14589d23b32d68ab372aa8bea9178d5a9d1454ebf3730000000006a4730440220428d87daf3b0ddca9656d59592000f9930a88162c0b017ca460c69c33b76bda202200276a521d94acc57e010262aa1d7d016cebbe7a9806d26dbebb69102ce9cf7b2012102a8d6433a8f799a13d3495f4aa5b99bd288adca5f59e0c9609f15a221220241c5feffffff0183c39a3b000000001976a914e54445646929fac8b7d6c71715913af44324978488ac00000000a4010061d48e8bd82f0fe9dba3413753c90b695ae75dccee5b3401e76df29b9d33a1390000c024ffd34d15686f7a376ecdf45800ddcebf4eb966fd3123cf9450515a38881a0de852c97297bbd9cedb79280f3f1e31b092904a8870274a5a0e52de2ae1dc0bad74851dc0a1e4fe4c274535a2422d3e1510a7be2fbecaae73128e8eb2f336382fb376d2f82273a72960980972b02e3ecf895c00187e0cbe735dc44cb3d97711"), SER_DISK, CLIENT_VERSION);
    CTransaction prouprevtx(deserialize, stream);

    // check collateral outpoint match in ProRegTx
    CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(COutPoint(uint256S("0x23464abc2f724de235e69e72ef5068f1b2701521b88e7b2740b93978ff54909b"), 1));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(proregtx), "Bloom filter didn't match collateral outpoint in ProRegTx");

    // check owner keyid match in ProRegTx
    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(ParseHex("891cbc8a94fa7fea64ca9994870dca0f75bbd075"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(proregtx), "Bloom filter didn't match Owner keyid in ProRegTx");

    // check voting keyid match in ProRegTx
    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(ParseHex("359c348a574176c210c37a25d4ffd917866fb0a3"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(proregtx), "Bloom filter didn't match Voting keyid in ProRegTx");

    // check scriptPayout match in ProRegTx
    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(ParseHex("e54445646929fac8b7d6c71715913af443249784"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(proregtx), "Bloom filter didn't match scriptPayout in ProRegTx");

    // check proTxHash match in ProUpServTx
    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(uint256S("0x39a1339d9bf26de701345beecc5de75a690bc9533741a3dbe90f2fd88b8ed461"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(proupservtx), "Bloom filter didn't match proTxHash in ProUpServTx");

    // check scriptOperatorPayout match in ProUpServTx
    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(ParseHex("e54445646929fac8b7d6c71715913af443249784"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(proupservtx), "Bloom filter didn't match scriptOperatorPayout in ProUpServTx");

    // check proTxHash match in ProUpRegTx
    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(uint256S("0x39a1339d9bf26de701345beecc5de75a690bc9533741a3dbe90f2fd88b8ed461"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(proupregtx), "Bloom filter didn't match proTxHash in ProUpRegTx");

    // check voting keyid match in ProUpRegTx
    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(ParseHex("359c348a574176c210c37a25d4ffd917866fb0a3"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(proupregtx), "Bloom filter didn't match Voting keyid in ProUpRegTx");

    // check scriptPayout match in ProUpRegTx
    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(ParseHex("e54445646929fac8b7d6c71715913af443249784"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(proupregtx), "Bloom filter didn't match scriptPayout in ProUpRegTx");

    // check proTxHash match in ProUpRevTx
    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    filter.insert(uint256S("0x39a1339d9bf26de701345beecc5de75a690bc9533741a3dbe90f2fd88b8ed461"));
    BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(prouprevtx), "Bloom filter didn't match proTxHash in ProUpRevTx");

    // check filter is not matching if it doesn't contain relative data
    filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL);
    // extend real data with additional byte
    filter.insert(uint256S("0x39a1339d9bf26de701345beecc5de75a690bc9533741a3dbe90f2fd88b8ed46100"));
    filter.insert(ParseHex("e54445646929fac8b7d6c71715913af44324978400"));
    filter.insert(ParseHex("359c348a574176c210c37a25d4ffd917866fb0a300"));
    filter.insert(uint256S("0x39a1339d9bf26de701345beecc5de75a690bc9533741a3dbe90f2fd88b8ed46100"));
    BOOST_CHECK_MESSAGE(!filter.IsRelevantAndUpdate(prouprevtx), "Bloom filter match unrelated data");
    BOOST_CHECK_MESSAGE(!filter.IsRelevantAndUpdate(proupregtx), "Bloom filter match unrelated data");
}

// With BLOOM_UPDATE_ALL, a matching ProRegTx/ProUpRegTx must cause its
// proTxHash to be added to the filter so follow-up provider txes match.
BOOST_AUTO_TEST_CASE(dip2_bloom_update)
{
    // ProRegTx from testnet (txid: 39a1339d9bf26de701345beecc5de75a690bc9533741a3dbe90f2fd88b8ed461)
    CDataStream stream(ParseHex("0300010001c02142f16969d106f4cb144e17975bf9261b8e331354879c6d006e71494b3460000000006b483045022100d6eb9e25fc3215e6a4b5180a1957319d6658103566f0274452bd50d0eddedff202200824a00f582a29c9e3694d34e5525275cabd08c488a0c55b4350c6bfa4fb2cf8012102a8d6433a8f799a13d3495f4aa5b99bd288adca5f59e0c9609f15a221220241c5feffffff0121c89a3b000000001976a914e54445646929fac8b7d6c71715913af44324978488ac00000000fd12010100000000009b9054ff7839b940277b8eb8211570b2f16850ef729ee635e24d722fbc4a46230100000000000000000000000000ffffc6c74af14e1f891cbc8a94fa7fea64ca9994870dca0f75bbd0750efda51589f86e30cc2305e7388c01ce0309c19a182cf37bced97c7da72236f660c0a395e765e6e06962ecff5a69d7de359c348a574176c210c37a25d4ffd917866fb0a300001976a914e54445646929fac8b7d6c71715913af44324978488ac26d5a99c01521d1fca6299de576bae82f901fb56e4b797a945e876ac69068f36411f9cdb72a01b273a53bd916d8c90dd584bd3a1c01dd84fec84eb046f66a4e3b30d39ea3215293a035166b5072349ebc08efd30c2c73ebd023d920f1db0aef91a4e"), SER_DISK, CLIENT_VERSION);
    CTransaction proregtx(deserialize, stream);

    // ProUpServTx from testnet (txid: 0c9627a054784f207db34c311b06e8bb797c4a120a236dbb02b51268dd186d39)
    stream = CDataStream(ParseHex("030002000161d48e8bd82f0fe9dba3413753c90b695ae75dccee5b3401e76df29b9d33a139000000006a473044022036dfd41268e3a387b909d9d1cf61e4d5e97b6b37d5951e82c4ca4fed419b4b6a02203777d818319897a11cf4cd8b4c6495c5c3a5f96fcad6d3a0596431b1146b53c6012102a8d6433a8f799a13d3495f4aa5b99bd288adca5f59e0c9609f15a221220241c5feffffff01a1c69a3b000000001976a914e54445646929fac8b7d6c71715913af44324978488ac00000000b5010061d48e8bd82f0fe9dba3413753c90b695ae75dccee5b3401e76df29b9d33a13900000000000000000000ffffc6c74af14e1f000ccf39b295938f4021fc16b23e4eedbd64a6c9a5aaae6bc9ca085db4bc29ff911612dc4544fc470415cdd4be5659fee6aa7d84c3572c712f5904b2e5d38cb217762717d88e4e1a956a9e82ce132e2b9b144e26cb3ff1e53675ede2769d99f46796ea0b8cbf33b78c15fe9a437dc4d1131ce2af8fd2ed5b306b326f9fcffcd416"), SER_DISK, CLIENT_VERSION);
    CTransaction proupservtx(deserialize, stream);

    // ProUpRegTx from testnet (txid: 3037bf4e45d1a9d57891ea8baa72b38ad6323bd28945b11325bdd33ebea049b3)
    stream = CDataStream(ParseHex("0300030001396d18dd6812b502bb6d230a124a7c79bbe8061b314cb37d204f7854a027960c000000006a47304402203aefa0e1a08469050edc1e5da6e24f783162bebc15cc006f717097e56e07fa0b02207c7f47d113355b095c7edd2408d0554aa6b87187bf95652c516fc7747720fb19012102a8d6433a8f799a13d3495f4aa5b99bd288adca5f59e0c9609f15a221220241c5feffffff01f2c49a3b000000001976a914e54445646929fac8b7d6c71715913af44324978488ac00000000e4010061d48e8bd82f0fe9dba3413753c90b695ae75dccee5b3401e76df29b9d33a13900000efda51589f86e30cc2305e7388c01ce0309c19a182cf37bced97c7da72236f660c0a395e765e6e06962ecff5a69d7de359c348a574176c210c37a25d4ffd917866fb0a31976a914e54445646929fac8b7d6c71715913af44324978488aca8141db635af714f32c7415b298937e171c6bb8a3c8627852ae2b69917ece86a411fa51861025bb5ff4ba4ad3be6090c4ba76b1671d70799ed6882c57bcfeaf27cbd558068e9b5fc1553abce8822fcee63e8f6fbb06ad5b753d47e794bfacadbde3f"), SER_DISK, CLIENT_VERSION);
    CTransaction proupregtx(deserialize, stream);

    // ProUpRevTx from testnet (txid: 70c41a5fea07a80c1297771c43eb58b8f52222b971f61f5c7c2030ec0baf7f9a)
stream = CDataStream(ParseHex("0300040001b349a0be3ed3bd2513b14589d23b32d68ab372aa8bea9178d5a9d1454ebf3730000000006a4730440220428d87daf3b0ddca9656d59592000f9930a88162c0b017ca460c69c33b76bda202200276a521d94acc57e010262aa1d7d016cebbe7a9806d26dbebb69102ce9cf7b2012102a8d6433a8f799a13d3495f4aa5b99bd288adca5f59e0c9609f15a221220241c5feffffff0183c39a3b000000001976a914e54445646929fac8b7d6c71715913af44324978488ac00000000a4010061d48e8bd82f0fe9dba3413753c90b695ae75dccee5b3401e76df29b9d33a1390000c024ffd34d15686f7a376ecdf45800ddcebf4eb966fd3123cf9450515a38881a0de852c97297bbd9cedb79280f3f1e31b092904a8870274a5a0e52de2ae1dc0bad74851dc0a1e4fe4c274535a2422d3e1510a7be2fbecaae73128e8eb2f336382fb376d2f82273a72960980972b02e3ecf895c00187e0cbe735dc44cb3d97711"), SER_DISK, CLIENT_VERSION); CTransaction prouprevtx(deserialize, stream); // if ProRegTx matches, all related pro txes match too CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL); filter.insert(COutPoint(uint256S("0x23464abc2f724de235e69e72ef5068f1b2701521b88e7b2740b93978ff54909b"), 1)); BOOST_CHECK_MESSAGE(!filter.IsRelevantAndUpdate(proupservtx), "Bloom filter matches without update"); BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(proregtx), "Bloom filter didn't match collateral outpoint in ProRegTx"); BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(proupservtx), "Bloom filter wasn't updated with proregtx hash"); // if ProUpRegTx matches, all related pro txes match too filter = CBloomFilter(10, 0.000001, 0, BLOOM_UPDATE_ALL); filter.insert(ParseHex("359c348a574176c210c37a25d4ffd917866fb0a3")); BOOST_CHECK_MESSAGE(!filter.IsRelevantAndUpdate(proupservtx), "Bloom filter matches without update"); BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(proupregtx), "Bloom filter didn't match Voting keyid in ProUpRegTx"); BOOST_CHECK_MESSAGE(filter.IsRelevantAndUpdate(proupservtx), "Bloom filter wasn't updated with proregtx hash"); } BOOST_AUTO_TEST_CASE(merkle_block_1) { // Random real block 
(0000000000013b8ab2cd513b0261a14096412195a72a0c4827d229dcc7e0f7af) // With 9 txes CBlock block; CDataStream stream(ParseHex("0100000090f0a9f110702f808219ebea1173056042a714bad51b916cb6800000000000005275289558f51c9966699404ae2294730c3c9f9bda53523ce50e9b95e558da2fdb261b4d4c86041b1ab1bf930901000000010000000000000000000000000000000000000000000000000000000000000000ffffffff07044c86041b0146ffffffff0100f2052a01000000434104e18f7afbe4721580e81e8414fc8c24d7cfacf254bb5c7b949450c3e997c2dc1242487a8169507b631eb3771f2b425483fb13102c4eb5d858eef260fe70fbfae0ac00000000010000000196608ccbafa16abada902780da4dc35dafd7af05fa0da08cf833575f8cf9e836000000004a493046022100dab24889213caf43ae6adc41cf1c9396c08240c199f5225acf45416330fd7dbd022100fe37900e0644bf574493a07fc5edba06dbc07c311b947520c2d514bc5725dcb401ffffffff0100f2052a010000001976a914f15d1921f52e4007b146dfa60f369ed2fc393ce288ac000000000100000001fb766c1288458c2bafcfec81e48b24d98ec706de6b8af7c4e3c29419bfacb56d000000008c493046022100f268ba165ce0ad2e6d93f089cfcd3785de5c963bb5ea6b8c1b23f1ce3e517b9f022100da7c0f21adc6c401887f2bfd1922f11d76159cbc597fbd756a23dcbb00f4d7290141042b4e8625a96127826915a5b109852636ad0da753c9e1d5606a50480cd0c40f1f8b8d898235e571fe9357d9ec842bc4bba1827daaf4de06d71844d0057707966affffffff0280969800000000001976a9146963907531db72d0ed1a0cfb471ccb63923446f388ac80d6e34c000000001976a914f0688ba1c0d1ce182c7af6741e02658c7d4dfcd388ac000000000100000002c40297f730dd7b5a99567eb8d27b78758f607507c52292d02d4031895b52f2ff010000008b483045022100f7edfd4b0aac404e5bab4fd3889e0c6c41aa8d0e6fa122316f68eddd0a65013902205b09cc8b2d56e1cd1f7f2fafd60a129ed94504c4ac7bdc67b56fe67512658b3e014104732012cb962afa90d31b25d8fb0e32c94e513ab7a17805c14ca4c3423e18b4fb5d0e676841733cb83abaf975845c9f6f2a8097b7d04f4908b18368d6fc2d68ecffffffffca5065ff9617cbcba45eb23726df6498a9b9cafed4f54cbab9d227b0035ddefb000000008a473044022068010362a13c7f9919fa832b2dee4e788f61f6f5d344a7c2a0da6ae740605658022006d1af525b9a14a35c003b78b72bd59738cd676f845d1ff3fc25049e01003614014104732012cb962afa90d
31b25d8fb0e32c94e513ab7a17805c14ca4c3423e18b4fb5d0e676841733cb83abaf975845c9f6f2a8097b7d04f4908b18368d6fc2d68ecffffffff01001ec4110200000043410469ab4181eceb28985b9b4e895c13fa5e68d85761b7eee311db5addef76fa8621865134a221bd01f28ec9999ee3e021e60766e9d1f3458c115fb28650605f11c9ac000000000100000001cdaf2f758e91c514655e2dc50633d1e4c84989f8aa90a0dbc883f0d23ed5c2fa010000008b48304502207ab51be6f12a1962ba0aaaf24a20e0b69b27a94fac5adf45aa7d2d18ffd9236102210086ae728b370e5329eead9accd880d0cb070aea0c96255fae6c4f1ddcce1fd56e014104462e76fd4067b3a0aa42070082dcb0bf2f388b6495cf33d789904f07d0f55c40fbd4b82963c69b3dc31895d0c772c812b1d5fbcade15312ef1c0e8ebbb12dcd4ffffffff02404b4c00000000001976a9142b6ba7c9d796b75eef7942fc9288edd37c32f5c388ac002d3101000000001976a9141befba0cdc1ad56529371864d9f6cb042faa06b588ac000000000100000001b4a47603e71b61bc3326efd90111bf02d2f549b067f4c4a8fa183b57a0f800cb010000008a4730440220177c37f9a505c3f1a1f0ce2da777c339bd8339ffa02c7cb41f0a5804f473c9230220585b25a2ee80eb59292e52b987dad92acb0c64eced92ed9ee105ad153cdb12d001410443bd44f683467e549dae7d20d1d79cbdb6df985c6e9c029c8d0c6cb46cc1a4d3cf7923c5021b27f7a0b562ada113bc85d5fda5a1b41e87fe6e8802817cf69996ffffffff0280651406000000001976a9145505614859643ab7b547cd7f1f5e7e2a12322d3788ac00aa0271000000001976a914ea4720a7a52fc166c55ff2298e07baf70ae67e1b88ac00000000010000000586c62cd602d219bb60edb14a3e204de0705176f9022fe49a538054fb14abb49e010000008c493046022100f2bc2aba2534becbdf062eb993853a42bbbc282083d0daf9b4b585bd401aa8c9022100b1d7fd7ee0b95600db8535bbf331b19eed8d961f7a8e54159c53675d5f69df8c014104462e76fd4067b3a0aa42070082dcb0bf2f388b6495cf33d789904f07d0f55c40fbd4b82963c69b3dc31895d0c772c812b1d5fbcade15312ef1c0e8ebbb12dcd4ffffffff03ad0e58ccdac3df9dc28a218bcf6f1997b0a93306faaa4b3a28ae83447b2179010000008b483045022100be12b2937179da88599e27bb31c3525097a07cdb52422d165b3ca2f2020ffcf702200971b51f853a53d644ebae9ec8f3512e442b1bcb6c315a5b491d119d10624c83014104462e76fd4067b3a0aa42070082dcb0bf2f388b6495cf33d789904f07d0f55c40fbd4b82963c69b3dc31895d0c772c
812b1d5fbcade15312ef1c0e8ebbb12dcd4ffffffff2acfcab629bbc8685792603762c921580030ba144af553d271716a95089e107b010000008b483045022100fa579a840ac258871365dd48cd7552f96c8eea69bd00d84f05b283a0dab311e102207e3c0ee9234814cfbb1b659b83671618f45abc1326b9edcc77d552a4f2a805c0014104462e76fd4067b3a0aa42070082dcb0bf2f388b6495cf33d789904f07d0f55c40fbd4b82963c69b3dc31895d0c772c812b1d5fbcade15312ef1c0e8ebbb12dcd4ffffffffdcdc6023bbc9944a658ddc588e61eacb737ddf0a3cd24f113b5a8634c517fcd2000000008b4830450221008d6df731df5d32267954bd7d2dda2302b74c6c2a6aa5c0ca64ecbabc1af03c75022010e55c571d65da7701ae2da1956c442df81bbf076cdbac25133f99d98a9ed34c014104462e76fd4067b3a0aa42070082dcb0bf2f388b6495cf33d789904f07d0f55c40fbd4b82963c69b3dc31895d0c772c812b1d5fbcade15312ef1c0e8ebbb12dcd4ffffffffe15557cd5ce258f479dfd6dc6514edf6d7ed5b21fcfa4a038fd69f06b83ac76e010000008b483045022023b3e0ab071eb11de2eb1cc3a67261b866f86bf6867d4558165f7c8c8aca2d86022100dc6e1f53a91de3efe8f63512850811f26284b62f850c70ca73ed5de8771fb451014104462e76fd4067b3a0aa42070082dcb0bf2f388b6495cf33d789904f07d0f55c40fbd4b82963c69b3dc31895d0c772c812b1d5fbcade15312ef1c0e8ebbb12dcd4ffffffff01404b4c00000000001976a9142b6ba7c9d796b75eef7942fc9288edd37c32f5c388ac00000000010000000166d7577163c932b4f9690ca6a80b6e4eb001f0a2fa9023df5595602aae96ed8d000000008a4730440220262b42546302dfb654a229cefc86432b89628ff259dc87edd1154535b16a67e102207b4634c020a97c3e7bbd0d4d19da6aa2269ad9dded4026e896b213d73ca4b63f014104979b82d02226b3a4597523845754d44f13639e3bf2df5e82c6aab2bdc79687368b01b1ab8b19875ae3c90d661a3d0a33161dab29934edeb36aa01976be3baf8affffffff02404b4c00000000001976a9144854e695a02af0aeacb823ccbc272134561e0a1688ac40420f00000000001976a914abee93376d6b37b5c2940655a6fcaf1c8e74237988ac0000000001000000014e3f8ef2e91349a9059cb4f01e54ab2597c1387161d3da89919f7ea6acdbb371010000008c49304602210081f3183471a5ca22307c0800226f3ef9c353069e0773ac76bb580654d56aa523022100d4c56465bdc069060846f4fbf2f6b20520b2a80b08b168b31e66ddb9c694e240014104976c79848e18251612f8940875b2b08d06e6dc73b9840e886
0c066b7e87432c477e9a59a453e71e6d76d5fe34058b800a098fc1740ce3012e8fc8a00c96af966ffffffff02c0e1e400000000001976a9144134e75a6fcb6042034aab5e18570cf1f844f54788ac404b4c00000000001976a9142b6ba7c9d796b75eef7942fc9288edd37c32f5c388ac00000000"), SER_NETWORK, PROTOCOL_VERSION); stream >> block; CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL); // Match the last transaction filter.insert(uint256S("0x74d681e0e03bafa802c8aa084379aa98d9fcd632ddc2ed9782b586ec87451f20")); CMerkleBlock merkleBlock(block, filter); BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash()); BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 1); std::pair<unsigned int, uint256> pair = merkleBlock.vMatchedTxn[0]; BOOST_CHECK(merkleBlock.vMatchedTxn[0].second == uint256S("0x74d681e0e03bafa802c8aa084379aa98d9fcd632ddc2ed9782b586ec87451f20")); BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 8); std::vector<uint256> vMatched; std::vector<unsigned int> vIndex; BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); // Also match the 8th transaction filter.insert(uint256S("0xdd1fd2a6fc16404faf339881a90adbde7f4f728691ac62e8f168809cdfae1053")); merkleBlock = CMerkleBlock(block, filter); BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash()); BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 2); BOOST_CHECK(merkleBlock.vMatchedTxn[1] == pair); BOOST_CHECK(merkleBlock.vMatchedTxn[0].second == uint256S("0xdd1fd2a6fc16404faf339881a90adbde7f4f728691ac62e8f168809cdfae1053")); BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 7); BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); } 
BOOST_AUTO_TEST_CASE(merkle_block_2) { // Random real block (000000005a4ded781e667e06ceefafb71410b511fe0d5adc3e5a27ecbec34ae6) // With 4 txes CBlock block; CDataStream stream(ParseHex("0100000075616236cc2126035fadb38deb65b9102cc2c41c09cdf29fc051906800000000fe7d5e12ef0ff901f6050211249919b1c0653771832b3a80c66cea42847f0ae1d4d26e49ffff001d00f0a4410401000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d029105ffffffff0100f2052a010000004341046d8709a041d34357697dfcb30a9d05900a6294078012bf3bb09c6f9b525f1d16d5503d7905db1ada9501446ea00728668fc5719aa80be2fdfc8a858a4dbdd4fbac00000000010000000255605dc6f5c3dc148b6da58442b0b2cd422be385eab2ebea4119ee9c268d28350000000049483045022100aa46504baa86df8a33b1192b1b9367b4d729dc41e389f2c04f3e5c7f0559aae702205e82253a54bf5c4f65b7428551554b2045167d6d206dfe6a2e198127d3f7df1501ffffffff55605dc6f5c3dc148b6da58442b0b2cd422be385eab2ebea4119ee9c268d2835010000004847304402202329484c35fa9d6bb32a55a70c0982f606ce0e3634b69006138683bcd12cbb6602200c28feb1e2555c3210f1dddb299738b4ff8bbe9667b68cb8764b5ac17b7adf0001ffffffff0200e1f505000000004341046a0765b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00180d8f000000004341044a656f065871a353f216ca26cef8dde2f03e8c16202d2e8ad769f02032cb86a5eb5e56842e92e19141d60a01928f8dd2c875a390f67c1f6c94cfc617c0ea45afac0000000001000000025f9a06d3acdceb56be1bfeaa3e8a25e62d182fa24fefe899d1c17f1dad4c2028000000004847304402205d6058484157235b06028c30736c15613a28bdb768ee628094ca8b0030d4d6eb0220328789c9a2ec27ddaec0ad5ef58efded42e6ea17c2e1ce838f3d6913f5e95db601ffffffff5f9a06d3acdceb56be1bfeaa3e8a25e62d182fa24fefe899d1c17f1dad4c2028010000004a493046022100c45af050d3cea806cedd0ab22520c53ebe63b987b8954146cdca42487b84bdd6022100b9b027716a6b59e640da50a864d6dd8a0ef24c76ce62391fa3eabaf4d2886d2d01ffffffff0200e1f505000000004341046a0765b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b4
4d8e8d9dfdc0cac00180d8f000000004341046a0765b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac000000000100000002e2274e5fea1bf29d963914bd301aa63b64daaf8a3e88f119b5046ca5738a0f6b0000000048473044022016e7a727a061ea2254a6c358376aaa617ac537eb836c77d646ebda4c748aac8b0220192ce28bf9f2c06a6467e6531e27648d2b3e2e2bae85159c9242939840295ba501ffffffffe2274e5fea1bf29d963914bd301aa63b64daaf8a3e88f119b5046ca5738a0f6b010000004a493046022100b7a1a755588d4190118936e15cd217d133b0e4a53c3c15924010d5648d8925c9022100aaef031874db2114f2d869ac2de4ae53908fbfea5b2b1862e181626bb9005c9f01ffffffff0200e1f505000000004341044a656f065871a353f216ca26cef8dde2f03e8c16202d2e8ad769f02032cb86a5eb5e56842e92e19141d60a01928f8dd2c875a390f67c1f6c94cfc617c0ea45afac00180d8f000000004341046a0765b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00000000"), SER_NETWORK, PROTOCOL_VERSION); stream >> block; CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL); // Match the first transaction filter.insert(uint256S("0xe980fe9f792d014e73b95203dc1335c5f9ce19ac537a419e6df5b47aecb93b70")); CMerkleBlock merkleBlock(block, filter); BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash()); BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 1); std::pair<unsigned int, uint256> pair = merkleBlock.vMatchedTxn[0]; BOOST_CHECK(merkleBlock.vMatchedTxn[0].second == uint256S("0xe980fe9f792d014e73b95203dc1335c5f9ce19ac537a419e6df5b47aecb93b70")); BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 0); std::vector<uint256> vMatched; std::vector<unsigned int> vIndex; BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); // Match an output from the second transaction (the pubkey for address 
1DZTzaBHUDM7T3QvUKBz4qXMRpkg8jsfB5) // This should match the third transaction because it spends the output matched // It also matches the fourth transaction, which spends to the pubkey again filter.insert(ParseHex("044a656f065871a353f216ca26cef8dde2f03e8c16202d2e8ad769f02032cb86a5eb5e56842e92e19141d60a01928f8dd2c875a390f67c1f6c94cfc617c0ea45af")); merkleBlock = CMerkleBlock(block, filter); BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash()); BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 4); BOOST_CHECK(pair == merkleBlock.vMatchedTxn[0]); BOOST_CHECK(merkleBlock.vMatchedTxn[1].second == uint256S("0x28204cad1d7fc1d199e8ef4fa22f182de6258a3eaafe1bbe56ebdcacd3069a5f")); BOOST_CHECK(merkleBlock.vMatchedTxn[1].first == 1); BOOST_CHECK(merkleBlock.vMatchedTxn[2].second == uint256S("0x6b0f8a73a56c04b519f1883e8aafda643ba61a30bd1439969df21bea5f4e27e2")); BOOST_CHECK(merkleBlock.vMatchedTxn[2].first == 2); BOOST_CHECK(merkleBlock.vMatchedTxn[3].second == uint256S("0x3c1d7e82342158e4109df2e0b6348b6e84e403d8b4046d7007663ace63cddb23")); BOOST_CHECK(merkleBlock.vMatchedTxn[3].first == 3); BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); } BOOST_AUTO_TEST_CASE(merkle_block_2_with_update_none) { // Random real block (000000005a4ded781e667e06ceefafb71410b511fe0d5adc3e5a27ecbec34ae6) // With 4 txes CBlock block; CDataStream 
stream(ParseHex("0100000075616236cc2126035fadb38deb65b9102cc2c41c09cdf29fc051906800000000fe7d5e12ef0ff901f6050211249919b1c0653771832b3a80c66cea42847f0ae1d4d26e49ffff001d00f0a4410401000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d029105ffffffff0100f2052a010000004341046d8709a041d34357697dfcb30a9d05900a6294078012bf3bb09c6f9b525f1d16d5503d7905db1ada9501446ea00728668fc5719aa80be2fdfc8a858a4dbdd4fbac00000000010000000255605dc6f5c3dc148b6da58442b0b2cd422be385eab2ebea4119ee9c268d28350000000049483045022100aa46504baa86df8a33b1192b1b9367b4d729dc41e389f2c04f3e5c7f0559aae702205e82253a54bf5c4f65b7428551554b2045167d6d206dfe6a2e198127d3f7df1501ffffffff55605dc6f5c3dc148b6da58442b0b2cd422be385eab2ebea4119ee9c268d2835010000004847304402202329484c35fa9d6bb32a55a70c0982f606ce0e3634b69006138683bcd12cbb6602200c28feb1e2555c3210f1dddb299738b4ff8bbe9667b68cb8764b5ac17b7adf0001ffffffff0200e1f505000000004341046a0765b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00180d8f000000004341044a656f065871a353f216ca26cef8dde2f03e8c16202d2e8ad769f02032cb86a5eb5e56842e92e19141d60a01928f8dd2c875a390f67c1f6c94cfc617c0ea45afac0000000001000000025f9a06d3acdceb56be1bfeaa3e8a25e62d182fa24fefe899d1c17f1dad4c2028000000004847304402205d6058484157235b06028c30736c15613a28bdb768ee628094ca8b0030d4d6eb0220328789c9a2ec27ddaec0ad5ef58efded42e6ea17c2e1ce838f3d6913f5e95db601ffffffff5f9a06d3acdceb56be1bfeaa3e8a25e62d182fa24fefe899d1c17f1dad4c2028010000004a493046022100c45af050d3cea806cedd0ab22520c53ebe63b987b8954146cdca42487b84bdd6022100b9b027716a6b59e640da50a864d6dd8a0ef24c76ce62391fa3eabaf4d2886d2d01ffffffff0200e1f505000000004341046a0765b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00180d8f000000004341046a0765b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac0
00000000100000002e2274e5fea1bf29d963914bd301aa63b64daaf8a3e88f119b5046ca5738a0f6b0000000048473044022016e7a727a061ea2254a6c358376aaa617ac537eb836c77d646ebda4c748aac8b0220192ce28bf9f2c06a6467e6531e27648d2b3e2e2bae85159c9242939840295ba501ffffffffe2274e5fea1bf29d963914bd301aa63b64daaf8a3e88f119b5046ca5738a0f6b010000004a493046022100b7a1a755588d4190118936e15cd217d133b0e4a53c3c15924010d5648d8925c9022100aaef031874db2114f2d869ac2de4ae53908fbfea5b2b1862e181626bb9005c9f01ffffffff0200e1f505000000004341044a656f065871a353f216ca26cef8dde2f03e8c16202d2e8ad769f02032cb86a5eb5e56842e92e19141d60a01928f8dd2c875a390f67c1f6c94cfc617c0ea45afac00180d8f000000004341046a0765b5865641ce08dd39690aade26dfbf5511430ca428a3089261361cef170e3929a68aee3d8d4848b0c5111b0a37b82b86ad559fd2a745b44d8e8d9dfdc0cac00000000"), SER_NETWORK, PROTOCOL_VERSION); stream >> block; CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_NONE); // Match the first transaction filter.insert(uint256S("0xe980fe9f792d014e73b95203dc1335c5f9ce19ac537a419e6df5b47aecb93b70")); CMerkleBlock merkleBlock(block, filter); BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash()); BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 1); std::pair<unsigned int, uint256> pair = merkleBlock.vMatchedTxn[0]; BOOST_CHECK(merkleBlock.vMatchedTxn[0].second == uint256S("0xe980fe9f792d014e73b95203dc1335c5f9ce19ac537a419e6df5b47aecb93b70")); BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 0); std::vector<uint256> vMatched; std::vector<unsigned int> vIndex; BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); // Match an output from the second transaction (the pubkey for address 1DZTzaBHUDM7T3QvUKBz4qXMRpkg8jsfB5) // This should not match the third transaction though it spends the output matched // It will match the fourth transaction, which has 
another pay-to-pubkey output to the same address filter.insert(ParseHex("044a656f065871a353f216ca26cef8dde2f03e8c16202d2e8ad769f02032cb86a5eb5e56842e92e19141d60a01928f8dd2c875a390f67c1f6c94cfc617c0ea45af")); merkleBlock = CMerkleBlock(block, filter); BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash()); BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 3); BOOST_CHECK(pair == merkleBlock.vMatchedTxn[0]); BOOST_CHECK(merkleBlock.vMatchedTxn[1].second == uint256S("0x28204cad1d7fc1d199e8ef4fa22f182de6258a3eaafe1bbe56ebdcacd3069a5f")); BOOST_CHECK(merkleBlock.vMatchedTxn[1].first == 1); BOOST_CHECK(merkleBlock.vMatchedTxn[2].second == uint256S("0x3c1d7e82342158e4109df2e0b6348b6e84e403d8b4046d7007663ace63cddb23")); BOOST_CHECK(merkleBlock.vMatchedTxn[2].first == 3); BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); } BOOST_AUTO_TEST_CASE(merkle_block_3_and_serialize) { // Random real block (000000000000dab0130bbcc991d3d7ae6b81aa6f50a798888dfe62337458dc45) // With one tx CBlock block; CDataStream stream(ParseHex("0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f196367291b4d4c86041b8fa45d630101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff08044c86041b020a02ffffffff0100f2052a01000000434104ecd3229b0571c3be876feaac0442a9f13c5a572742927af1dc623353ecf8c202225f64868137a18cdd85cbbb4c74fbccfd4f49639cf1bdc94a5672bb15ad5d4cac00000000"), SER_NETWORK, PROTOCOL_VERSION); stream >> block; CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL); // Match the only transaction filter.insert(uint256S("0x63194f18be0af63f2c6bc9dc0f777cbefed3d9415c4af83f3ee3a3d669c00cb5")); CMerkleBlock merkleBlock(block, filter); BOOST_CHECK(merkleBlock.header.GetHash() == 
block.GetHash()); BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 1); BOOST_CHECK(merkleBlock.vMatchedTxn[0].second == uint256S("0x63194f18be0af63f2c6bc9dc0f777cbefed3d9415c4af83f3ee3a3d669c00cb5")); BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 0); std::vector<uint256> vMatched; std::vector<unsigned int> vIndex; BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); CDataStream merkleStream(SER_NETWORK, PROTOCOL_VERSION); merkleStream << merkleBlock; std::vector<unsigned char> vch = ParseHex("0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f196367291b4d4c86041b8fa45d630100000001b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f19630101"); std::vector<char> expected(vch.size()); for (unsigned int i = 0; i < vch.size(); i++) expected[i] = (char)vch[i]; BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), merkleStream.begin(), merkleStream.end()); } BOOST_AUTO_TEST_CASE(merkle_block_4) { // Random real block (000000000000b731f2eef9e8c63173adfb07e41bd53eb0ef0a6b720d6cb6dea4) // With 7 txes CBlock block; CDataStream 
stream(ParseHex("0100000082bb869cf3a793432a66e826e05a6fc37469f8efb7421dc880670100000000007f16c5962e8bd963659c793ce370d95f093bc7e367117b3c30c1f8fdd0d9728776381b4d4c86041b554b85290701000000010000000000000000000000000000000000000000000000000000000000000000ffffffff07044c86041b0136ffffffff0100f2052a01000000434104eaafc2314def4ca98ac970241bcab022b9c1e1f4ea423a20f134c876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad1357231a2252247d97a46a91ac000000000100000001bcad20a6a29827d1424f08989255120bf7f3e9e3cdaaa6bb31b0737fe048724300000000494830450220356e834b046cadc0f8ebb5a8a017b02de59c86305403dad52cd77b55af062ea10221009253cd6c119d4729b77c978e1e2aa19f5ea6e0e52b3f16e32fa608cd5bab753901ffffffff02008d380c010000001976a9142b4b8072ecbba129b6453c63e129e643207249ca88ac0065cd1d000000001976a9141b8dd13b994bcfc787b32aeadf58ccb3615cbd5488ac000000000100000003fdacf9b3eb077412e7a968d2e4f11b9a9dee312d666187ed77ee7d26af16cb0b000000008c493046022100ea1608e70911ca0de5af51ba57ad23b9a51db8d28f82c53563c56a05c20f5a87022100a8bdc8b4a8acc8634c6b420410150775eb7f2474f5615f7fccd65af30f310fbf01410465fdf49e29b06b9a1582287b6279014f834edc317695d125ef623c1cc3aaece245bd69fcad7508666e9c74a49dc9056d5fc14338ef38118dc4afae5fe2c585caffffffff309e1913634ecb50f3c4f83e96e70b2df071b497b8973a3e75429df397b5af83000000004948304502202bdb79c596a9ffc24e96f4386199aba386e9bc7b6071516e2b51dda942b3a1ed022100c53a857e76b724fc14d45311eac5019650d415c3abb5428f3aae16d8e69bec2301ffffffff2089e33491695080c9edc18a428f7d834db5b6d372df13ce2b1b0e0cbcb1e6c10000000049483045022100d4ce67c5896ee251c810ac1ff9ceccd328b497c8f553ab6e08431e7d40bad6b5022033119c0c2b7d792d31f1187779c7bd95aefd93d90a715586d73801d9b47471c601ffffffff0100714460030000001976a914c7b55141d097ea5df7a0ed330cf794376e53ec8d88ac0000000001000000045bf0e214aa4069a3e792ecee1e1bf0c1d397cde8dd08138f4b72a00681743447000000008b48304502200c45de8c4f3e2c1821f2fc878cba97b1e6f8807d94930713aa1c86a67b9bf1e40221008581abfef2e30f957815fc89978423746b2086375ca8ecf359c85c2a5b7c88ad01410462bb73f76ca0994fcb8b4271e6fb7
561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffffffffd669f7d7958d40fc59d2253d88e0f248e29b599c80bbcec344a83dda5f9aa72c000000008a473044022078124c8beeaa825f9e0b30bff96e564dd859432f2d0cb3b72d3d5d93d38d7e930220691d233b6c0f995be5acb03d70a7f7a65b6bc9bdd426260f38a1346669507a3601410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95fffffffff878af0d93f5229a68166cf051fd372bb7a537232946e0a46f53636b4dafdaa4000000008c493046022100c717d1714551663f69c3c5759bdbb3a0fcd3fab023abc0e522fe6440de35d8290221008d9cbe25bffc44af2b18e81c58eb37293fd7fe1c2e7b46fc37ee8c96c50ab1e201410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffffffff27f2b668859cd7f2f894aa0fd2d9e60963bcd07c88973f425f999b8cbfd7a1e2000000008c493046022100e00847147cbf517bcc2f502f3ddc6d284358d102ed20d47a8aa788a62f0db780022100d17b2d6fa84dcaf1c95d88d7e7c30385aecf415588d749afd3ec81f6022cecd701410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffffffff0100c817a8040000001976a914b6efd80d99179f4f4ff6f4dd0a007d018c385d2188ac000000000100000001834537b2f1ce8ef9373a258e10545ce5a50b758df616cd4356e0032554ebd3c4000000008b483045022100e68f422dd7c34fdce11eeb4509ddae38201773dd62f284e8aa9d96f85099d0b002202243bd399ff96b649a0fad05fa759d6a882f0af8c90cf7632c2840c29070aec20141045e58067e815c2f464c6a2a15f987758374203895710c2d452442e28496ff38ba8f5fd901dc20e29e88477167fe4fc299bf818fd0d9e1632d467b2a3d9503b1aaffffffff0280d7e636030000001976a914f34c3e10eb387efe872acb614c89e78bfca7815d88ac404b4c00000000001976a914a84e272933aaf87e1715d7786c51dfaeb5b65a6f88ac00000000010000000143ac81c8e6f6ef307dfe17f3d906d999e23e0189fda838c5510d850927e03ae7000000008c4930460221009c87c344760a64cb8ae6685a3eec2c1ac1bed5b88c87de51acd0e124f266c16602210082d07c037359c3a257b5c63ebd90f5a5edf97b2ac1c434b08ca
998839f346dd40141040ba7e521fa7946d12edbb1d1e95a15c34bd4398195e86433c92b431cd315f455fe30032ede69cad9d1e1ed6c3c4ec0dbfced53438c625462afb792dcb098544bffffffff0240420f00000000001976a9144676d1b820d63ec272f1900d59d43bc6463d96f888ac40420f00000000001976a914648d04341d00d7968b3405c034adc38d4d8fb9bd88ac00000000010000000248cc917501ea5c55f4a8d2009c0567c40cfe037c2e71af017d0a452ff705e3f1000000008b483045022100bf5fdc86dc5f08a5d5c8e43a8c9d5b1ed8c65562e280007b52b133021acd9acc02205e325d613e555f772802bf413d36ba807892ed1a690a77811d3033b3de226e0a01410429fa713b124484cb2bd7b5557b2c0b9df7b2b1fee61825eadc5ae6c37a9920d38bfccdc7dc3cb0c47d7b173dbc9db8d37db0a33ae487982c59c6f8606e9d1791ffffffff41ed70551dd7e841883ab8f0b16bf04176b7d1480e4f0af9f3d4c3595768d068000000008b4830450221008513ad65187b903aed1102d1d0c47688127658c51106753fed0151ce9c16b80902201432b9ebcb87bd04ceb2de66035fbbaf4bf8b00d1cfe41f1a1f7338f9ad79d210141049d4cf80125bf50be1709f718c07ad15d0fc612b7da1f5570dddc35f2a352f0f27c978b06820edca9ef982c35fda2d255afba340068c5035552368bc7200c1488ffffffff0100093d00000000001976a9148edb68822f1ad580b043c7b3df2e400f8699eb4888ac00000000"), SER_NETWORK, PROTOCOL_VERSION); stream >> block; CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_ALL); // Match the last transaction filter.insert(uint256S("0x0a2a92f0bda4727d0a13eaddf4dd9ac6b5c61a1429e6b2b818f19b15df0ac154")); CMerkleBlock merkleBlock(block, filter); BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash()); BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 1); std::pair<unsigned int, uint256> pair = merkleBlock.vMatchedTxn[0]; BOOST_CHECK(merkleBlock.vMatchedTxn[0].second == uint256S("0x0a2a92f0bda4727d0a13eaddf4dd9ac6b5c61a1429e6b2b818f19b15df0ac154")); BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 6); std::vector<uint256> vMatched; std::vector<unsigned int> vIndex; BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < 
vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); // Also match the 4th transaction filter.insert(uint256S("0x02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041")); merkleBlock = CMerkleBlock(block, filter); BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash()); BOOST_CHECK(merkleBlock.vMatchedTxn.size() == 2); BOOST_CHECK(merkleBlock.vMatchedTxn[0].second == uint256S("0x02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041")); BOOST_CHECK(merkleBlock.vMatchedTxn[0].first == 3); BOOST_CHECK(merkleBlock.vMatchedTxn[1] == pair); BOOST_CHECK(merkleBlock.txn.ExtractMatches(vMatched, vIndex) == block.hashMerkleRoot); BOOST_CHECK(vMatched.size() == merkleBlock.vMatchedTxn.size()); for (unsigned int i = 0; i < vMatched.size(); i++) BOOST_CHECK(vMatched[i] == merkleBlock.vMatchedTxn[i].second); } BOOST_AUTO_TEST_CASE(merkle_block_4_test_p2pubkey_only) { // Random real block (000000000000b731f2eef9e8c63173adfb07e41bd53eb0ef0a6b720d6cb6dea4) // With 7 txes CBlock block; CDataStream 
stream(ParseHex("0100000082bb869cf3a793432a66e826e05a6fc37469f8efb7421dc880670100000000007f16c5962e8bd963659c793ce370d95f093bc7e367117b3c30c1f8fdd0d9728776381b4d4c86041b554b85290701000000010000000000000000000000000000000000000000000000000000000000000000ffffffff07044c86041b0136ffffffff0100f2052a01000000434104eaafc2314def4ca98ac970241bcab022b9c1e1f4ea423a20f134c876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad1357231a2252247d97a46a91ac000000000100000001bcad20a6a29827d1424f08989255120bf7f3e9e3cdaaa6bb31b0737fe048724300000000494830450220356e834b046cadc0f8ebb5a8a017b02de59c86305403dad52cd77b55af062ea10221009253cd6c119d4729b77c978e1e2aa19f5ea6e0e52b3f16e32fa608cd5bab753901ffffffff02008d380c010000001976a9142b4b8072ecbba129b6453c63e129e643207249ca88ac0065cd1d000000001976a9141b8dd13b994bcfc787b32aeadf58ccb3615cbd5488ac000000000100000003fdacf9b3eb077412e7a968d2e4f11b9a9dee312d666187ed77ee7d26af16cb0b000000008c493046022100ea1608e70911ca0de5af51ba57ad23b9a51db8d28f82c53563c56a05c20f5a87022100a8bdc8b4a8acc8634c6b420410150775eb7f2474f5615f7fccd65af30f310fbf01410465fdf49e29b06b9a1582287b6279014f834edc317695d125ef623c1cc3aaece245bd69fcad7508666e9c74a49dc9056d5fc14338ef38118dc4afae5fe2c585caffffffff309e1913634ecb50f3c4f83e96e70b2df071b497b8973a3e75429df397b5af83000000004948304502202bdb79c596a9ffc24e96f4386199aba386e9bc7b6071516e2b51dda942b3a1ed022100c53a857e76b724fc14d45311eac5019650d415c3abb5428f3aae16d8e69bec2301ffffffff2089e33491695080c9edc18a428f7d834db5b6d372df13ce2b1b0e0cbcb1e6c10000000049483045022100d4ce67c5896ee251c810ac1ff9ceccd328b497c8f553ab6e08431e7d40bad6b5022033119c0c2b7d792d31f1187779c7bd95aefd93d90a715586d73801d9b47471c601ffffffff0100714460030000001976a914c7b55141d097ea5df7a0ed330cf794376e53ec8d88ac0000000001000000045bf0e214aa4069a3e792ecee1e1bf0c1d397cde8dd08138f4b72a00681743447000000008b48304502200c45de8c4f3e2c1821f2fc878cba97b1e6f8807d94930713aa1c86a67b9bf1e40221008581abfef2e30f957815fc89978423746b2086375ca8ecf359c85c2a5b7c88ad01410462bb73f76ca0994fcb8b4271e6fb7
561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffffffffd669f7d7958d40fc59d2253d88e0f248e29b599c80bbcec344a83dda5f9aa72c000000008a473044022078124c8beeaa825f9e0b30bff96e564dd859432f2d0cb3b72d3d5d93d38d7e930220691d233b6c0f995be5acb03d70a7f7a65b6bc9bdd426260f38a1346669507a3601410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95fffffffff878af0d93f5229a68166cf051fd372bb7a537232946e0a46f53636b4dafdaa4000000008c493046022100c717d1714551663f69c3c5759bdbb3a0fcd3fab023abc0e522fe6440de35d8290221008d9cbe25bffc44af2b18e81c58eb37293fd7fe1c2e7b46fc37ee8c96c50ab1e201410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffffffff27f2b668859cd7f2f894aa0fd2d9e60963bcd07c88973f425f999b8cbfd7a1e2000000008c493046022100e00847147cbf517bcc2f502f3ddc6d284358d102ed20d47a8aa788a62f0db780022100d17b2d6fa84dcaf1c95d88d7e7c30385aecf415588d749afd3ec81f6022cecd701410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffffffff0100c817a8040000001976a914b6efd80d99179f4f4ff6f4dd0a007d018c385d2188ac000000000100000001834537b2f1ce8ef9373a258e10545ce5a50b758df616cd4356e0032554ebd3c4000000008b483045022100e68f422dd7c34fdce11eeb4509ddae38201773dd62f284e8aa9d96f85099d0b002202243bd399ff96b649a0fad05fa759d6a882f0af8c90cf7632c2840c29070aec20141045e58067e815c2f464c6a2a15f987758374203895710c2d452442e28496ff38ba8f5fd901dc20e29e88477167fe4fc299bf818fd0d9e1632d467b2a3d9503b1aaffffffff0280d7e636030000001976a914f34c3e10eb387efe872acb614c89e78bfca7815d88ac404b4c00000000001976a914a84e272933aaf87e1715d7786c51dfaeb5b65a6f88ac00000000010000000143ac81c8e6f6ef307dfe17f3d906d999e23e0189fda838c5510d850927e03ae7000000008c4930460221009c87c344760a64cb8ae6685a3eec2c1ac1bed5b88c87de51acd0e124f266c16602210082d07c037359c3a257b5c63ebd90f5a5edf97b2ac1c434b08ca
998839f346dd40141040ba7e521fa7946d12edbb1d1e95a15c34bd4398195e86433c92b431cd315f455fe30032ede69cad9d1e1ed6c3c4ec0dbfced53438c625462afb792dcb098544bffffffff0240420f00000000001976a9144676d1b820d63ec272f1900d59d43bc6463d96f888ac40420f00000000001976a914648d04341d00d7968b3405c034adc38d4d8fb9bd88ac00000000010000000248cc917501ea5c55f4a8d2009c0567c40cfe037c2e71af017d0a452ff705e3f1000000008b483045022100bf5fdc86dc5f08a5d5c8e43a8c9d5b1ed8c65562e280007b52b133021acd9acc02205e325d613e555f772802bf413d36ba807892ed1a690a77811d3033b3de226e0a01410429fa713b124484cb2bd7b5557b2c0b9df7b2b1fee61825eadc5ae6c37a9920d38bfccdc7dc3cb0c47d7b173dbc9db8d37db0a33ae487982c59c6f8606e9d1791ffffffff41ed70551dd7e841883ab8f0b16bf04176b7d1480e4f0af9f3d4c3595768d068000000008b4830450221008513ad65187b903aed1102d1d0c47688127658c51106753fed0151ce9c16b80902201432b9ebcb87bd04ceb2de66035fbbaf4bf8b00d1cfe41f1a1f7338f9ad79d210141049d4cf80125bf50be1709f718c07ad15d0fc612b7da1f5570dddc35f2a352f0f27c978b06820edca9ef982c35fda2d255afba340068c5035552368bc7200c1488ffffffff0100093d00000000001976a9148edb68822f1ad580b043c7b3df2e400f8699eb4888ac00000000"), SER_NETWORK, PROTOCOL_VERSION); stream >> block; CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_P2PUBKEY_ONLY); // Match the generation pubkey filter.insert(ParseHex("04eaafc2314def4ca98ac970241bcab022b9c1e1f4ea423a20f134c876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad1357231a2252247d97a46a91")); // ...and the output address of the 4th transaction filter.insert(ParseHex("b6efd80d99179f4f4ff6f4dd0a007d018c385d21")); CMerkleBlock merkleBlock(block, filter); BOOST_CHECK(merkleBlock.header.GetHash() == block.GetHash()); // We should match the generation outpoint BOOST_CHECK(filter.contains(COutPoint(uint256S("0x147caa76786596590baa4e98f5d9f48b86c7765e489f7a6ff3360fe5c674360b"), 0))); // ... 
but not the 4th transaction's output (its not pay-2-pubkey) BOOST_CHECK(!filter.contains(COutPoint(uint256S("0x02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041"), 0))); } BOOST_AUTO_TEST_CASE(merkle_block_4_test_update_none) { // Random real block (000000000000b731f2eef9e8c63173adfb07e41bd53eb0ef0a6b720d6cb6dea4) // With 7 txes CBlock block; CDataStream stream(ParseHex("0100000082bb869cf3a793432a66e826e05a6fc37469f8efb7421dc880670100000000007f16c5962e8bd963659c793ce370d95f093bc7e367117b3c30c1f8fdd0d9728776381b4d4c86041b554b85290701000000010000000000000000000000000000000000000000000000000000000000000000ffffffff07044c86041b0136ffffffff0100f2052a01000000434104eaafc2314def4ca98ac970241bcab022b9c1e1f4ea423a20f134c876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad1357231a2252247d97a46a91ac000000000100000001bcad20a6a29827d1424f08989255120bf7f3e9e3cdaaa6bb31b0737fe048724300000000494830450220356e834b046cadc0f8ebb5a8a017b02de59c86305403dad52cd77b55af062ea10221009253cd6c119d4729b77c978e1e2aa19f5ea6e0e52b3f16e32fa608cd5bab753901ffffffff02008d380c010000001976a9142b4b8072ecbba129b6453c63e129e643207249ca88ac0065cd1d000000001976a9141b8dd13b994bcfc787b32aeadf58ccb3615cbd5488ac000000000100000003fdacf9b3eb077412e7a968d2e4f11b9a9dee312d666187ed77ee7d26af16cb0b000000008c493046022100ea1608e70911ca0de5af51ba57ad23b9a51db8d28f82c53563c56a05c20f5a87022100a8bdc8b4a8acc8634c6b420410150775eb7f2474f5615f7fccd65af30f310fbf01410465fdf49e29b06b9a1582287b6279014f834edc317695d125ef623c1cc3aaece245bd69fcad7508666e9c74a49dc9056d5fc14338ef38118dc4afae5fe2c585caffffffff309e1913634ecb50f3c4f83e96e70b2df071b497b8973a3e75429df397b5af83000000004948304502202bdb79c596a9ffc24e96f4386199aba386e9bc7b6071516e2b51dda942b3a1ed022100c53a857e76b724fc14d45311eac5019650d415c3abb5428f3aae16d8e69bec2301ffffffff2089e33491695080c9edc18a428f7d834db5b6d372df13ce2b1b0e0cbcb1e6c10000000049483045022100d4ce67c5896ee251c810ac1ff9ceccd328b497c8f553ab6e08431e7d40bad6b5022033119c0c2b7d792d31f1187779c7bd95aefd93d90a7
15586d73801d9b47471c601ffffffff0100714460030000001976a914c7b55141d097ea5df7a0ed330cf794376e53ec8d88ac0000000001000000045bf0e214aa4069a3e792ecee1e1bf0c1d397cde8dd08138f4b72a00681743447000000008b48304502200c45de8c4f3e2c1821f2fc878cba97b1e6f8807d94930713aa1c86a67b9bf1e40221008581abfef2e30f957815fc89978423746b2086375ca8ecf359c85c2a5b7c88ad01410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffffffffd669f7d7958d40fc59d2253d88e0f248e29b599c80bbcec344a83dda5f9aa72c000000008a473044022078124c8beeaa825f9e0b30bff96e564dd859432f2d0cb3b72d3d5d93d38d7e930220691d233b6c0f995be5acb03d70a7f7a65b6bc9bdd426260f38a1346669507a3601410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95fffffffff878af0d93f5229a68166cf051fd372bb7a537232946e0a46f53636b4dafdaa4000000008c493046022100c717d1714551663f69c3c5759bdbb3a0fcd3fab023abc0e522fe6440de35d8290221008d9cbe25bffc44af2b18e81c58eb37293fd7fe1c2e7b46fc37ee8c96c50ab1e201410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffffffff27f2b668859cd7f2f894aa0fd2d9e60963bcd07c88973f425f999b8cbfd7a1e2000000008c493046022100e00847147cbf517bcc2f502f3ddc6d284358d102ed20d47a8aa788a62f0db780022100d17b2d6fa84dcaf1c95d88d7e7c30385aecf415588d749afd3ec81f6022cecd701410462bb73f76ca0994fcb8b4271e6fb7561f5c0f9ca0cf6485261c4a0dc894f4ab844c6cdfb97cd0b60ffb5018ffd6238f4d87270efb1d3ae37079b794a92d7ec95ffffffff0100c817a8040000001976a914b6efd80d99179f4f4ff6f4dd0a007d018c385d2188ac000000000100000001834537b2f1ce8ef9373a258e10545ce5a50b758df616cd4356e0032554ebd3c4000000008b483045022100e68f422dd7c34fdce11eeb4509ddae38201773dd62f284e8aa9d96f85099d0b002202243bd399ff96b649a0fad05fa759d6a882f0af8c90cf7632c2840c29070aec20141045e58067e815c2f464c6a2a15f987758374203895710c2d452442e28496ff38ba8f5fd901dc20e29e88477167fe4fc299bf818fd0d9e1632d467b2a3d9503b1a
affffffff0280d7e636030000001976a914f34c3e10eb387efe872acb614c89e78bfca7815d88ac404b4c00000000001976a914a84e272933aaf87e1715d7786c51dfaeb5b65a6f88ac00000000010000000143ac81c8e6f6ef307dfe17f3d906d999e23e0189fda838c5510d850927e03ae7000000008c4930460221009c87c344760a64cb8ae6685a3eec2c1ac1bed5b88c87de51acd0e124f266c16602210082d07c037359c3a257b5c63ebd90f5a5edf97b2ac1c434b08ca998839f346dd40141040ba7e521fa7946d12edbb1d1e95a15c34bd4398195e86433c92b431cd315f455fe30032ede69cad9d1e1ed6c3c4ec0dbfced53438c625462afb792dcb098544bffffffff0240420f00000000001976a9144676d1b820d63ec272f1900d59d43bc6463d96f888ac40420f00000000001976a914648d04341d00d7968b3405c034adc38d4d8fb9bd88ac00000000010000000248cc917501ea5c55f4a8d2009c0567c40cfe037c2e71af017d0a452ff705e3f1000000008b483045022100bf5fdc86dc5f08a5d5c8e43a8c9d5b1ed8c65562e280007b52b133021acd9acc02205e325d613e555f772802bf413d36ba807892ed1a690a77811d3033b3de226e0a01410429fa713b124484cb2bd7b5557b2c0b9df7b2b1fee61825eadc5ae6c37a9920d38bfccdc7dc3cb0c47d7b173dbc9db8d37db0a33ae487982c59c6f8606e9d1791ffffffff41ed70551dd7e841883ab8f0b16bf04176b7d1480e4f0af9f3d4c3595768d068000000008b4830450221008513ad65187b903aed1102d1d0c47688127658c51106753fed0151ce9c16b80902201432b9ebcb87bd04ceb2de66035fbbaf4bf8b00d1cfe41f1a1f7338f9ad79d210141049d4cf80125bf50be1709f718c07ad15d0fc612b7da1f5570dddc35f2a352f0f27c978b06820edca9ef982c35fda2d255afba340068c5035552368bc7200c1488ffffffff0100093d00000000001976a9148edb68822f1ad580b043c7b3df2e400f8699eb4888ac00000000"), SER_NETWORK, PROTOCOL_VERSION); stream >> block; CBloomFilter filter(10, 0.000001, 0, BLOOM_UPDATE_NONE); // Match the generation pubkey filter.insert(ParseHex("04eaafc2314def4ca98ac970241bcab022b9c1e1f4ea423a20f134c876f2c01ec0f0dd5b2e86e7168cefe0d81113c3807420ce13ad1357231a2252247d97a46a91")); // ...and the output address of the 4th transaction filter.insert(ParseHex("b6efd80d99179f4f4ff6f4dd0a007d018c385d21")); CMerkleBlock merkleBlock(block, filter); BOOST_CHECK(merkleBlock.header.GetHash() == 
block.GetHash());

    // We shouldn't match any outpoints (UPDATE_NONE)
    BOOST_CHECK(!filter.contains(COutPoint(uint256S("0x147caa76786596590baa4e98f5d9f48b86c7765e489f7a6ff3360fe5c674360b"), 0)));
    BOOST_CHECK(!filter.contains(COutPoint(uint256S("0x02981fa052f0481dbc5868f4fc2166035a10f27a03cfd2de67326471df5bc041"), 0)));
}

// Returns 32 bytes of fresh random data as a byte vector (used as
// arbitrary filter keys below).
static std::vector<unsigned char> RandomData()
{
    uint256 r = GetRandHash();
    return std::vector<unsigned char>(r.begin(), r.end());
}

// Exercises CRollingBloomFilter: overfill behavior, false-positive
// rate, reset(), and the guarantee that the most recent N insertions
// are always remembered.
BOOST_AUTO_TEST_CASE(rolling_bloom)
{
    // last-100-entry, 1% false positive:
    CRollingBloomFilter rb1(100, 0.01);

    // Overfill:
    static const int DATASIZE=399;
    std::vector<unsigned char> data[DATASIZE];
    for (int i = 0; i < DATASIZE; i++) {
        data[i] = RandomData();
        rb1.insert(data[i]);
    }
    // Last 100 guaranteed to be remembered:
    for (int i = 299; i < DATASIZE; i++) {
        BOOST_CHECK(rb1.contains(data[i]));
    }

    // false positive rate is 1%, so we should get about 100 hits if
    // testing 10,000 random keys. We get worst-case false positive
    // behavior when the filter is as full as possible, which is
    // when we've inserted one minus an integer multiple of nElement*2.
    unsigned int nHits = 0;
    for (int i = 0; i < 10000; i++) {
        if (rb1.contains(RandomData()))
            ++nHits;
    }
    // Run test_youlike with --log_level=message to see BOOST_TEST_MESSAGEs:
    BOOST_TEST_MESSAGE("RollingBloomFilter got " << nHits << " false positives (~100 expected)");

    // Insanely unlikely to get a fp count outside this range:
    BOOST_CHECK(nHits > 25);
    BOOST_CHECK(nHits < 175);

    BOOST_CHECK(rb1.contains(data[DATASIZE-1]));
    rb1.reset();
    BOOST_CHECK(!rb1.contains(data[DATASIZE-1]));

    // Now roll through data, make sure last 100 entries
    // are always remembered:
    for (int i = 0; i < DATASIZE; i++) {
        if (i >= 100)
            BOOST_CHECK(rb1.contains(data[i-100]));
        rb1.insert(data[i]);
        BOOST_CHECK(rb1.contains(data[i]));
    }

    // Insert 999 more random entries:
    for (int i = 0; i < 999; i++) {
        std::vector<unsigned char> d = RandomData();
        rb1.insert(d);
        BOOST_CHECK(rb1.contains(d));
    }
    // Sanity check to make sure the filter isn't just filling up:
    nHits = 0;
    for (int i = 0; i < DATASIZE; i++) {
        if (rb1.contains(data[i]))
            ++nHits;
    }
    // Expect about 5 false positives, more than 100 means
    // something is definitely broken.
    BOOST_TEST_MESSAGE("RollingBloomFilter got " << nHits << " false positives (~5 expected)");
    BOOST_CHECK(nHits < 100);

    // last-1000-entry, 0.01% false positive:
    CRollingBloomFilter rb2(1000, 0.001);
    for (int i = 0; i < DATASIZE; i++) {
        rb2.insert(data[i]);
    }
    // ... room for all of them:
    for (int i = 0; i < DATASIZE; i++) {
        BOOST_CHECK(rb2.contains(data[i]));
    }
}

BOOST_AUTO_TEST_SUITE_END()
#ifndef DI_SYSTEMS_VR_TEXTURE_DATA_VULKAN_HPP_ #define DI_SYSTEMS_VR_TEXTURE_DATA_VULKAN_HPP_ #include <cstdint> #include <openvr.h> typedef std::uint64_t VkImage ; typedef struct VkDevice_T* VkDevice ; typedef struct VkInstance_T* VkInstance ; typedef struct VkPhysicalDevice_T* VkPhysicalDevice; typedef struct VkQueue_T* VkQueue ; namespace di { struct texture_data_vulkan { VkImage image ; VkDevice device ; VkPhysicalDevice physical_device ; VkInstance instance ; VkQueue queue ; std::uint32_t queue_family_index ; std::uint32_t width, height, format, sample_count; }; } #endif
// Filename: mayaNodeDesc.cxx
// Created by:  drose (06Jun03)
//
////////////////////////////////////////////////////////////////////
//
// PANDA 3D SOFTWARE
// Copyright (c) Carnegie Mellon University.  All rights reserved.
//
// All use of this software is subject to the terms of the revised BSD
// license.  You should have received a copy of this license along
// with this source code in a file named "LICENSE."
//
////////////////////////////////////////////////////////////////////

#include "mayaNodeDesc.h"
#include "mayaNodeTree.h"
#include "mayaBlendDesc.h"
#include "mayaToEggConverter.h"
#include "maya_funcs.h"
#include "eggGroup.h"
#include "config_mayaegg.h"

#include "pre_maya_include.h"
#include <maya/MFnBlendShapeDeformer.h>
#include <maya/MItDependencyGraph.h>
#include <maya/MFnNurbsSurface.h>
#include <maya/MFnMesh.h>
#include "post_maya_include.h"

TypeHandle MayaNodeDesc::_type_handle;

// This is a list of the names of Maya connections that count as a
// transform.  A node whose attributes with any of these names have an
// incoming connection is treated as an animated joint (see
// from_dag_path below).
static const char *transform_connections[] = {
  "translate",
  "translateX",
  "translateY",
  "translateZ",
  "rotate",
  "rotateX",
  "rotateY",
  "rotateZ",
};
static const int num_transform_connections = sizeof(transform_connections) / sizeof(const char *);

////////////////////////////////////////////////////////////////////
//     Function: MayaNodeDesc::Constructor
//       Access: Public
//  Description: Creates a node in the tree with the given name and
//               parent, with all pointers null and all flags cleared.
//               The Maya dag path is attached later via
//               from_dag_path().
////////////////////////////////////////////////////////////////////
MayaNodeDesc::
MayaNodeDesc(MayaNodeTree *tree, MayaNodeDesc *parent, const string &name) :
  Namable(name),
  _tree(tree),
  _parent(parent)
{
  _dag_path = (MDagPath *)NULL;
  _egg_group = (EggGroup *)NULL;
  _egg_table = (EggTable *)NULL;
  _anim = (EggXfmSAnim *)NULL;
  _joint_type = JT_none;
  _is_lod = false;
  _tagged = false;
  _joint_tagged = false;

  // Add ourselves to our parent.
  if (_parent != (MayaNodeDesc *)NULL) {
    _parent->_children.push_back(this);
  }
}

////////////////////////////////////////////////////////////////////
//     Function: MayaNodeDesc::Destructor
//       Access: Public
//  Description: Frees the copied MDagPath, if one was attached.
//               Child MayaNodeDesc objects are owned by the tree,
//               not by this node.
////////////////////////////////////////////////////////////////////
MayaNodeDesc::
~MayaNodeDesc() {
  if (_dag_path != (MDagPath *)NULL) {
    delete _dag_path;
  }
}

////////////////////////////////////////////////////////////////////
//     Function: MayaNodeDesc::from_dag_path
//       Access: Public
//  Description: Indicates an association between the MayaNodeDesc and
//               some Maya instance.  Stores a copy of the dag path,
//               classifies the node as a joint (explicit joint,
//               user-forced joint, or a node with animated transform
//               inputs), and scans NURBS/mesh shapes for blend
//               shapes.  Only the first call has any effect.
////////////////////////////////////////////////////////////////////
void MayaNodeDesc::
from_dag_path(const MDagPath &dag_path, MayaToEggConverter *converter) {
  MStatus status;

  if (_dag_path == (MDagPath *)NULL) {
    _dag_path = new MDagPath(dag_path);

    string name;
    MFnDagNode dag_node(dag_path, &status);
    if (!status) {
      status.perror("MFnDagNode constructor");
    } else {
      name = dag_node.name().asChar();
    }

    if (_dag_path->hasFn(MFn::kJoint) || converter->force_joint(name)) {
      // This node is a joint, or the user specifically asked to treat
      // it like a joint.
      _joint_type = JT_joint;
      if (_parent != (MayaNodeDesc *)NULL) {
        _parent->mark_joint_parent();
      }

    } else {
      // The node is not a joint, but maybe its transform is
      // controlled by connected inputs.  If so, we should treat it
      // like a joint.
      bool transform_connected = false;

      MStatus status;
      MObject node = dag_path.node(&status);
      if (status) {
        // Check each of the transform-related attributes for an
        // incoming connection.
        for (int i = 0;
             i < num_transform_connections && !transform_connected;
             i++) {
          if (is_connected(node, transform_connections[i])) {
            transform_connected = true;
          }
        }
      }

      if (transform_connected) {
        _joint_type = JT_joint;
        if (_parent != (MayaNodeDesc *)NULL) {
          _parent->mark_joint_parent();
        }
      }
    }

    // Look for blend shapes on the shape attached to this node.  The
    // attribute scanned differs by shape type: "create" for NURBS,
    // "inMesh" for polygon meshes.
    if (dag_path.hasFn(MFn::kNurbsSurface)) {
      MFnNurbsSurface surface(dag_path, &status);
      if (status) {
        check_blend_shapes(surface, "create");
      }
    } else if (dag_path.hasFn(MFn::kMesh)) {
      MFnMesh mesh(dag_path, &status);
      if (status) {
        check_blend_shapes(mesh, "inMesh");
      }
    }
  }
}

////////////////////////////////////////////////////////////////////
//     Function: MayaNodeDesc::has_dag_path
//       Access: Public
//  Description: Returns true if a Maya dag path has been associated
//               with this node, false otherwise.
////////////////////////////////////////////////////////////////////
bool MayaNodeDesc::
has_dag_path() const {
  return (_dag_path != (MDagPath *)NULL);
}

////////////////////////////////////////////////////////////////////
//     Function: MayaNodeDesc::get_dag_path
//       Access: Public
//  Description: Returns the dag path associated with this node.  It
//               is an error to call this unless has_dag_path()
//               returned true.
//               NOTE(review): the nassertr fallback dereferences
//               _dag_path, which is exactly the pointer being
//               asserted non-null -- the fallback path is only
//               reached on assertion failure, but looks unsafe;
//               confirm against Panda's nassertr semantics.
////////////////////////////////////////////////////////////////////
const MDagPath &MayaNodeDesc::
get_dag_path() const {
  nassertr(_dag_path != (MDagPath *)NULL, *_dag_path);
  return *_dag_path;
}

////////////////////////////////////////////////////////////////////
// Function: MayaNodeDesc::get_num_blend_descs
// Access: Public
// Description: Returns the number of unique MayaBlendDesc objects
//              (and hence the number of morph sliders) that affect
//              the geometry in this node.
//////////////////////////////////////////////////////////////////// int MayaNodeDesc:: get_num_blend_descs() const { return _blend_descs.size(); } //////////////////////////////////////////////////////////////////// // Function: MayaNodeDesc::get_blend_desc // Access: Public // Description: Returns the nth MayaBlendDesc object that affects the // geometry in this node. //////////////////////////////////////////////////////////////////// MayaBlendDesc *MayaNodeDesc:: get_blend_desc(int n) const { nassertr(n >= 0 && n < (int)_blend_descs.size(), NULL); return _blend_descs[n]; } //////////////////////////////////////////////////////////////////// // Function: MayaNodeDesc::is_joint // Access: Public // Description: Returns true if the node should be treated as a joint // by the converter. //////////////////////////////////////////////////////////////////// bool MayaNodeDesc:: is_joint() const { //return _joint_type == JT_joint || _joint_type == JT_pseudo_joint; return _joint_tagged && (_joint_type == JT_joint || _joint_type == JT_pseudo_joint); } //////////////////////////////////////////////////////////////////// // Function: MayaNodeDesc::is_joint_parent // Access: Public // Description: Returns true if the node is the parent or ancestor of // a joint. //////////////////////////////////////////////////////////////////// bool MayaNodeDesc:: is_joint_parent() const { return _joint_type == JT_joint_parent; //return _joint_tagged && (_joint_type == JT_joint_parent); } //////////////////////////////////////////////////////////////////// // Function: MayaNodeDesc::is_joint_tagged // Access: Public // Description: Returns true if the node has been joint_tagged to be // converted, false otherwise. 
//////////////////////////////////////////////////////////////////// bool MayaNodeDesc:: is_joint_tagged() const { return _joint_tagged; } //////////////////////////////////////////////////////////////////// // Function: MayaNodeDesc::tag_joint // Access: Private // Description: Tags this node for conversion, but does not tag child // nodes. //////////////////////////////////////////////////////////////////// void MayaNodeDesc:: tag_joint() { _joint_tagged = true; } //////////////////////////////////////////////////////////////////// // Function: MayaNodeDesc::tag_joint_recursively // Access: Private // Description: Tags this node and all descendant nodes for // conversion. //////////////////////////////////////////////////////////////////// void MayaNodeDesc:: tag_joint_recursively() { _joint_tagged = true; //mayaegg_cat.info() << "tjr: " << get_name() << endl; Children::const_iterator ci; for (ci = _children.begin(); ci != _children.end(); ++ci) { MayaNodeDesc *child = (*ci); child->tag_joint_recursively(); } } //////////////////////////////////////////////////////////////////// // Function: MayaNodeDesc::is_tagged // Access: Public // Description: Returns true if the node has been tagged to be // converted, false otherwise. //////////////////////////////////////////////////////////////////// bool MayaNodeDesc:: is_tagged() const { return _tagged; } //////////////////////////////////////////////////////////////////// // Function: MayaNodeDesc::tag // Access: Private // Description: Tags this node for conversion, but does not tag child // nodes. //////////////////////////////////////////////////////////////////// void MayaNodeDesc:: tag() { _tagged = true; } //////////////////////////////////////////////////////////////////// // Function: MayaNodeDesc::untag // Access: Private // Description: Un-tags this node for conversion, but does not tag child // nodes. 
//////////////////////////////////////////////////////////////////// void MayaNodeDesc:: untag() { _tagged = false; } //////////////////////////////////////////////////////////////////// // Function: MayaNodeDesc::tag_recursively // Access: Private // Description: Tags this node and all descendant nodes for // conversion. //////////////////////////////////////////////////////////////////// void MayaNodeDesc:: tag_recursively() { _tagged = true; Children::const_iterator ci; for (ci = _children.begin(); ci != _children.end(); ++ci) { MayaNodeDesc *child = (*ci); child->tag_recursively(); } } //////////////////////////////////////////////////////////////////// // Function: MayaNodeDesc::untag_recursively // Access: Private // Description: Un-tags this node and all descendant nodes for // conversion. //////////////////////////////////////////////////////////////////// void MayaNodeDesc:: untag_recursively() { _tagged = false; Children::const_iterator ci; for (ci = _children.begin(); ci != _children.end(); ++ci) { MayaNodeDesc *child = (*ci); child->untag_recursively(); } } //////////////////////////////////////////////////////////////////// // Function: MayaNodeDesc::has_object_type // Access: Public // Description: Returns true if this node or any of its parent // has_object_type of object_type. //////////////////////////////////////////////////////////////////// bool MayaNodeDesc:: has_object_type(string object_type) const { bool ret = false; if ((_egg_group != (EggGroup*) NULL) && _egg_group->has_object_type(object_type)) { return true; } if (_parent != (MayaNodeDesc *)NULL) { ret |= _parent->has_object_type(object_type); } return ret; } //////////////////////////////////////////////////////////////////// // Function: MayaNodeDesc::clear_egg // Access: Private // Description: Recursively clears the egg pointers from this node // and all children. 
////////////////////////////////////////////////////////////////////
void MayaNodeDesc::
clear_egg() {
  _egg_group = (EggGroup *)NULL;
  _egg_table = (EggTable *)NULL;
  _anim = (EggXfmSAnim *)NULL;

  Children::const_iterator ci;
  for (ci = _children.begin(); ci != _children.end(); ++ci) {
    MayaNodeDesc *child = (*ci);
    child->clear_egg();
  }
}

////////////////////////////////////////////////////////////////////
//     Function: MayaNodeDesc::mark_joint_parent
//       Access: Private
//  Description: Indicates that this node has at least one child that
//               is a joint or a pseudo-joint.  Propagates upward
//               until an ancestor already classified is reached.
////////////////////////////////////////////////////////////////////
void MayaNodeDesc::
mark_joint_parent() {
  if (_joint_type == JT_none) {
    _joint_type = JT_joint_parent;
    if (_parent != (MayaNodeDesc *)NULL) {
      _parent->mark_joint_parent();
    }
  }
}

////////////////////////////////////////////////////////////////////
//     Function: MayaNodeDesc::check_pseudo_joints
//       Access: Private
//  Description: Walks the hierarchy, looking for non-joint nodes that
//               are both children and parents of a joint.  These
//               nodes are deemed to be pseudo joints, since the
//               converter must treat them as joints.
////////////////////////////////////////////////////////////////////
void MayaNodeDesc::
check_pseudo_joints(bool joint_above) {
  // space_count / space produce the indentation used by the spam-level
  // debug output below; the static counter tracks recursion depth.
  static PN_uint32 space_count = 0;
  string space;
  for (PN_uint32 idx=0; idx<space_count; ++idx) {
    space.append(" ");
  }
  if (mayaegg_cat.is_spam()) {
    mayaegg_cat.spam() << "cpj:" << space << get_name() << " joint_type: " << _joint_type << endl;
  }
  if (_joint_type == JT_joint_parent && joint_above) {
    // This is one such node: it is the parent of a joint
    // (JT_joint_parent is set), and it is the child of a joint
    // (joint_above is set).
    _joint_type = JT_pseudo_joint;
  }

  if (_joint_type == JT_joint) {
    // If this node is itself a joint, then joint_above is true for
    // all child nodes.
    joint_above = true;
  }

  // Don't bother traversing further if _joint_type is none, since
  // that means this node has no joint children.
  if (_joint_type != JT_none) {

    bool any_joints = false;
    Children::const_iterator ci;
    for (ci = _children.begin(); ci != _children.end(); ++ci) {
      MayaNodeDesc *child = (*ci);
      if (mayaegg_cat.is_spam()) {
        // Deepen the debug indentation for the recursive call.
        ++space_count;
      }
      child->check_pseudo_joints(joint_above);
      //if (child->is_joint()) {
      if (child->_joint_type == JT_joint || child->_joint_type == JT_pseudo_joint) {
        any_joints = true;
      }
    }

    // If any children qualify as joints, then any sibling nodes that
    // are parents of joints are also elevated to joints.
    if (any_joints) {
      bool all_joints = true;
      for (ci = _children.begin(); ci != _children.end(); ++ci) {
        MayaNodeDesc *child = (*ci);
        MStatus status;
        MFnDagNode dag_node(child->get_dag_path(), &status);
        if (!status) {
          status.perror("MFnDagNode constructor");
        }
        string type_name = dag_node.typeName().asChar();
        if (child->_joint_type == JT_joint_parent) {
          child->_joint_type = JT_pseudo_joint;

        } else if (child->_joint_type == JT_none) {
          if (mayaegg_cat.is_spam()) {
            mayaegg_cat.spam() << "cpj: " << space << "jt_none for " << child->get_name() << endl;
          }
          // Nodes whose Maya type name contains "transform" do not
          // disqualify an all-joint sibling group; anything else does.
          if (type_name.find("transform") == string::npos) {
            if (mayaegg_cat.is_spam()) {
              mayaegg_cat.spam() << "cpj: " << space << "all_joints false for " << get_name() << endl;
            }
            all_joints = false;
          }
        }
      }

      if (all_joints) {
        // Finally, if all children are joints, then we are too.
        if (_joint_type == JT_joint_parent) {
          if (!get_name().empty()) { // make sure parent of root is not a joint
            _joint_type = JT_pseudo_joint;
          }
        }
      }
    }
  }
  if (mayaegg_cat.is_spam()) {
    // Restore the debug indentation depth on the way out.
    if (space_count > 0)
      --space_count;
  }
}

////////////////////////////////////////////////////////////////////
// Function: MayaNodeDesc::check_blend_shapes
// Access: Private
// Description: Looks for blend shapes on a NURBS surface or polygon
//              mesh and records any blend shapes found.
// This is
//              similar to MayaToEggConverter::get_vertex_weights(),
//              which checks for membership of vertices to joints;
//              Maya stores the blend shape table in the same place.
//              See the comments in get_vertex_weights() for a more
//              in-depth description of the iteration process here.
////////////////////////////////////////////////////////////////////
void MayaNodeDesc::
check_blend_shapes(const MFnDagNode &node, const string &attrib_name) {
  MStatus status;

  MObject attr = node.attribute(attrib_name.c_str());

  // Walk upstream through the dependency graph from the named
  // attribute, looking for blendShape deformer nodes.
  MPlug history(node.object(), attr);
  MItDependencyGraph it(history, MFn::kDependencyNode,
                        MItDependencyGraph::kUpstream,
                        MItDependencyGraph::kDepthFirst,
                        MItDependencyGraph::kNodeLevel);
  while (!it.isDone()) {
    MObject c_node = it.thisNode();
    if (c_node.hasFn(MFn::kBlendShape)) {
      MFnBlendShapeDeformer blends(c_node, &status);
      if (!status) {
        status.perror("MFnBlendShapeDeformer constructor");

      } else {
        // Check if the slider is a "parallel blender", which is a
        // construct created by Maya for Maya's internal purposes
        // only.  We don't want to fiddle with the parallel blenders.
        MPlug plug = blends.findPlug("pb");
        bool is_parallel_blender;
        status = plug.getValue(is_parallel_blender);
        if (!status) {
          status.perror("Could not get value of pb plug.");
          is_parallel_blender = false;
        }

        if (is_parallel_blender ||
            _tree->ignore_slider(blends.name().asChar())) {
          _tree->report_ignored_slider(blends.name().asChar());

        } else {
          // Record one MayaBlendDesc (i.e. one morph slider) per
          // weight index, shared through the tree so duplicates
          // collapse to a single object.
          MObjectArray base_objects;
          status = blends.getBaseObjects(base_objects);
          if (!status) {
            status.perror("MFnBlendShapeDeformer::getBaseObjects");
          } else {
            for (unsigned int oi = 0; oi < base_objects.length(); oi++) {
              MObject base_object = base_objects[oi];

              MIntArray index_list;
              status = blends.weightIndexList(index_list);
              if (!status) {
                status.perror("MFnBlendShapeDeformer::weightIndexList");
              } else {
                for (unsigned int i = 0; i < index_list.length(); i++) {
                  int wi = index_list[i];
                  PT(MayaBlendDesc) blend_desc = new MayaBlendDesc(blends, wi);
                  // add_blend_desc() returns the canonical instance,
                  // which may be a pre-existing equivalent object.
                  blend_desc = _tree->add_blend_desc(blend_desc);
                  _blend_descs.push_back(blend_desc);
                }
              }
            }
          }
        }
      }
    }

    it.next();
  }
}

////////////////////////////////////////////////////////////////////
//     Function: MayaNodeDesc::check_lods
//       Access: Private
//  Description: Walks through the hierarchy again and checks for LOD
//               specifications.  Any such specifications found are
//               recorded on the child nodes of the lodGroups
//               themselves: the nodes that actually switch in and
//               out.  (This is the way they are recorded in an egg
//               file.)
////////////////////////////////////////////////////////////////////
void MayaNodeDesc::
check_lods() {
  // Walk through the children first.  This makes it easier in the
  // below (we only have to return in the event of an error).
  Children::iterator ci;
  for (ci = _children.begin(); ci != _children.end(); ++ci) {
    MayaNodeDesc *child = (*ci);
    child->check_lods();
  }

  // Now consider whether this node is an lodGroup.
  if (_dag_path != (MDagPath *)NULL &&
      _dag_path->hasFn(MFn::kLodGroup)) {
    // This node is a parent lodGroup; its children, therefore, are
    // LOD's.
    MStatus status;
    MFnDagNode dag_node(*_dag_path, &status);
    if (!status) {
      status.perror("Couldn't get node from dag path for lodGroup");
      return;
    }

    MPlug plug = dag_node.findPlug("threshold", &status);
    if (!status) {
      status.perror("Couldn't get threshold attributes on lodGroup");
      return;
    }

    // There ought to be the one fewer elements in the array than
    // there are children of the node.
    unsigned int num_elements = plug.numElements();
    unsigned int num_children = _children.size();
    if (num_elements + 1 != num_children) {
      mayaegg_cat.warning()
        << "Node " << get_name() << " has " << num_elements
        << " LOD entries, but " << num_children << " children.\n";
    }

    // Should we also consider cameraMatrix, to transform the LOD's
    // origin?  It's not clear precisely what this transform matrix
    // means in Maya, so we'll wait until we have a sample file that
    // demonstrates its use.

    // Each threshold element i gives the switch-in distance for child
    // i; the previous child's switch-in becomes this child's
    // switch-out.
    double switch_out = 0.0;
    unsigned int i = 0;
    while (i < num_elements && i < num_children) {
      MPlug element = plug.elementByLogicalIndex(i);
      MayaNodeDesc *child = _children[i];

      double switch_in;
      status = element.getValue(switch_in);
      if (!status) {
        status.perror("Couldn't get double value from threshold.");
        return;
      }

      child->_is_lod = true;
      child->_switch_in = switch_in;
      child->_switch_out = switch_out;

      switch_out = switch_in;
      ++i;
    }

    while (i < num_children) {
      // Also set the last child(ren).  Maya wants this to switch in
      // at infinity, but Panda doesn't have such a concept; we'll
      // settle for four times the switch_out distance.
      MayaNodeDesc *child = _children[i];
      child->_is_lod = true;
      child->_switch_in = switch_out * 4.0;
      child->_switch_out = switch_out;
      ++i;
    }
  }
}
/* * Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2012 Apple Inc. All rights reserved. * Copyright (C) 2005 Alexey Proskuryakov. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "config.h" #include "core/editing/PlainTextRange.h" #include "core/dom/ContainerNode.h" #include "core/dom/Document.h" #include "core/dom/Range.h" #include "core/editing/TextIterator.h" #include "core/editing/VisiblePosition.h" namespace WebCore { PlainTextRange::PlainTextRange() : m_start(kNotFound) , m_end(kNotFound) { } PlainTextRange::PlainTextRange(int location) : m_start(location) , m_end(location) { ASSERT(location >= 0); } PlainTextRange::PlainTextRange(int start, int end) : m_start(start) , m_end(end) { ASSERT(start >= 0); ASSERT(end >= 0); ASSERT(start <= end); } PassRefPtrWillBeRawPtr<Range> PlainTextRange::createRange(const ContainerNode& scope) const { return createRangeFor(scope, ForGeneric); } PassRefPtrWillBeRawPtr<Range> PlainTextRange::createRangeForSelection(const ContainerNode& scope) const { return createRangeFor(scope, ForSelection); } PassRefPtrWillBeRawPtr<Range> PlainTextRange::createRangeFor(const ContainerNode& scope, GetRangeFor getRangeFor) const { ASSERT(isNotNull()); RefPtrWillBeRawPtr<Range> resultRange = scope.document().createRange(); size_t docTextPosition = 0; bool startRangeFound = false; RefPtrWillBeRawPtr<Range> textRunRange = nullptr; TextIteratorBehaviorFlags behaviorFlags = TextIteratorEmitsObjectReplacementCharacter; if (getRangeFor == ForSelection) behaviorFlags |= TextIteratorEmitsCharactersBetweenAllVisiblePositions; TextIterator it(rangeOfContents(const_cast<ContainerNode*>(&scope)).get(), behaviorFlags); // FIXME: the atEnd() check shouldn't be necessary, workaround for <http://bugs.webkit.org/show_bug.cgi?id=6289>. 
if (!start() && !length() && it.atEnd()) { textRunRange = it.range(); resultRange->setStart(textRunRange->startContainer(), 0, ASSERT_NO_EXCEPTION); resultRange->setEnd(textRunRange->startContainer(), 0, ASSERT_NO_EXCEPTION); return resultRange.release(); } for (; !it.atEnd(); it.advance()) { int len = it.length(); textRunRange = it.range(); bool foundStart = start() >= docTextPosition && start() <= docTextPosition + len; bool foundEnd = end() >= docTextPosition && end() <= docTextPosition + len; // Fix textRunRange->endPosition(), but only if foundStart || foundEnd, because it is only // in those cases that textRunRange is used. if (foundEnd) { // FIXME: This is a workaround for the fact that the end of a run is often at the wrong // position for emitted '\n's. if (len == 1 && it.characterAt(0) == '\n') { scope.document().updateLayoutIgnorePendingStylesheets(); it.advance(); if (!it.atEnd()) { RefPtrWillBeRawPtr<Range> range = it.range(); textRunRange->setEnd(range->startContainer(), range->startOffset(), ASSERT_NO_EXCEPTION); } else { Position runStart = textRunRange->startPosition(); Position runEnd = VisiblePosition(runStart).next().deepEquivalent(); if (runEnd.isNotNull()) textRunRange->setEnd(runEnd.containerNode(), runEnd.computeOffsetInContainerNode(), ASSERT_NO_EXCEPTION); } } } if (foundStart) { startRangeFound = true; if (textRunRange->startContainer()->isTextNode()) { int offset = start() - docTextPosition; resultRange->setStart(textRunRange->startContainer(), offset + textRunRange->startOffset(), IGNORE_EXCEPTION); } else { if (start() == docTextPosition) resultRange->setStart(textRunRange->startContainer(), textRunRange->startOffset(), IGNORE_EXCEPTION); else resultRange->setStart(textRunRange->endContainer(), textRunRange->endOffset(), IGNORE_EXCEPTION); } } if (foundEnd) { if (textRunRange->startContainer()->isTextNode()) { int offset = end() - docTextPosition; resultRange->setEnd(textRunRange->startContainer(), offset + textRunRange->startOffset(), 
IGNORE_EXCEPTION); } else { if (end() == docTextPosition) resultRange->setEnd(textRunRange->startContainer(), textRunRange->startOffset(), IGNORE_EXCEPTION); else resultRange->setEnd(textRunRange->endContainer(), textRunRange->endOffset(), IGNORE_EXCEPTION); } docTextPosition += len; break; } docTextPosition += len; } if (!startRangeFound) return nullptr; if (length() && end() > docTextPosition) { // end() is out of bounds resultRange->setEnd(textRunRange->endContainer(), textRunRange->endOffset(), IGNORE_EXCEPTION); } return resultRange.release(); } PlainTextRange PlainTextRange::create(const Node& scope, const Range& range) { if (!range.startContainer()) return PlainTextRange(); // The critical assumption is that this only gets called with ranges that // concentrate on a given area containing the selection root. This is done // because of text fields and textareas. The DOM for those is not // directly in the document DOM, so ensure that the range does not cross a // boundary of one of those. if (range.startContainer() != &scope && !range.startContainer()->isDescendantOf(&scope)) return PlainTextRange(); if (range.endContainer() != scope && !range.endContainer()->isDescendantOf(&scope)) return PlainTextRange(); RefPtrWillBeRawPtr<Range> testRange = Range::create(scope.document(), const_cast<Node*>(&scope), 0, range.startContainer(), range.startOffset()); ASSERT(testRange->startContainer() == &scope); size_t start = TextIterator::rangeLength(testRange.get()); testRange->setEnd(range.endContainer(), range.endOffset(), IGNORE_EXCEPTION); ASSERT(testRange->startContainer() == &scope); size_t end = TextIterator::rangeLength(testRange.get()); return PlainTextRange(start, end); } }
// Author: Vivek Singh Rathore.  Date: 16-09-2017.
// Complex-number library demo: member-function definitions.

#include "Complex.h"

// Read the real and imaginary parts from standard input.
void Complex::get() {
    cout << "Enter the real and imaginary part\n";
    cin >> real >> im;
}

// Print the number as "(real,imaginary)".
void Complex::show() {
    cout << "(" << real << "," << im << ")\n";
}

// Return the magnitude (absolute value) of this complex number.
float Complex::abs() {
    return sqrt(im * im + real * real);
}

// Store the sum a + b into this object.
void Complex::add(Complex a, Complex b) {
    real = a.real + b.real;
    im = a.im + b.im;
}

// Store the difference a - b into this object.
void Complex::subt(Complex a, Complex b) {
    real = a.real - b.real;
    im = a.im - b.im;
}

// Replace this number with its complex conjugate.
void Complex::conj() {
    im = -im;
}

// Store the product a * b into this object.
void Complex::mult(Complex a, Complex b) {
    real = a.real * b.real - a.im * b.im;
    im = a.real * b.im + a.im * b.real;
}
/*
 * Copyright 2019-2020 Douglas Kaip
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * getVkRefreshCycleDurationGOOGLE.cpp
 *
 *  Created on: Oct 29, 2019
 *      Author: Douglas Kaip
 */

#include "JVulkanHelperFunctions.hh"
#include "slf4j.hh"

namespace jvulkan
{
    /*
     * Populates a native VkRefreshCycleDurationGOOGLE struct from the
     * corresponding Java VkRefreshCycleDurationGOOGLE object by calling
     * its getRefreshDuration() accessor through JNI.
     *
     * On any JNI failure a message is logged and the function returns
     * early, leaving a pending Java exception for the caller to handle.
     * The memoryToFree list is accepted for signature consistency with
     * the other helpers in this library; this function allocates
     * nothing, so it is unused here.
     */
    void getVkRefreshCycleDurationGOOGLE(
            JNIEnv *env,
            const jobject jVkRefreshCycleDurationGOOGLEObject,
            VkRefreshCycleDurationGOOGLE *vkRefreshCycleDurationGOOGLE,
            std::vector<void *> *memoryToFree)
    {
        jclass theClass = env->GetObjectClass(jVkRefreshCycleDurationGOOGLEObject);
        if (env->ExceptionOccurred())
        {
            LOGERROR(env, "%s", "Error trying to GetObjectClass for jVkRefreshCycleDurationGOOGLEObject");
            return;
        }

        ////////////////////////////////////////////////////////////////////////
        // Look up and invoke getRefreshDuration(), which returns the
        // refresh cycle duration in nanoseconds as a Java long.
        jmethodID methodId = env->GetMethodID(theClass, "getRefreshDuration", "()J");
        if (env->ExceptionOccurred())
        {
            LOGERROR(env, "%s", "Could not find method id for getRefreshDuration");
            return;
        }

        jlong refreshDuration = env->CallLongMethod(jVkRefreshCycleDurationGOOGLEObject, methodId);
        if (env->ExceptionOccurred())
        {
            LOGERROR(env, "%s", "Error calling CallLongMethod");
            return;
        }

        vkRefreshCycleDurationGOOGLE->refreshDuration = refreshDuration;
    }
}
// bridge.cpp
//
// mongobridge: a small TCP proxy that sits between a client and a mongod,
// forwarding wire-protocol messages in both directions with an optional
// per-message delay (used by tests to simulate slow links).
/**
*    Copyright (C) 2008 10gen Inc.
*
*    This program is free software: you can redistribute it and/or  modify
*    it under the terms of the GNU Affero General Public License, version 3,
*    as published by the Free Software Foundation.
*
*    This program is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*    GNU Affero General Public License for more details.
*
*    You should have received a copy of the GNU Affero General Public License
*    along with this program.  If not, see <http://www.gnu.org/licenses/>.
*/

#include "pch.h"
#include <boost/thread.hpp>
#include "mongo/db/dbmessage.h"
#include "mongo/util/net/listen.h"
#include "mongo/util/net/message.h"
#include "mongo/util/stacktrace.h"

using namespace mongo;
using namespace std;

// Command-line configuration (filled in by main()).
int port = 0;          // local port to listen on
int delay = 0;         // artificial per-message delay in milliseconds
string destUri;        // URI of the real mongod to forward to

void cleanup( int sig );

// One Forwarder runs per accepted client connection (on its own thread).
// It pumps messages from the client port mp_ to a fresh connection to
// destUri, and relays responses back.
class Forwarder {
public:
    Forwarder( MessagingPort &mp ) : mp_( mp ) {
    }
    void operator()() const {
        // Retry until the destination mongod accepts the connection.
        DBClientConnection dest;
        string errmsg;
        while( !dest.connect( destUri, errmsg ) )
            sleepmillis( 500 );
        Message m;
        while( 1 ) {
            try {
                m.reset();
                if ( !mp_.recv( m ) ) {
                    // Client hung up: close our side and end this thread.
                    cout << "end connection " << mp_.psock->remoteString() << endl;
                    mp_.shutdown();
                    break;
                }
                sleepmillis( delay );

                int oldId = m.header()->id;
                if ( m.operation() == dbQuery || m.operation() == dbMsg || m.operation() == dbGetMore ) {
                    // These opcodes expect a reply, so use call().
                    bool exhaust = false;
                    if ( m.operation() == dbQuery ) {
                        DbMessage d( m );
                        QueryMessage q( d );
                        exhaust = q.queryOptions & QueryOption_Exhaust;
                    }
                    Message response;
                    dest.port().call( m, response );

                    // nothing to reply with?
                    // NOTE(review): an empty response is treated as fatal for the
                    // whole bridge process (cleanup calls ::_exit) — confirm intended.
                    if ( response.empty() ) cleanup(0);

                    mp_.reply( m, response, oldId );

                    // For exhaust cursors the server keeps streaming batches
                    // until cursorId becomes 0; relay each batch to the client.
                    while ( exhaust ) {
                        MsgData *header = response.header();
                        QueryResult *qr = (QueryResult *) header;
                        if ( qr->cursorId ) {
                            response.reset();
                            dest.port().recv( response );
                            mp_.reply( m, response ); // m argument is ignored anyway
                        }
                        else {
                            exhaust = false;
                        }
                    }
                }
                else {
                    // Fire-and-forget opcodes: no reply expected.
                    dest.port().say( m, oldId );
                }
            }
            catch ( ... ) {
                // Best-effort proxy: swallow everything and keep pumping.
                log() << "caught exception in Forwarder, continuing" << endl;
            }
        }
    }
private:
    MessagingPort &mp_;
};

// All accepted ports, so cleanup() can shut them down on exit.
// Intentionally heap-allocated and never freed — presumably to dodge static
// destruction-order issues at process exit (verify).
set<MessagingPort*>& ports ( *(new std::set<MessagingPort*>()) );

// Accepts connections and spawns one detached Forwarder thread per client.
class MyListener : public Listener {
public:
    MyListener( int port ) : Listener( "bridge" , "", port ) {}
    virtual void acceptedMP(MessagingPort *mp) {
        ports.insert( mp );
        Forwarder f( *mp );
        boost::thread t( f );
    }
};

auto_ptr< MyListener > listener;

// Signal handler: close every socket and terminate the process immediately.
void cleanup( int sig ) {
    ListeningSockets::get()->closeAll();
    for ( set<MessagingPort*>::iterator i = ports.begin(); i != ports.end(); i++ )
        (*i)->shutdown();
    ::_exit( 0 );
}
#if !defined(_WIN32)
// terminate() hook: dump a stack trace before aborting.
void myterminate() {
    rawOut( "bridge terminate() called, printing stack:" );
    printStackTrace();
    ::abort();
}

// Route fatal signals through cleanup() so sockets are closed on the way out.
void setupSignals() {
    signal( SIGINT , cleanup );
    signal( SIGTERM , cleanup );
    signal( SIGPIPE , cleanup );
    signal( SIGABRT , cleanup );
    signal( SIGSEGV , cleanup );
    signal( SIGBUS , cleanup );
    signal( SIGFPE , cleanup );
    set_terminate( myterminate );
}
#else
inline void setupSignals() {}
#endif

// Print usage and exit with an error status.
void helpExit() {
    cout << "usage mongobridge --port <port> --dest <destUri> [ --delay <ms> ]" << endl;
    cout << " port: port to listen for mongo messages" << endl;
    cout << " destUri: uri of remote mongod instance" << endl;
    cout << " ms: transfer delay in milliseconds (default = 0)" << endl;
    ::_exit( -1 );
}

// Argument-validation helper: any failed check prints usage and exits.
void check( bool b ) {
    if ( !b )
        helpExit();
}

int main( int argc, char **argv ) {
    static StaticObserver staticObserver;
    setupSignals();

    // Expect exactly "--port P --dest U" or that plus "--delay D":
    // flags at odd indices, values at even ones.
    check( argc == 5 || argc == 7 );

    for( int i = 1; i < argc; ++i ) {
        check( i % 2 != 0 );
        if ( strcmp( argv[ i ], "--port" ) == 0 ) {
            port = strtol( argv[ ++i ], 0, 10 );
        }
        else if ( strcmp( argv[ i ], "--dest" ) == 0 ) {
            destUri = argv[ ++i ];
        }
        else if ( strcmp( argv[ i ], "--delay" ) == 0 ) {
            delay = strtol( argv[ ++i ], 0, 10 );
        }
        else {
            check( false );
        }
    }
    check( port != 0 && !destUri.empty() );

    listener.reset( new MyListener( port ) );
    listener->initAndListen();  // blocks, accepting connections forever

    return 0;
}
//
// Created by Katie Barr (EI) on 12/10/2017.
//
// Implementation of Graph: loads an assembly graph from a GFA file, finds
// "bubbles" (pairs of alternative contigs between shared neighbours) and
// reconstructs/write out candidate haplotype sequences.
// Member containers (edges, nodes, edge_list, switch_pm, bubbles,
// edges_in_bubbles, original_edge_dirs) are declared in graph.h (not visible
// here); their documented semantics below are inferred from usage — verify
// against the header.

#include <tuple>
#include "graph.h"

// Constructor.
// NOTE(review): this declares a LOCAL vector named `bubbles` that shadows
// nothing useful and is immediately discarded; the member `bubbles` (used
// below via std::get<0>/<1>, i.e. a container of pairs) is left default
// constructed. The local declaration looks like dead code — confirm.
Graph::Graph(){
    std::vector<std::vector <int> > bubbles;
}

/* void Graph::add_bubble(std::vector <int> bubble){ bubbles.push_back(bubble); }*/

// Enumerate every way of picking one contig from each bubble.
// Returns 2^b haplotypes for b bubbles: at each step the current set of
// haplotypes is duplicated, the first copy extended with the bubble's first
// contig and the second copy with its second contig.
std::vector<std::vector <std::string> > Graph::calculate_possible_haplotypes(){
    std::vector<std::vector <std::string> > haplotypes;
    if (bubbles.size() == 0){
        return haplotypes;
    }
    // this assumes all bubbles have 2 contigs, but does not enforce it
    // Seed with the two choices of the first bubble.
    std::vector<std::string> first;
    std::vector<std::string> second;
    first.push_back(std::get<0>(bubbles[0]));
    second.push_back(std::get<1>(bubbles[0]));
    haplotypes.push_back(first);
    haplotypes.push_back(second);
    auto to_index = haplotypes.size();
    auto from_index = haplotypes.size()/2;
    std::string b0;
    std::string b1;
    for (int j=1; j < bubbles.size(); j++){
        b0 = std::get<0>(bubbles[j]);
        to_index = haplotypes.size();
        // Duplicate the current haplotype set (doubling it).
        std::vector<std::vector <std::string> > new_haplotypes;
        for (auto hap: haplotypes) {
            new_haplotypes.push_back(hap);
        }
        for (auto hap: haplotypes) {
            new_haplotypes.push_back(hap);
        }
        haplotypes = new_haplotypes;
        // First half takes contig 0 of this bubble...
        for (int i = 0; i < to_index; i++) {
            haplotypes[i].push_back(b0);
        }
        b1 = std::get<1>(bubbles[j]);
        from_index = (haplotypes.size()) / 2;
        // ...second half takes contig 1.
        for (int i = from_index; i < haplotypes.size(); i++) {
            haplotypes[i].push_back(b1);
        }
    }
    return haplotypes;
}

// Walk links outward from the current edge set, prepending each newly seen
// edge (with an orientation flag) to edges_to_output. Bubble edges are only
// emitted if they are in the chosen bubble_edges list.
// WARNING(review): declared to return a vector but contains NO return
// statement — falling off the end is undefined behaviour. Also, if `links`
// never becomes empty this loops forever. Needs a fix at the declaration
// site (graph.h) as well, so flagged rather than changed here.
std::vector<std::pair<std::string, bool> > Graph::find_next_edges(std::vector<std::pair<std::string, bool> > edges_to_output, std::vector<std::string> edges_seen, std::vector<std::string> bubble_edges, std::set<std::pair<std::string, std::string> > links){
    while(links.size() > 0){
        for (auto link: links) {
            std::string next_edge = std::get<0>(link);
            std::string start_end = std::get<1>(link);
            // want to go from other end of this seq
            std::string next_dir = switch_pm[start_end];
            // NOTE(review): reassigning `links` while range-iterating over it
            // invalidates the loop's iterators — confirm this is not relied on.
            links = edge_list[std::make_pair(next_edge, next_dir)];
            if (std::find(edges_seen.begin(), edges_seen.end(), next_edge) == edges_seen.end()) {
                // all links are included in twice to make graph traversal easier - don't want to repeat
                // if edge is in a bubble, it should be in bubble edges
                if (std::find(edges_in_bubbles.begin(), edges_in_bubbles.end(), next_edge) != edges_in_bubbles.end()){
                    if (std::find(bubble_edges.begin(), bubble_edges.end(), next_edge) != bubble_edges.end()){
                        // add sequence to list to output, as we're going from start, it goes at the front
                        // if its a + link, start of next_edge is joined to start of current edge, so reverse it
                        if (start_end == "+"){
                            edges_to_output.insert(edges_to_output.begin(), std::make_pair(next_edge, true));
                        } else {
                            edges_to_output.insert(edges_to_output.begin(), std::make_pair(next_edge, false));
                        }
                    }
                } else { //if its not in a bubble, add it
                    if (start_end == "+"){
                        edges_to_output.insert(edges_to_output.begin(), std::make_pair(next_edge, true));
                    } else {
                        edges_to_output.insert(edges_to_output.begin(), std::make_pair(next_edge, false));
                    }
                }
            }
            edges_seen.push_back(next_edge);
        }
    }
}

// Build the subgraph consisting of all homozygous (non-bubble) edges plus the
// selected bubble_edges; if it forms a single linear path, order/orient the
// contigs and write the concatenated sequence to output_file as FASTA.
void Graph::write_output_subgraph(std::vector<std::string> bubble_edges, std::string output_file, std::string sequence_name) {
    // Homozygous edges = every edge not inside any bubble.
    std::vector<std::string> hom_edges;
    for (auto edge:edges){
        if (std::find(edges_in_bubbles.begin(), edges_in_bubbles.end(), edge) == edges_in_bubbles.end()){
            hom_edges.push_back(edge);
        }
    }
    std::map < std::pair<std::string, std::string> , std::vector<std::pair<std::string, std::string> > > edges_to_include;
    // easier- just go through all links- if its a hom link, or included in bubble edges, take it
    for (auto link:edge_list){
        std::string e1_name = std::get<0>(link.first);
        for (auto joined_to: link.second) {
            std::string e2_name = std::get<0>(joined_to);
            if (std::find(hom_edges.begin(), hom_edges.end(), e1_name) != hom_edges.end() && std::find(bubble_edges.begin(), bubble_edges.end(), e2_name) != bubble_edges.end()) {
                // then this link should be included
                edges_to_include[link.first].push_back(joined_to);
            } else if (std::find(hom_edges.begin(), hom_edges.end(), e2_name) != hom_edges.end() && std::find(bubble_edges.begin(), bubble_edges.end(), e1_name) != bubble_edges.end()) {
                edges_to_include[link.first].push_back(joined_to);
            }
        }
    }
    // to be able to output this as 1 contig, each edge should be joined once at end, once at start - except end ones
    bool can_output = can_output_graph_sequence(edges_to_include);
    std::vector<std::pair<std::string, bool> > edges_to_output;
    if (can_output){
        // need to order/orient contigs - know that apart from ends, each is joined to 1 only at each end
        // ok, try again, find one of end contigs and just go along
        auto start_edge = find_start_edge(edges_to_include);
        auto previous_dir = std::get<1>(start_edge);
        std::vector<std::pair<std::string, std::string> > next_edge = edges_to_include[std::make_pair(std::get<0>(start_edge), previous_dir)];
        // bool flag in each output pair = "reverse this sequence when writing".
        edges_to_output.push_back(std::make_pair(std::get<0>(start_edge), false));
        auto e = next_edge[0];
        auto edge_name = std::get<0>(e);
        std::string current_dir = std::get<1>(e);
        auto edge_leaving_other_way = edges_to_include[std::make_pair(edge_name, switch_pm[current_dir])];
        // Follow the unique chain of links until an edge has no onward link.
        while (edge_leaving_other_way.size() != 0) {
            if (current_dir == previous_dir) {
                edges_to_output.push_back(std::make_pair(edge_name, false));
            } else {
                edges_to_output.push_back(std::make_pair(edge_name, true));
            }
            edge_leaving_other_way = edges_to_include[std::make_pair(edge_name, current_dir)];
            next_edge = edge_leaving_other_way;
            edge_name = std::get<0>(next_edge[0]);
            previous_dir = current_dir;
            current_dir = std::get<1>(next_edge[0]);
        }
        write_sequences_to_file(output_file, sequence_name, edges_to_output);
    }
}

// Concatenate the sequences of edges_to_output (reversing any whose flag is
// true) and write them as a single FASTA record to output_filename.
// NOTE(review): reversal flips the string only — it does NOT complement the
// bases; confirm reverse-complement is not required here.
void Graph::write_sequences_to_file(std::string output_filename, std::string sequence_name, std::vector<std::pair<std::string, bool> > edges_to_output){
    std::string sequence;
    for (auto edge: edges_to_output){
        auto seq = nodes[std::get<0>(edge)];
        if (std::get<1>(edge)){
            std::reverse(seq.begin(), seq.end());
        }
        sequence = sequence + seq;
    }
    std::ofstream out(output_filename);
    out << ">" << sequence_name << std::endl << sequence << std::endl;
}

// Find an endpoint of the (assumed linear) subgraph: an (edge, direction) key
// with no outgoing links, or whose opposite-direction key is absent.
// Returns ("","") if no such endpoint exists (e.g. a cycle).
std::pair<std::string, std::string> Graph::find_start_edge(std::map < std::pair<std::string, std::string> , std::vector<std::pair<std::string, std::string> > > edges_to_subgraph){
    for (auto e: edges_to_subgraph){
        std::pair<std::string, std::string> inverse_links = std::make_pair(std::get<0>(e.first), switch_pm[std::get<1>(e.first)]);
        if (e.second.size() == 0 or edges_to_subgraph.find(inverse_links) == edges_to_subgraph.end()){
            std::pair<std::string, std::string> start_edge = std::make_pair(std::get<0>(e.first), std::get<1>(e.first));
            return start_edge;
        }
    }
    return std::make_pair("","");
}

// True iff every edge in the subgraph is joined to at most one neighbour at
// each end — i.e. the subgraph can be emitted as one linear contig.
bool Graph::can_output_graph_sequence(std::map < std::pair<std::string, std::string> , std::vector<std::pair<std::string, std::string> > > edges){
    std::map<std::string, std::set<std::string> > edges_start;
    std::map<std::string, std::set<std::string> > edges_end;
    // edge dict replicates links- have from_link, from_start_end : to_link to_start_end
    // avoid repetition by only going through dict keys
    // need each edge joined to one contig at start, one contig at end
    for (auto link:edges){
        for (auto linked_to: link.second) {
            if (std::get<1>(link.first) == "+") {// links joined to the end of this
                std::string edge_from = std::get<0>(link.first);
                edges_end[edge_from].insert(std::get<0>(linked_to));
            }
            if (std::get<1>(link.first) == "-") { // links joined to start of this - so go before it in list
                std::string edge_from = std::get<0>(link.first);
                edges_start[edge_from].insert(std::get<0>(linked_to));
            }
        }
    }
    for (auto e: edges_end){
        if (e.second.size() > 1){
            return false;
        }
    }
    for (auto e: edges_start){
        if (e.second.size() > 1){
            return false;
        }
    }
    return true;
}

// Parse a GFA file: "S" records populate nodes (name -> sequence), "L"
// records populate edges and the bidirectional edge_list adjacency map.
// Only the first five whitespace-separated fields of each line are read.
void Graph::load_gfa(std::string infile_name){
    std::ifstream infile(infile_name);
    std::string line;
    std::string fields[5];
    int counter = 0;
    std::cout << "Loading GFA file " << infile_name << std::endl;
    while (std::getline(infile, line)){
        // L line fields: "L" from from_orient to to_orient [overlap ignored]
        std::istringstream(line) >> fields[0] >> fields[1] >> fields[2] >> fields[3] >> fields[4];
        // to traverse graph only links are required
        if (fields[0] == "L"){
            edges.insert(fields[1]);
            edges.insert(fields[3]);
            std::pair<std::string, std::string> value_fwd = std::make_pair(fields[3], fields[4]);
            // need to store both ways around to ensure every edge connected to a given node is traversed
            std::pair<std::string, std::string> value_bwd = std::make_pair(fields[1], switch_pm[fields[2]]);
            std::pair<std::string, std::string> inverse_link = std::make_pair(fields[3], switch_pm[fields[4]]);
            //std::string t = switch_pm[fields[2]];
            //std::tuple<std::string, std::string> value_bwd = std::make_tuple(t, t);
            // NOTE(review): the next line indexes edge_list with a bare string
            // while every other use keys it by (name, orientation) pairs —
            // looks like a leftover from an earlier edge_list type; verify
            // against graph.h.
            edge_list[fields[1]].push_back(value_fwd);
            edge_list[std::make_pair(fields[1], fields[2])].insert(value_fwd);
            //std::pair<std::string, std::string> inverse_link = std::make_pair(fields[3], switch_pm[fields[4]]);
            edge_list[inverse_link].insert(value_bwd);
            // Remember the original orientations of this link for output.
            std::pair<std::string, std::string> pms = std::make_pair(fields[2], fields[4]);
            original_edge_dirs[std::make_pair(fields[1], fields[3])] = pms;
            counter +=1;
        } else if (fields[0] == "S"){
            nodes[fields[1]] = fields[2];
        }
    }
    std::cout << "Loaded GFA with " << counter << " links" << edge_list.size()<<std::endl;
}

// Decide whether the candidate contigs in adjacent_nodes form a bubble:
// they do iff, pooling the neighbours of both ends of every candidate,
// exactly two distinct sequences are joined to all of them. On success the
// candidates are recorded in edges_in_bubbles and the neighbour on the far
// side of the bubble (node, direction) is returned so traversal can resume
// there; otherwise ("","") is returned.
std::pair<std::string, std::string> Graph::check_bubble(std::pair<std::string, std::string> origniating_edge, std::vector<std::pair<std::string, std::string> > adjacent_nodes){
    // node list are candidate bubble contigs. if the nodes go to and from same contigs, its a bubble
    std::set<std::pair<std::string, std::string> > seqs;
    // to be in the same bubble, the contigs have to join the same ends of the adjacent contigs
    for (auto node: adjacent_nodes){
        for (auto node2: edge_list[node]) {
            seqs.insert(node2);
        }
        std::pair<std::string, std::string> opp_dir_nodes = std::make_pair(std::get<0>(node), switch_pm[std::get<1>(node)]);
        for (auto node2: edge_list[opp_dir_nodes]) {
            seqs.insert(node2);
        }
    }
    if (seqs.size() == 2){
        // if only 2 sequences joined to all candidate nodes, they are in a bubble
        // to avoid traversing this part again, return next node and its direction
        for (auto seq: seqs){
            if (seq.first != origniating_edge.first){
                for (auto node: adjacent_nodes){
                    edges_in_bubbles.insert(std::get<0>(node));
                }
                return seq;
            }
        }
    }
    return std::make_pair("","");
}

// Write the ordered, oriented contig path (hom edges + chosen bubble_edges)
// as a comma-separated "name+/-" list, followed by the agreeing barcode ids,
// one per line. Mirrors the path-walk in write_output_subgraph but emits
// contig names instead of sequence.
void Graph::output_contigs_joined_to_contig_list(std::vector<std::string> bubble_edges, std::map<std::string, int > agreeing_barcodes, std::string outfile_name){
    // need to be able to reconstruct each haplotype sequence, phaser just outputs contig choices, so need inbetween links
    std::vector<std::string> hom_edges;
    for (auto edge:edges){
        if (std::find(edges_in_bubbles.begin(), edges_in_bubbles.end(), edge) == edges_in_bubbles.end()){
            hom_edges.push_back(edge);
        }
    }
    std::map < std::pair<std::string, std::string> , std::vector<std::pair<std::string, std::string> > > edges_to_include;
    // easier- just go through all links- if its a hom link, or included in bubble edges, take it
    for (auto link:edge_list){
        std::string e1_name = std::get<0>(link.first);
        for (auto joined_to: link.second) {
            std::string e2_name = std::get<0>(joined_to);
            if (std::find(hom_edges.begin(), hom_edges.end(), e1_name) != hom_edges.end() && std::find(bubble_edges.begin(), bubble_edges.end(), e2_name) != bubble_edges.end()) {
                // then this link should be included
                edges_to_include[link.first].push_back(joined_to);
            } else if (std::find(hom_edges.begin(), hom_edges.end(), e2_name) != hom_edges.end() && std::find(bubble_edges.begin(), bubble_edges.end(), e1_name) != bubble_edges.end()) {
                edges_to_include[link.first].push_back(joined_to);
            }
        }
    }
    // to be able to output this as 1 contig, each edge should be joined once at end, once at start - except end ones
    bool can_output = can_output_graph_sequence(edges_to_include);
    std::vector<std::string > edges_to_output;
    if (can_output) {
        // need to order/orient contigs - know that apart from ends, each is joined to 1 only at each end
        // ok, try again, find one of end contigs and just go along
        auto start_edge = find_start_edge(edges_to_include);
        auto previous_dir = std::get<1>(start_edge);
        std::vector<std::pair<std::string, std::string> > next_edge = edges_to_include[std::make_pair( std::get<0>(start_edge), previous_dir)];
        edges_to_output.push_back(std::get<0>(start_edge));
        auto e = next_edge[0];
        auto edge_name = std::get<0>(e);
        std::string current_dir = std::get<1>(e);
        auto edge_leaving_other_way = edges_to_include[std::make_pair(edge_name, switch_pm[current_dir])];
        // Walk the unique chain, collecting contig names in path order.
        while (edge_leaving_other_way.size() != 0) {
            edges_to_output.push_back(edge_name);
            edge_leaving_other_way = edges_to_include[std::make_pair(edge_name, current_dir)];
            next_edge = edge_leaving_other_way;
            edge_name = std::get<0>(next_edge[0]);
            current_dir = std::get<1>(next_edge[0]);
        }
        std::ofstream out(outfile_name);
        // Emit "name<dir>," for each consecutive pair, looking the link up in
        // either direction (flipping +/- when it was stored the other way).
        for (auto i=0;i < edges_to_output.size() -1; i++){
            auto edge = edges_to_output[i];
            auto next_edge = edges_to_output[i+1];
            if (original_edge_dirs.find(std::make_pair(edge, next_edge)) != original_edge_dirs.end()){
                auto dir = std::get<0>(original_edge_dirs[std::make_pair(edge, next_edge)]);
                out << edge << dir << ",";
            } else if (original_edge_dirs.find(std::make_pair(next_edge, edge)) != original_edge_dirs.end()){
                // if original link was in opposite dir, need to switch plus/minus
                auto dir = switch_pm[std::get<0>(original_edge_dirs[std::make_pair(next_edge, edge)])];
                out << edge << dir << ",";
            }
        }
        out << "\n";
        for (auto b:agreeing_barcodes){
            out << b.first << "\n";
        }
    }
}

//TODO: seen at least one example of this stopping one edge earlier than needed
// Recursive walk from start_node (entered travelling in in_dir), appending
// visited edge names to traversed_edge_list. Degree-1 junctions are followed
// directly; degree-2 junctions are tested with check_bubble() and, if they
// form a bubble, recorded in `bubbles` and traversal resumes beyond it.
// Anything else ends the walk.
void Graph::traverse_graph(std::string start_node, std::string in_dir, std::vector<std::string > &traversed_edge_list){
    // links are replicated so that every one is a key in the dict - now means we can go same way when supposed to go opposite ways
    // get nodes joined from other direction- so when we start g
    std::pair<std::string, std::string> node = std::make_pair(start_node, switch_pm[in_dir]) ;// should probably just feed this in as a parameter
    std::set<std::pair<std::string, std::string> > adjacent_nodes = edge_list[node];
    std::vector<std::pair<std::string, std::string> > adjacent_nodes_vector;
    for (auto n: adjacent_nodes){
        adjacent_nodes_vector.push_back(n);
    }
    if (adjacent_nodes.size() == 0){// we can traverse no further
        return;
    } else if (adjacent_nodes.size() == 1 && std::find(traversed_edge_list.begin(), traversed_edge_list.end(), std::get<0>(adjacent_nodes_vector[0])) == traversed_edge_list.end()){
        //traverse to next contig
        traversed_edge_list.push_back(std::get<0>(adjacent_nodes_vector[0]));
        traverse_graph(std::get<0>(adjacent_nodes_vector[0]), switch_pm[std::get<1>(adjacent_nodes_vector[0])], traversed_edge_list);
    } else if (std::find(traversed_edge_list.begin(), traversed_edge_list.end(), std::get<0>(adjacent_nodes_vector[0]))== traversed_edge_list.end() && std::find(traversed_edge_list.begin(), traversed_edge_list.end(), std::get<0>(adjacent_nodes_vector[1]))== traversed_edge_list.end()){
        // NOTE(review): this branch indexes adjacent_nodes_vector[1], so it
        // implicitly assumes degree >= 2; degree > 2 junctions only examine
        // the first two neighbours — confirm intended.
        traversed_edge_list.push_back(std::get<0>(adjacent_nodes_vector[0]));
        traversed_edge_list.push_back(std::get<0>(adjacent_nodes_vector[1]));
        std::pair<std::string, std::string> contig_other_end_bubble = check_bubble(node, adjacent_nodes_vector);
        // NOTE(review): `&` below is a bitwise AND of two bools — works, but
        // `&&` was almost certainly intended (no short-circuit here).
        if (std::get<0>(contig_other_end_bubble) != "" & std::get<1>(contig_other_end_bubble) != ""){
            //!!!!!! not enforcing bubble degree, but this assumes deg 2....
            //std::cout << "adding bubble " << std::get<0>(adjacent_nodes_vector[0]) << " : " << std::get<0>(adjacent_nodes_vector[1]) << std::endl;
            // really lazy but easiest way to check if edge is hom/het for output
            edges_in_bubbles.insert(std::get<0>(adjacent_nodes_vector[0]));
            edges_in_bubbles.insert(std::get<0>(adjacent_nodes_vector[1]));
            bubbles.push_back(std::make_pair(std::get<0>(adjacent_nodes_vector[0]), std::get<0>(adjacent_nodes_vector[1])));
            /// continue traversing from other end of bubble
            traverse_graph(std::get<0>(contig_other_end_bubble), switch_pm[std::get<1>(contig_other_end_bubble)], traversed_edge_list);
        } else {
            // if we've hit something that is not a bubble, we can't phase further, so exit
            return;
        }
    }
}
/* @flow */
// Copyright (c) 2012-2013 The PPCoin developers
// Copyright (c) 2015-2017 The PIVX developers
// Copyright (c) 2018 The OTRchain developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
//
// Proof-of-stake kernel: stake-modifier computation and selection helpers.
// CONSENSUS-CRITICAL: any behavioural change here forks the chain.

#include <boost/assign/list_of.hpp>
#include <boost/lexical_cast.hpp>

#include "db.h"
#include "kernel.h"
#include "script/interpreter.h"
#include "timedata.h"
#include "util.h"
#include "main.h"

using namespace std;

bool fTestNet = false; //Params().NetworkID() == CBaseChainParams::TESTNET;

// Modifier interval: time to elapse before new modifier is computed
// Set to 3-hour for production network and 20-minute for test network
unsigned int nModifierInterval;
int nStakeTargetSpacing = 60;

// Return the modifier interval for the selected network.
unsigned int getIntervalVersion(bool fTestNet)
{
    if (fTestNet)
        return MODIFIER_INTERVAL_TESTNET;
    else
        return MODIFIER_INTERVAL;
}

// Hard checkpoints of stake modifiers to ensure they are deterministic
static std::map<int, unsigned int> mapStakeModifierCheckpoints =
    boost::assign::map_list_of(0, 0xfd11f4e7u);

// Get time weight: seconds of age beyond the minimum stake age.
int64_t GetWeight(int64_t nIntervalBeginning, int64_t nIntervalEnd, int nHeight)
{
    return nIntervalEnd - nIntervalBeginning - GetStakeMinAge(nHeight);
}

// Get the last stake modifier and its generation time from a given block,
// walking back to the most recent ancestor that generated one.
static bool GetLastStakeModifier(const CBlockIndex* pindex, uint64_t& nStakeModifier, int64_t& nModifierTime)
{
    if (!pindex)
        return error("GetLastStakeModifier: null pindex");
    while (pindex && pindex->pprev && !pindex->GeneratedStakeModifier())
        pindex = pindex->pprev;
    if (!pindex->GeneratedStakeModifier())
        return error("GetLastStakeModifier: no generation at genesis block");
    nStakeModifier = pindex->nStakeModifier;
    nModifierTime = pindex->GetBlockTime();
    return true;
}

// Get selection interval section (in seconds): a geometrically shrinking
// slice of the modifier interval for selection round nSection (0..63).
static int64_t GetStakeModifierSelectionIntervalSection(int nSection)
{
    assert(nSection >= 0 && nSection < 64);
    int64_t a = getIntervalVersion(fTestNet) * 63 / (63 + ((63 - nSection) * (MODIFIER_INTERVAL_RATIO - 1)));
    return a;
}

// Get stake modifier selection interval (in seconds): sum of all 64 sections.
static int64_t GetStakeModifierSelectionInterval()
{
    int64_t nSelectionInterval = 0;
    for (int nSection = 0; nSection < 64; nSection++) {
        nSelectionInterval += GetStakeModifierSelectionIntervalSection(nSection);
    }
    return nSelectionInterval;
}

// select a block from the candidate blocks in vSortedByTimestamp, excluding
// already selected blocks in vSelectedBlocks, and with timestamp up to
// nSelectionIntervalStop.
// The candidate with the smallest selection hash wins; PoS blocks get their
// hash shifted down 32 bits so they are always favoured over PoW blocks.
static bool SelectBlockFromCandidates(
    vector<pair<int64_t, uint256> >& vSortedByTimestamp,
    map<uint256, const CBlockIndex*>& mapSelectedBlocks,
    int64_t nSelectionIntervalStop,
    uint64_t nStakeModifierPrev,
    const CBlockIndex** pindexSelected)
{
    bool fModifierV2 = false;
    bool fFirstRun = true;
    bool fSelected = false;
    uint256 hashBest = 0;
    *pindexSelected = (const CBlockIndex*)0;
    BOOST_FOREACH (const PAIRTYPE(int64_t, uint256) & item, vSortedByTimestamp) {
        if (!mapBlockIndex.count(item.second))
            return error("SelectBlockFromCandidates: failed to find block index for candidate block %s", item.second.ToString().c_str());
        const CBlockIndex* pindex = mapBlockIndex[item.second];
        // Once something is selected, stop at the first candidate past the
        // end of this round's interval.
        if (fSelected && pindex->GetBlockTime() > nSelectionIntervalStop)
            break;
        //if the lowest block height (vSortedByTimestamp[0]) is >= switch height, use new modifier calc
        if (fFirstRun){
            fModifierV2 = pindex->nHeight >= Params().ModifierUpgradeBlock();
            fFirstRun = false;
        }
        if (mapSelectedBlocks.count(pindex->GetBlockHash()) > 0)
            continue;
        // compute the selection hash by hashing an input that is unique to that block
        uint256 hashProof;
        if(fModifierV2)
            hashProof = pindex->GetBlockHash();
        else
            hashProof = pindex->IsProofOfStake() ? 0 : pindex->GetBlockHash();
        CDataStream ss(SER_GETHASH, 0);
        ss << hashProof << nStakeModifierPrev;
        uint256 hashSelection = Hash(ss.begin(), ss.end());
        // the selection hash is divided by 2**32 so that proof-of-stake block
        // is always favored over proof-of-work block. this is to preserve
        // the energy efficiency property
        if (pindex->IsProofOfStake())
            hashSelection >>= 32;
        // NOTE(review): the first comparison looks inverted (keeps the
        // SMALLER hash only after a candidate is already selected) but this
        // exact form is consensus-locked upstream — do not "fix" casually.
        if (fSelected && hashSelection < hashBest) {
            hashBest = hashSelection;
            *pindexSelected = (const CBlockIndex*)pindex;
        } else if (!fSelected) {
            fSelected = true;
            hashBest = hashSelection;
            *pindexSelected = (const CBlockIndex*)pindex;
        }
    }
    if (GetBoolArg("-printstakemodifier", false))
        LogPrintf("SelectBlockFromCandidates: selection hash=%s\n", hashBest.ToString().c_str());
    return fSelected;
}

// Stake Modifier (hash modifier of proof-of-stake):
// The purpose of stake modifier is to prevent a txout (coin) owner from
// computing future proof-of-stake generated by this txout at the time
// of transaction confirmation. To meet kernel protocol, the txout
// must hash with a future stake modifier to generate the proof.
// Stake modifier consists of bits each of which is contributed from a
// selected block of a given block group in the past.
// The selection of a block is based on a hash of the block's proof-hash and
// the previous stake modifier.
// Stake modifier is recomputed at a fixed time interval instead of every
// block. This is to make it difficult for an attacker to gain control of
// additional bits in the stake modifier, even after generating a chain of
// blocks.
// Compute the stake modifier for the block after pindexPrev.
// Outputs: nStakeModifier (new or carried-forward modifier) and
// fGeneratedStakeModifier (true only when a fresh modifier was generated
// this interval). Returns false only on internal error.
bool ComputeNextStakeModifier(const CBlockIndex* pindexPrev, uint64_t& nStakeModifier, bool& fGeneratedStakeModifier)
{
    nStakeModifier = 0;
    fGeneratedStakeModifier = false;
    if (!pindexPrev) {
        fGeneratedStakeModifier = true;
        return true; // genesis block's modifier is 0
    }
    if (pindexPrev->nHeight == 0) {
        //Give a stake modifier to the first block
        // NOTE(review): uint64_t("stakemodifier") converts the string
        // literal's POINTER to an integer — value is arbitrary per build.
        // Consensus-locked as-is; flagged, not changed.
        fGeneratedStakeModifier = true;
        nStakeModifier = uint64_t("stakemodifier");
        return true;
    }
    // First find current stake modifier and its generation block time
    // if it's not old enough, return the same stake modifier
    int64_t nModifierTime = 0;
    if (!GetLastStakeModifier(pindexPrev, nStakeModifier, nModifierTime))
        return error("ComputeNextStakeModifier: unable to get last modifier");
    if (GetBoolArg("-printstakemodifier", false))
        LogPrintf("ComputeNextStakeModifier: prev modifier= %s time=%s\n", boost::lexical_cast<std::string>(nStakeModifier).c_str(), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", nModifierTime).c_str());
    // Same interval bucket as the previous modifier: keep it unchanged.
    if (nModifierTime / getIntervalVersion(fTestNet) >= pindexPrev->GetBlockTime() / getIntervalVersion(fTestNet))
        return true;

    // Sort candidate blocks by timestamp
    vector<pair<int64_t, uint256> > vSortedByTimestamp;
    vSortedByTimestamp.reserve(64 * getIntervalVersion(fTestNet) / nStakeTargetSpacing);
    int64_t nSelectionInterval = GetStakeModifierSelectionInterval();
    int64_t nSelectionIntervalStart = (pindexPrev->GetBlockTime() / getIntervalVersion(fTestNet)) * getIntervalVersion(fTestNet) - nSelectionInterval;
    const CBlockIndex* pindex = pindexPrev;
    // Gather every block inside the selection window, newest first.
    while (pindex && pindex->GetBlockTime() >= nSelectionIntervalStart) {
        vSortedByTimestamp.push_back(make_pair(pindex->GetBlockTime(), pindex->GetBlockHash()));
        pindex = pindex->pprev;
    }
    int nHeightFirstCandidate = pindex ? (pindex->nHeight + 1) : 0;
    reverse(vSortedByTimestamp.begin(), vSortedByTimestamp.end());
    sort(vSortedByTimestamp.begin(), vSortedByTimestamp.end());

    // Select 64 blocks from candidate blocks to generate stake modifier
    uint64_t nStakeModifierNew = 0;
    int64_t nSelectionIntervalStop = nSelectionIntervalStart;
    map<uint256, const CBlockIndex*> mapSelectedBlocks;
    for (int nRound = 0; nRound < min(64, (int)vSortedByTimestamp.size()); nRound++) {
        // add an interval section to the current selection round
        nSelectionIntervalStop += GetStakeModifierSelectionIntervalSection(nRound);
        // select a block from the candidates of current round
        if (!SelectBlockFromCandidates(vSortedByTimestamp, mapSelectedBlocks, nSelectionIntervalStop, nStakeModifier, &pindex))
            return error("ComputeNextStakeModifier: unable to select block at round %d", nRound);
        // write the entropy bit of the selected block
        nStakeModifierNew |= (((uint64_t)pindex->GetStakeEntropyBit()) << nRound);
        // add the selected block from candidates to selected list
        mapSelectedBlocks.insert(make_pair(pindex->GetBlockHash(), pindex));
        if (fDebug || GetBoolArg("-printstakemodifier", false))
            LogPrintf("ComputeNextStakeModifier: selected round %d stop=%s height=%d bit=%d\n", nRound, DateTimeStrFormat("%Y-%m-%d %H:%M:%S", nSelectionIntervalStop).c_str(), pindex->nHeight, pindex->GetStakeEntropyBit());
    }

    // Print selection map for visualization of the selected blocks
    if (fDebug || GetBoolArg("-printstakemodifier", false)) {
        string strSelectionMap = "";
        // '-' indicates proof-of-work blocks not selected
        strSelectionMap.insert(0, pindexPrev->nHeight - nHeightFirstCandidate + 1, '-');
        pindex = pindexPrev;
        while (pindex && pindex->nHeight >= nHeightFirstCandidate) {
            // '=' indicates proof-of-stake blocks not selected
            if (pindex->IsProofOfStake())
                strSelectionMap.replace(pindex->nHeight - nHeightFirstCandidate, 1, "=");
            pindex = pindex->pprev;
        }
        BOOST_FOREACH (const PAIRTYPE(uint256, const CBlockIndex*) & item, mapSelectedBlocks) {
            // 'S' indicates selected proof-of-stake blocks
            // 'W' indicates selected proof-of-work blocks
            strSelectionMap.replace(item.second->nHeight - nHeightFirstCandidate, 1, item.second->IsProofOfStake() ? "S" : "W");
        }
        LogPrintf("ComputeNextStakeModifier: selection height [%d, %d] map %s\n", nHeightFirstCandidate, pindexPrev->nHeight, strSelectionMap.c_str());
    }
    if (fDebug || GetBoolArg("-printstakemodifier", false)) {
        LogPrintf("ComputeNextStakeModifier: new modifier=%s time=%s\n", boost::lexical_cast<std::string>(nStakeModifierNew).c_str(), DateTimeStrFormat("%Y-%m-%d %H:%M:%S", pindexPrev->GetBlockTime()).c_str());
    }
    nStakeModifier = nStakeModifierNew;
    fGeneratedStakeModifier = true;
    return true;
}

// The stake modifier used to hash for a stake kernel is chosen as the stake
// modifier about a selection interval later than the coin generating the kernel
// Outputs the modifier plus the height/time of the block that generated it.
bool GetKernelStakeModifier(uint256 hashBlockFrom, uint64_t& nStakeModifier, int& nStakeModifierHeight, int64_t& nStakeModifierTime, bool fPrintProofOfStake)
{
    nStakeModifier = 0;
    if (!mapBlockIndex.count(hashBlockFrom))
        return error("GetKernelStakeModifier() : block not indexed");
    const CBlockIndex* pindexFrom = mapBlockIndex[hashBlockFrom];
    nStakeModifierHeight = pindexFrom->nHeight;
    nStakeModifierTime = pindexFrom->GetBlockTime();
    int64_t nStakeModifierSelectionInterval = GetStakeModifierSelectionInterval();
    const CBlockIndex* pindex = pindexFrom;
    CBlockIndex* pindexNext = chainActive[pindexFrom->nHeight + 1];

    // loop to find the stake modifier later by a selection interval
    while (nStakeModifierTime < pindexFrom->GetBlockTime() + nStakeModifierSelectionInterval) {
        if (!pindexNext) {
            // Should never happen
            return error("Null pindexNext\n");
        }

        pindex = pindexNext;
        pindexNext = chainActive[pindexNext->nHeight + 1];
        if (pindex->GeneratedStakeModifier()) {
            nStakeModifierHeight = pindex->nHeight;
            nStakeModifierTime = pindex->GetBlockTime();
        }
    }
    nStakeModifier = pindex->nStakeModifier;
    return true;
}

// Kernel hash over (modifier, block time, prevout index, prevout hash, tx time).
// ss already contains the serialized stake modifier; taken by value so each
// call hashes a fresh copy.
uint256 stakeHash(unsigned int nTimeTx, CDataStream ss, unsigned int prevoutIndex, uint256 prevoutHash, unsigned int nTimeBlockFrom)
{
    //OTRchain will hash in the transaction hash and the index number in order to make sure each hash is unique
    ss << nTimeBlockFrom << prevoutIndex << prevoutHash << nTimeTx;
    return Hash(ss.begin(), ss.end());
}

//test hash vs target
bool stakeTargetHit(uint256 hashProofOfStake, int64_t nValueIn, uint256 bnTargetPerCoinDay)
{
    //get the stake weight - weight is equal to coin amount
    uint256 bnCoinDayWeight = uint256(nValueIn) / 100;

    // Now check if proof-of-stake hash meets target protocol
    return (uint256(hashProofOfStake) < bnCoinDayWeight * bnTargetPerCoinDay);
}

//instead of looping outside and reinitializing variables many times, we will give a nTimeTx and also search interval so that we can do all the hashing here
// fCheck=true: validate the single provided nTimeTx (block validation path).
// fCheck=false: search nHashDrift seconds backwards from nTimeTx+nHashDrift
// for a timestamp whose kernel hash meets the target (mining path); on
// success nTimeTx is updated to the winning timestamp.
// hashProofOfStake is always set to the last hash tried.
bool CheckStakeKernelHash(unsigned int nBits, const CBlock blockFrom, const CTransaction txPrev, const COutPoint prevout, unsigned int& nTimeTx, unsigned int nHashDrift, bool fCheck, uint256& hashProofOfStake, bool fPrintProofOfStake)
{
    //assign new variables to make it easier to read
    int64_t nValueIn = txPrev.vout[prevout.n].nValue;
    unsigned int nTimeBlockFrom = blockFrom.GetBlockTime();

    if (nTimeTx < nTimeBlockFrom) // Transaction timestamp violation
        return error("CheckStakeKernelHash() : nTime violation");

    //grab difficulty
    uint256 bnTargetPerCoinDay;
    bnTargetPerCoinDay.SetCompact(nBits);

    //grab stake modifier
    uint64_t nStakeModifier = 0;
    int nStakeModifierHeight = 0;
    int64_t nStakeModifierTime = 0;
    if (!GetKernelStakeModifier(blockFrom.GetHash(), nStakeModifier, nStakeModifierHeight, nStakeModifierTime, fPrintProofOfStake)) {
        LogPrintf("CheckStakeKernelHash(): failed to get kernel stake modifier \n");
        return false;
    }

    if (nTimeBlockFrom + GetStakeMinAge(nStakeModifierHeight) > nTimeTx) // Min age requirement
        return error("CheckStakeKernelHash() : min age violation - nTimeBlockFrom=%d nStakeMinAge=%d nTimeTx=%d", nTimeBlockFrom, GetStakeMinAge(nStakeModifierHeight), nTimeTx);

    //create data stream once instead of repeating it in the loop
    CDataStream ss(SER_GETHASH, 0);
    ss << nStakeModifier;

    //if wallet is simply checking to make sure a hash is valid
    if (fCheck) {
        hashProofOfStake = stakeHash(nTimeTx, ss, prevout.n, prevout.hash, nTimeBlockFrom);
        return stakeTargetHit(hashProofOfStake, nValueIn, bnTargetPerCoinDay);
    }

    bool fSuccess = false;
    unsigned int nTryTime = 0;
    unsigned int i;
    int nHeightStart = chainActive.Height();
    for (i = 0; i < (nHashDrift); i++) //iterate the hashing
    {
        //new block came in, move on
        if (chainActive.Height() != nHeightStart)
            break;

        //hash this iteration
        nTryTime = nTimeTx + nHashDrift - i;
        hashProofOfStake = stakeHash(nTryTime, ss, prevout.n, prevout.hash, nTimeBlockFrom);

        // if stake hash does not meet the target then continue to next iteration
        if (!stakeTargetHit(hashProofOfStake, nValueIn, bnTargetPerCoinDay))
            continue;

        fSuccess = true; // if we make it this far then we have successfully created a stake hash
        nTimeTx = nTryTime;

        if (fDebug || fPrintProofOfStake) {
            LogPrintf("CheckStakeKernelHash() : using modifier %s at height=%d timestamp=%s for block from height=%d timestamp=%s\n", boost::lexical_cast<std::string>(nStakeModifier).c_str(), nStakeModifierHeight, DateTimeStrFormat("%Y-%m-%d %H:%M:%S", nStakeModifierTime).c_str(), mapBlockIndex[blockFrom.GetHash()]->nHeight, DateTimeStrFormat("%Y-%m-%d %H:%M:%S", blockFrom.GetBlockTime()).c_str());
            LogPrintf("CheckStakeKernelHash() : pass protocol=%s modifier=%s nTimeBlockFrom=%u prevoutHash=%s nTimeTxPrev=%u nPrevout=%u nTimeTx=%u hashProof=%s\n", "0.3", boost::lexical_cast<std::string>(nStakeModifier).c_str(), nTimeBlockFrom, prevout.hash.ToString().c_str(), nTimeBlockFrom, prevout.n, nTryTime, hashProofOfStake.ToString().c_str());
        }
        break;
    }

    // Record that we hashed on the current tip (used to throttle staking).
    mapHashedBlocks.clear();
    mapHashedBlocks[chainActive.Tip()->nHeight] = GetTime(); //store a time stamp of when we last hashed on this block
    return fSuccess;
}

// Check kernel
hash target and coinstake signature bool CheckProofOfStake(const CBlock block, uint256& hashProofOfStake) { const CTransaction tx = block.vtx[1]; if (!tx.IsCoinStake()) return error("CheckProofOfStake() : called on non-coinstake %s", tx.GetHash().ToString().c_str()); // Kernel (input 0) must match the stake hash target per coin age (nBits) const CTxIn& txin = tx.vin[0]; // First try finding the previous transaction in database uint256 hashBlock; CTransaction txPrev; if (!GetTransaction(txin.prevout.hash, txPrev, hashBlock, true)) return error("CheckProofOfStake() : INFO: read txPrev failed"); //verify signature and script if (!VerifyScript(txin.scriptSig, txPrev.vout[txin.prevout.n].scriptPubKey, STANDARD_SCRIPT_VERIFY_FLAGS, TransactionSignatureChecker(&tx, 0))) return error("CheckProofOfStake() : VerifySignature failed on coinstake %s", tx.GetHash().ToString().c_str()); CBlockIndex* pindex = NULL; BlockMap::iterator it = mapBlockIndex.find(hashBlock); if (it != mapBlockIndex.end()) pindex = it->second; else return error("CheckProofOfStake() : read block failed"); // Read block header CBlock blockprev; if (!ReadBlockFromDisk(blockprev, pindex->GetBlockPos())) return error("CheckProofOfStake(): INFO: failed to find block"); unsigned int nInterval = 0; unsigned int nTime = block.nTime; if (!CheckStakeKernelHash(block.nBits, blockprev, txPrev, txin.prevout, nTime, nInterval, true, hashProofOfStake, fDebug)) return error("CheckProofOfStake() : INFO: check kernel failed on coinstake %s, hashProof=%s \n", tx.GetHash().ToString().c_str(), hashProofOfStake.ToString().c_str()); // may occur during initial download or if behind on block chain sync return true; } // Check whether the coinstake timestamp meets protocol bool CheckCoinStakeTimestamp(int64_t nTimeBlock, int64_t nTimeTx) { // v0.3 protocol return (nTimeBlock == nTimeTx); } // Get stake modifier checksum unsigned int GetStakeModifierChecksum(const CBlockIndex* pindex) { assert(pindex->pprev || 
pindex->GetBlockHash() == Params().HashGenesisBlock()); // Hash previous checksum with flags, hashProofOfStake and nStakeModifier CDataStream ss(SER_GETHASH, 0); if (pindex->pprev) ss << pindex->pprev->nStakeModifierChecksum; ss << pindex->nFlags << pindex->hashProofOfStake << pindex->nStakeModifier; uint256 hashChecksum = Hash(ss.begin(), ss.end()); hashChecksum >>= (256 - 32); return hashChecksum.Get64(); } // Check stake modifier hard checkpoints bool CheckStakeModifierCheckpoints(int nHeight, unsigned int nStakeModifierChecksum) { if (fTestNet) return true; // Testnet has no checkpoints if (mapStakeModifierCheckpoints.count(nHeight)) { return nStakeModifierChecksum == mapStakeModifierCheckpoints[nHeight]; } return true; }
/*
  ==============================================================================

   This file is part of the juce_core module of the JUCE library.
   Copyright (c) 2013 - Raw Material Software Ltd.

   Permission to use, copy, modify, and/or distribute this software for any
   purpose with or without fee is hereby granted, provided that the above
   copyright notice and this permission notice appear in all copies.

   THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
   WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
   MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
   ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ------------------------------------------------------------------------------

   NOTE! This permissive ISC license applies ONLY to files within the juce_core
   module! All other JUCE modules are covered by a dual GPL/commercial license,
   so if you are using any other modules, be sure to check that you also comply
   with their license.

   For more details, visit www.juce.com

  ==============================================================================
*/

// Cross-platform (Winsock / BSD sockets) implementation of StreamingSocket
// (TCP) and DatagramSocket (UDP).

#if JUCE_MSVC
 #pragma warning (push)
 #pragma warning (disable : 4127 4389 4018)
#endif

#ifndef AI_NUMERICSERV  // (missing in older Mac SDKs)
 #define AI_NUMERICSERV 0x1000
#endif

// Platform aliases so the shared code below can use one handle/length type.
#if JUCE_WINDOWS
 typedef int       juce_socklen_t;
 typedef SOCKET    SocketHandle;
#else
 typedef socklen_t juce_socklen_t;
 typedef int       SocketHandle;
#endif

//==============================================================================
// Low-level helpers shared by both socket classes.
namespace SocketHelpers
{
    // One-time Winsock initialisation; a no-op on POSIX platforms.
    static void initSockets()
    {
       #if JUCE_WINDOWS
        static bool socketsStarted = false;

        if (! socketsStarted)
        {
            socketsStarted = true;

            WSADATA wsaData;
            const WORD wVersionRequested = MAKEWORD (1, 1);
            WSAStartup (wVersionRequested, &wsaData);
        }
       #endif
    }

    // Applies the standard per-socket options: 64K send/recv buffers, plus
    // SO_BROADCAST (datagram, if allowed) or TCP_NODELAY (stream).
    static bool resetSocketOptions (const SocketHandle handle, const bool isDatagram, const bool allowBroadcast) noexcept
    {
        const int sndBufSize = 65536;
        const int rcvBufSize = 65536;
        const int one = 1;

        return handle > 0
                && setsockopt (handle, SOL_SOCKET, SO_RCVBUF, (const char*) &rcvBufSize, sizeof (rcvBufSize)) == 0
                && setsockopt (handle, SOL_SOCKET, SO_SNDBUF, (const char*) &sndBufSize, sizeof (sndBufSize)) == 0
                && (isDatagram ? ((! allowBroadcast) || setsockopt (handle, SOL_SOCKET, SO_BROADCAST, (const char*) &one, sizeof (one)) == 0)
                               : (setsockopt (handle, IPPROTO_TCP, TCP_NODELAY, (const char*) &one, sizeof (one)) == 0));
    }

    // Closes the socket, taking care to unblock any thread stuck in a read or
    // accept, and to serialise against in-progress reads via readLock.
    static void closeSocket (volatile int& handle, CriticalSection& readLock,
                             const bool isListener, int portNumber, bool& connected) noexcept
    {
        const SocketHandle h = handle;
        handle = -1;

       #if JUCE_WINDOWS
        ignoreUnused (portNumber, isListener, readLock);

        if (h != SOCKET_ERROR || connected)
            closesocket (h);

        // make sure any read process finishes before we delete the socket
        CriticalSection::ScopedLockType lock(readLock);
        connected = false;
       #else
        if (connected)
        {
            connected = false;

            if (isListener)
            {
                // need to do this to interrupt the accept() function..
                StreamingSocket temp;
                temp.connect (IPAddress::local().toString(), portNumber, 1000);
            }
        }

        if (h != -1)
        {
            // unblock any pending read requests
            ::shutdown (h, SHUT_RDWR);

            {
                // see man-page of recv on linux about a race condition where the
                // shutdown command is lost if the receiving thread does not have
                // a chance to process before close is called. On Mac OS X shutdown
                // does not unblock a select call, so using a lock here will dead-lock
                // both threads.
               #if JUCE_LINUX
                CriticalSection::ScopedLockType lock (readLock);
                ::close (h);
               #else
                ::close (h);
                CriticalSection::ScopedLockType lock (readLock);
               #endif
            }
        }
       #endif
    }

    // Binds to the given port; an empty address means INADDR_ANY.
    static bool bindSocket (const SocketHandle handle, const int port, const String& address) noexcept
    {
        if (handle <= 0 || port < 0)
            return false;

        struct sockaddr_in servTmpAddr;
        zerostruct (servTmpAddr); // (can't use "= { 0 }" on this object because it's typedef'ed as a C struct)
        servTmpAddr.sin_family = PF_INET;
        servTmpAddr.sin_addr.s_addr = htonl (INADDR_ANY);
        servTmpAddr.sin_port = htons ((uint16) port);

        if (address.isNotEmpty())
            servTmpAddr.sin_addr.s_addr = ::inet_addr (address.toUTF8());

        return bind (handle, (struct sockaddr*) &servTmpAddr, sizeof (struct sockaddr_in)) >= 0;
    }

    // Returns the locally-bound port number, or -1 on failure.
    static int getBoundPort (const SocketHandle handle) noexcept
    {
        if (handle <= 0)
            return -1;

        struct sockaddr_in sin_addr;
        socklen_t len = sizeof (sin_addr);

        if (getsockname (handle, (struct sockaddr*) &sin_addr, &len) == 0)
            return ntohs (sin_addr.sin_port);

        return -1;
    }

    // Reads up to maxBytesToRead bytes (looping until full if requested).
    // When senderIP/senderPort are supplied, uses recvfrom and reports the
    // peer's address (datagram use). Returns bytes read, or -1 if nothing
    // could be read / the connection dropped.
    static int readSocket (const SocketHandle handle,
                           void* const destBuffer, const int maxBytesToRead,
                           bool volatile& connected,
                           const bool blockUntilSpecifiedAmountHasArrived,
                           CriticalSection& readLock,
                           String* senderIP = nullptr,
                           int* senderPort = nullptr) noexcept
    {
        int bytesRead = 0;

        while (bytesRead < maxBytesToRead)
        {
            long bytesThisTime = -1;
            char* const buffer = static_cast<char*> (destBuffer) + bytesRead;
            const juce_socklen_t numToRead = (juce_socklen_t) (maxBytesToRead - bytesRead);

            {
                // avoid race-condition
                CriticalSection::ScopedTryLockType lock (readLock);

                if (lock.isLocked())
                {
                    if (senderIP == nullptr || senderPort == nullptr)
                    {
                        bytesThisTime = ::recv (handle, buffer, numToRead, 0);
                    }
                    else
                    {
                        sockaddr_in client;
                        socklen_t clientLen = sizeof (sockaddr);

                        bytesThisTime = ::recvfrom (handle, buffer, numToRead, 0, (sockaddr*) &client, &clientLen);

                        *senderIP = String::fromUTF8 (inet_ntoa (client.sin_addr), 16);
                        *senderPort = ntohs (client.sin_port);
                    }
                }
            }

            if (bytesThisTime <= 0 || ! connected)
            {
                // -1 only if we got nothing at all; a short read still counts.
                if (bytesRead == 0)
                    bytesRead = -1;

                break;
            }

            bytesRead += bytesThisTime;

            if (! blockUntilSpecifiedAmountHasArrived)
                break;
        }

        return (int) bytesRead;
    }

    // select()-based wait. Returns 1 when ready, 0 on timeout, -1 on error
    // (including a pending socket error or the handle being closed mid-wait).
    static int waitForReadiness (const volatile int& handle, CriticalSection& readLock,
                                 const bool forReading, const int timeoutMsecs) noexcept
    {
        // avoid race-condition
        CriticalSection::ScopedTryLockType lock (readLock);

        if (! lock.isLocked())
            return -1;

        int h = handle;

        struct timeval timeout;
        struct timeval* timeoutp;

        if (timeoutMsecs >= 0)
        {
            timeout.tv_sec = timeoutMsecs / 1000;
            timeout.tv_usec = (timeoutMsecs % 1000) * 1000;
            timeoutp = &timeout;
        }
        else
        {
            timeoutp = 0; // negative timeout => block indefinitely
        }

        fd_set rset, wset;
        FD_ZERO (&rset);
        FD_SET (h, &rset);
        FD_ZERO (&wset);
        FD_SET (h, &wset);

        fd_set* const prset = forReading ? &rset : nullptr;
        fd_set* const pwset = forReading ? nullptr : &wset;

       #if JUCE_WINDOWS
        if (select ((int) h + 1, prset, pwset, 0, timeoutp) < 0)
            return -1;
       #else
        {
            // retry if select() is interrupted by a signal
            int result;
            while ((result = select (h + 1, prset, pwset, 0, timeoutp)) < 0
                    && errno == EINTR)
            {
            }

            if (result < 0)
                return -1;
        }
       #endif

        // we are closing
        if (handle < 0)
            return -1;

        {
            // report a deferred socket error (e.g. failed async connect) as -1
            int opt;
            juce_socklen_t len = sizeof (opt);

            if (getsockopt (h, SOL_SOCKET, SO_ERROR, (char*) &opt, &len) < 0
                 || opt != 0)
                return -1;
        }

        return FD_ISSET (h, forReading ? &rset : &wset) ? 1 : 0;
    }

    // Toggles blocking / non-blocking mode on the handle.
    static bool setSocketBlockingState (const SocketHandle handle, const bool shouldBlock) noexcept
    {
       #if JUCE_WINDOWS
        u_long nonBlocking = shouldBlock ? 0 : (u_long) 1;
        return ioctlsocket (handle, FIONBIO, &nonBlocking) == 0;
       #else
        int socketFlags = fcntl (handle, F_GETFL, 0);

        if (socketFlags == -1)
            return false;

        if (shouldBlock)
            socketFlags &= ~O_NONBLOCK;
        else
            socketFlags |= O_NONBLOCK;

        return fcntl (handle, F_SETFL, socketFlags) == 0;
       #endif
    }

    // Resolves hostName:portNumber via getaddrinfo. Caller owns the returned
    // list and must release it with freeaddrinfo().
    static addrinfo* getAddressInfo (const bool isDatagram, const String& hostName, int portNumber)
    {
        struct addrinfo hints;
        zerostruct (hints);

        hints.ai_family = AF_UNSPEC;
        hints.ai_socktype = isDatagram ? SOCK_DGRAM : SOCK_STREAM;
        hints.ai_flags = AI_NUMERICSERV;

        struct addrinfo* info = nullptr;
        if (getaddrinfo (hostName.toUTF8(), String (portNumber).toUTF8(), &hints, &info) == 0 && info != nullptr)
            return info;

        return nullptr;
    }

    // Connects with a timeout by switching the socket to non-blocking mode,
    // issuing connect(), and waiting for writability; restores blocking mode.
    static bool connectSocket (int volatile& handle,
                               CriticalSection& readLock,
                               const String& hostName,
                               const int portNumber,
                               const int timeOutMillisecs) noexcept
    {
        if (struct addrinfo* info = getAddressInfo (false, hostName, portNumber))
        {
            if (handle < 0)
                handle = (int) socket (info->ai_family, info->ai_socktype, 0);

            if (handle < 0)
            {
                freeaddrinfo (info);
                return false;
            }

            setSocketBlockingState (handle, false);
            const int result = ::connect (handle, info->ai_addr, (socklen_t) info->ai_addrlen);
            freeaddrinfo (info);

            if (result < 0)
            {
               #if JUCE_WINDOWS
                if (result == SOCKET_ERROR && WSAGetLastError() == WSAEWOULDBLOCK)
               #else
                if (errno == EINPROGRESS)
               #endif
                {
                    // connection is in progress: wait (up to the timeout) for
                    // the socket to become writable
                    if (waitForReadiness (handle, readLock, false, timeOutMillisecs) != 1)
                    {
                        setSocketBlockingState (handle, true);
                        return false;
                    }
                }
            }

            setSocketBlockingState (handle, true);
            resetSocketOptions (handle, false, false);

            return true;
        }

        return false;
    }

    // Sets SO_REUSEADDR so a recently-closed port can be rebound.
    static void makeReusable (int handle) noexcept
    {
        const int reuse = 1;
        setsockopt (handle, SOL_SOCKET, SO_REUSEADDR, (const char*) &reuse, sizeof (reuse));
    }

    // Joins (join=true) or leaves an IPv4 multicast group, optionally on a
    // specific local interface.
    static bool multicast (int handle, const String& multicastIPAddress,
                           const String& interfaceIPAddress, bool join) noexcept
    {
        struct ip_mreq mreq;

        zerostruct (mreq);
        mreq.imr_multiaddr.s_addr = inet_addr (multicastIPAddress.toUTF8());
        mreq.imr_interface.s_addr = INADDR_ANY;

        if (interfaceIPAddress.isNotEmpty())
            mreq.imr_interface.s_addr = inet_addr (interfaceIPAddress.toUTF8());

        int joinCmd = join ? IP_ADD_MEMBERSHIP : IP_DROP_MEMBERSHIP;

        return setsockopt (handle, IPPROTO_IP, joinCmd, (const char*) &mreq, sizeof (mreq)) == 0;
    }
}

//==============================================================================
StreamingSocket::StreamingSocket()
    : portNumber (0),
      handle (-1),
      connected (false),
      isListener (false)
{
    SocketHelpers::initSockets();
}

// Private constructor used by waitForNextConnection() to wrap an
// already-accepted handle.
StreamingSocket::StreamingSocket (const String& host, int portNum, int h)
    : hostName (host),
      portNumber (portNum),
      handle (h),
      connected (true),
      isListener (false)
{
    SocketHelpers::initSockets();
    SocketHelpers::resetSocketOptions (h, false, false);
}

StreamingSocket::~StreamingSocket()
{
    close();
}

//==============================================================================
int StreamingSocket::read (void* destBuffer, const int maxBytesToRead, bool shouldBlock)
{
    // Listener sockets can't be read from directly.
    return (connected && ! isListener) ? SocketHelpers::readSocket (handle, destBuffer, maxBytesToRead,
                                                                    connected, shouldBlock, readLock)
                                       : -1;
}

int StreamingSocket::write (const void* sourceBuffer, const int numBytesToWrite)
{
    if (isListener || ! connected)
        return -1;

    return (int) ::send (handle, (const char*) sourceBuffer, (juce_socklen_t) numBytesToWrite, 0);
}

//==============================================================================
int StreamingSocket::waitUntilReady (const bool readyForReading, const int timeoutMsecs) const
{
    return connected ? SocketHelpers::waitForReadiness (handle, readLock, readyForReading, timeoutMsecs)
                     : -1;
}

//==============================================================================
bool StreamingSocket::bindToPort (const int port)
{
    return bindToPort (port, String());
}

bool StreamingSocket::bindToPort (const int port, const String& addr)
{
    return SocketHelpers::bindSocket (handle, port, addr);
}

int StreamingSocket::getBoundPort() const noexcept
{
    return SocketHelpers::getBoundPort (handle);
}

bool StreamingSocket::connect (const String& remoteHostName, const int remotePortNumber, const int timeOutMillisecs)
{
    if (isListener)
    {
        jassertfalse;    // a listener socket can't connect to another one!
        return false;
    }

    if (connected)
        close();

    hostName = remoteHostName;
    portNumber = remotePortNumber;
    isListener = false;

    connected = SocketHelpers::connectSocket (handle, readLock, remoteHostName, remotePortNumber, timeOutMillisecs);

    if (! (connected && SocketHelpers::resetSocketOptions (handle, false, false)))
    {
        close();
        return false;
    }

    return true;
}

void StreamingSocket::close()
{
    SocketHelpers::closeSocket (handle, readLock, isListener, portNumber, connected);

    hostName.clear();
    portNumber = 0;
    handle = -1;
    isListener = false;
}

//==============================================================================
bool StreamingSocket::createListener (const int newPortNumber, const String& localHostName)
{
    if (connected)
        close();

    hostName = "listener";
    portNumber = newPortNumber;
    isListener = true;

    struct sockaddr_in servTmpAddr;
    zerostruct (servTmpAddr);
    servTmpAddr.sin_family = PF_INET;
    servTmpAddr.sin_addr.s_addr = htonl (INADDR_ANY);

    if (localHostName.isNotEmpty())
        servTmpAddr.sin_addr.s_addr = ::inet_addr (localHostName.toUTF8());

    servTmpAddr.sin_port = htons ((uint16) portNumber);

    handle = (int) socket (AF_INET, SOCK_STREAM, 0);

    if (handle < 0)
        return false;

   #if ! JUCE_WINDOWS // on windows, adding this option produces behaviour different to posix
    SocketHelpers::makeReusable (handle);
   #endif

    if (bind (handle, (struct sockaddr*) &servTmpAddr, sizeof (struct sockaddr_in)) < 0
         || listen (handle, SOMAXCONN) < 0)
    {
        close();
        return false;
    }

    connected = true;
    return true;
}

StreamingSocket* StreamingSocket::waitForNextConnection() const
{
    // To call this method, you first have to use createListener() to
    // prepare this socket as a listener.
    jassert (isListener || ! connected);

    if (connected && isListener)
    {
        struct sockaddr_storage address;
        juce_socklen_t len = sizeof (address);
        const int newSocket = (int) accept (handle, (struct sockaddr*) &address, &len);

        if (newSocket >= 0 && connected)
            return new StreamingSocket (inet_ntoa (((struct sockaddr_in*) &address)->sin_addr),
                                        portNumber, newSocket);
    }

    return nullptr;
}

bool StreamingSocket::isLocal() const noexcept
{
    return hostName == "127.0.0.1";
}

//==============================================================================
//==============================================================================
DatagramSocket::DatagramSocket (const bool canBroadcast)
    : handle (-1),
      isBound (false),
      lastServerPort (-1),
      lastServerAddress (nullptr)
{
    SocketHelpers::initSockets();

    handle = (int) socket (AF_INET, SOCK_DGRAM, 0);
    SocketHelpers::resetSocketOptions (handle, true, canBroadcast);
    SocketHelpers::makeReusable (handle);
}

DatagramSocket::~DatagramSocket()
{
    // release the cached getaddrinfo() result (see write())
    if (lastServerAddress != nullptr)
        freeaddrinfo (static_cast <struct addrinfo*> (lastServerAddress));

    bool connected = false;
    SocketHelpers::closeSocket (handle, readLock, false, 0, connected);
}

bool DatagramSocket::bindToPort (const int port)
{
    return bindToPort (port, String());
}

bool DatagramSocket::bindToPort (const int port, const String& addr)
{
    if (SocketHelpers::bindSocket (handle, port, addr))
    {
        isBound = true;
        lastBindAddress = addr; // remembered for multicast joins/leaves
        return true;
    }

    return false;
}

int DatagramSocket::getBoundPort() const noexcept
{
    return isBound ? SocketHelpers::getBoundPort (handle) : -1;
}

//==============================================================================
int DatagramSocket::waitUntilReady (const bool readyForReading, const int timeoutMsecs) const
{
    return SocketHelpers::waitForReadiness (handle, readLock, readyForReading, timeoutMsecs);
}

int DatagramSocket::read (void* destBuffer, int maxBytesToRead, bool shouldBlock)
{
    bool connected = true;

    return isBound ? SocketHelpers::readSocket (handle, destBuffer, maxBytesToRead, connected,
                                                shouldBlock, readLock)
                   : -1;
}

int DatagramSocket::read (void* destBuffer, int maxBytesToRead, bool shouldBlock, String& senderIPAddress, int& senderPort)
{
    bool connected = true;

    return isBound ? SocketHelpers::readSocket (handle, destBuffer, maxBytesToRead, connected,
                                                shouldBlock, readLock, &senderIPAddress, &senderPort)
                   : -1;
}

int DatagramSocket::write (const String& remoteHostname, int remotePortNumber,
                           const void* sourceBuffer, int numBytesToWrite)
{
    struct addrinfo*& info = reinterpret_cast <struct addrinfo*&> (lastServerAddress);

    // getaddrinfo can be quite slow so cache the result of the address lookup
    if (info == nullptr || remoteHostname != lastServerHost || remotePortNumber != lastServerPort)
    {
        if (info != nullptr)
            freeaddrinfo (info);

        if ((info = SocketHelpers::getAddressInfo (true, remoteHostname, remotePortNumber)) == nullptr)
            return -1;

        lastServerHost = remoteHostname;
        lastServerPort = remotePortNumber;
    }

    return (int) ::sendto (handle, (const char*) sourceBuffer,
                           (juce_socklen_t) numBytesToWrite, 0,
                           info->ai_addr, (socklen_t) info->ai_addrlen);
}

bool DatagramSocket::joinMulticast (const String& multicastIPAddress)
{
    if (! isBound)
        return false;

    return SocketHelpers::multicast (handle, multicastIPAddress, lastBindAddress, true);
}

bool DatagramSocket::leaveMulticast (const String& multicastIPAddress)
{
    if (! isBound)
        return false;

    return SocketHelpers::multicast (handle, multicastIPAddress, lastBindAddress, false);
}

#if JUCE_MSVC
 #pragma warning (pop)
#endif
// Entry point: runs the shared tic-tac-toe driver, parameterised on the
// buddy_mgr package-manager policy declared in package_mgr.h.
#include "../tic_tac_toe.cpp"
#include "package_mgr.h"

int main(int argc, char** argv)
{
    run_tic_tac_toe<buddy_mgr>(argc, argv);
    return 0;
}
//-----------------------------------------------------------------------------
// Copyright (c) 2013 GarageGames, LLC
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//-----------------------------------------------------------------------------

// Deserialises SimObjects from the Taml binary format: a signed, versioned,
// optionally zip-compressed stream of nested elements, attributes and custom
// nodes. Read order here must exactly mirror TamlBinaryWriter's write order.

#include "persistence/taml/tamlBinaryReader.h"

#ifndef _ZIPSUBSTREAM_H_
#include "io/zip/zipSubStream.h"
#endif

// Debug Profiling.
#include "debug/profiler.h"

//-----------------------------------------------------------------------------

// Top-level entry: validates the Taml signature, reads the version and
// compression flag, then parses the root element (through a zip stream when
// compressed). Returns NULL on any failure.
SimObject* TamlBinaryReader::read( FileStream& stream )
{
    // Debug Profiling.
    PROFILE_SCOPE(TamlBinaryReader_Read);

    // Read Taml signature.
    StringTableEntry tamlSignature = stream.readSTString();

    // Is the signature correct?
    if ( tamlSignature != StringTable->insert( TAML_SIGNATURE ) )
    {
        // Warn.
        Con::warnf("Taml: Cannot read binary file as signature is incorrect '%s'.", tamlSignature );
        return NULL;
    }

    // Read version Id.
    U32 versionId;
    stream.read( &versionId );

    // Read compressed flag.
    bool compressed;
    stream.read( &compressed );

    SimObject* pSimObject = NULL;

    // Is the stream compressed?
    if ( compressed )
    {
        // Yes, so attach zip stream.
        ZipSubRStream zipStream;
        zipStream.attachStream( &stream );

        // Parse element.
        pSimObject = parseElement( zipStream, versionId );

        // Detach zip stream.
        zipStream.detachStream();
    }
    else
    {
        // No, so parse element.
        pSimObject = parseElement( stream, versionId );
    }

    return pSimObject;
}

//-----------------------------------------------------------------------------

// Clears per-parse state so the reader can be reused for another stream.
void TamlBinaryReader::resetParse( void )
{
    // Debug Profiling.
    PROFILE_SCOPE(TamlBinaryReader_ResetParse);

    // Clear object reference map.
    mObjectReferenceMap.clear();
}

//-----------------------------------------------------------------------------

// Recursively parses one element: type name, object name, reference Ids,
// attributes, children and custom nodes. A non-zero "reference-to" Id resolves
// to a previously-parsed object instead of creating a new one.
SimObject* TamlBinaryReader::parseElement( Stream& stream, const U32 versionId )
{
    // Debug Profiling.
    PROFILE_SCOPE(TamlBinaryReader_ParseElement);

    SimObject* pSimObject = NULL;

#ifdef TORQUE_DEBUG
    // Format the type location.
    char typeLocationBuffer[64];
    dSprintf( typeLocationBuffer, sizeof(typeLocationBuffer), "Taml [format='binary' offset=%u]", stream.getPosition() );
#endif

    // Fetch element name.
    StringTableEntry typeName = stream.readSTString();

    // Fetch object name.
    StringTableEntry objectName = stream.readSTString();

    // Read references.
    U32 tamlRefId;
    U32 tamlRefToId;
    stream.read( &tamlRefId );
    stream.read( &tamlRefToId );

    // Do we have a reference to Id?
    if ( tamlRefToId != 0 )
    {
        // Yes, so fetch reference.
        typeObjectReferenceHash::iterator referenceItr = mObjectReferenceMap.find( tamlRefToId );

        // Did we find the reference?
        if ( referenceItr == mObjectReferenceMap.end() )
        {
            // No, so warn.
            Con::warnf( "Taml: Could not find a reference Id of '%d'", tamlRefToId );
            return NULL;
        }

        // Return object.
        return referenceItr->value;
    }

#ifdef TORQUE_DEBUG
    // Create type.
    pSimObject = Taml::createType( typeName, mpTaml, typeLocationBuffer );
#else
    // Create type.
    pSimObject = Taml::createType( typeName, mpTaml );
#endif

    // Finish if we couldn't create the type.
    if ( pSimObject == NULL )
        return NULL;

    // Find Taml callbacks.
    TamlCallbacks* pCallbacks = dynamic_cast<TamlCallbacks*>( pSimObject );

    // Are there any Taml callbacks?
    if ( pCallbacks != NULL )
    {
        // Yes, so call it.
        mpTaml->tamlPreRead( pCallbacks );
    }

    // Parse attributes.
    parseAttributes( stream, pSimObject, versionId );

    // Does the object require a name?
    if ( objectName == StringTable->EmptyString )
    {
        // No, so just register anonymously.
        pSimObject->registerObject();
    }
    else
    {
        // Yes, so register a named object.
        pSimObject->registerObject( objectName );

        // Was the name assigned?
        if ( pSimObject->getName() != objectName )
        {
            // No, so warn that the name was rejected.
#ifdef TORQUE_DEBUG
            Con::warnf( "Taml::parseElement() - Registered an instance of type '%s' but a request to name it '%s' was rejected.  This is typically because an object of that name already exists.  '%s'", typeName, objectName, typeLocationBuffer );
#else
            Con::warnf( "Taml::parseElement() - Registered an instance of type '%s' but a request to name it '%s' was rejected.  This is typically because an object of that name already exists.", typeName, objectName );
#endif
        }
    }

    // Do we have a reference Id?
    if ( tamlRefId != 0 )
    {
        // Yes, so insert reference.
        mObjectReferenceMap.insert( tamlRefId, pSimObject );
    }

    // Parse custom elements.
    TamlCustomNodes customProperties;

    // Parse children.
    parseChildren( stream, pCallbacks, pSimObject, versionId );

    // Parse custom elements.
    parseCustomElements( stream, pCallbacks, customProperties, versionId );

    // Are there any Taml callbacks?
    if ( pCallbacks != NULL )
    {
        // Yes, so call it.
        mpTaml->tamlPostRead( pCallbacks, customProperties );
    }

    // Return object.
    return pSimObject;
}

//-----------------------------------------------------------------------------

// Reads the attribute count then each (name, value) pair, applying each to the
// object as a prefixed data field.
void TamlBinaryReader::parseAttributes( Stream& stream, SimObject* pSimObject, const U32 versionId )
{
    // Debug Profiling.
    PROFILE_SCOPE(TamlBinaryReader_ParseAttributes);

    // Sanity!
    AssertFatal( pSimObject != NULL, "Taml: Cannot parse attributes on a NULL object." );

    // Fetch attribute count.
    U32 attributeCount;
    stream.read( &attributeCount );

    // Finish if no attributes.
    if ( attributeCount == 0 )
        return;

    char valueBuffer[4096];

    // Iterate attributes.
    for ( U32 index = 0; index < attributeCount; ++index )
    {
        // Fetch attribute.
        StringTableEntry attributeName = stream.readSTString();
        stream.readLongString( 4096, valueBuffer );

        // We can assume this is a field for now.
        pSimObject->setPrefixedDataField( attributeName, NULL, valueBuffer );
    }
}

//-----------------------------------------------------------------------------

// Reads the child count, recursively parses each child element, and attaches
// it to the parent (which must implement TamlChildren), honouring any
// container child-class restriction.
void TamlBinaryReader::parseChildren( Stream& stream, TamlCallbacks* pCallbacks, SimObject* pSimObject, const U32 versionId )
{
    // Debug Profiling.
    PROFILE_SCOPE(TamlBinaryReader_ParseChildren);

    // Sanity!
    AssertFatal( pSimObject != NULL, "Taml: Cannot parse children on a NULL object." );

    // Fetch children count.
    U32 childrenCount;
    stream.read( &childrenCount );

    // Finish if no children.
    if ( childrenCount == 0 )
        return;

    // Fetch the Taml children.
    TamlChildren* pChildren = dynamic_cast<TamlChildren*>( pSimObject );

    // Is this a sim set?
    if ( pChildren == NULL )
    {
        // No, so warn.
        Con::warnf("Taml: Child element found under parent but object cannot have children." );
        return;
    }

    // Fetch any container child class specifier.
    AbstractClassRep* pContainerChildClass = pSimObject->getClassRep()->getContainerChildClass( true );

    // Iterate children.
    for ( U32 index = 0; index < childrenCount; ++ index )
    {
        // Parse child element.
        SimObject* pChildSimObject = parseElement( stream, versionId );

        // Finish if child failed.
        if ( pChildSimObject == NULL )
            return;

        // Do we have a container child class?
        if ( pContainerChildClass != NULL )
        {
            // Yes, so is the child object the correctly derived type?
            if ( !pChildSimObject->getClassRep()->isClass( pContainerChildClass ) )
            {
                // No, so warn.
                Con::warnf("Taml: Child element '%s' found under parent '%s' but object is restricted to children of type '%s'.",
                    pChildSimObject->getClassName(),
                    pSimObject->getClassName(),
                    pContainerChildClass->getClassName() );

                // NOTE: We can't delete the object as it may be referenced elsewhere!
                pChildSimObject = NULL;

                // Skip.
                continue;
            }
        }

        // Add child.
        pChildren->addTamlChild( pChildSimObject );

        // Find Taml callbacks for child.
        TamlCallbacks* pChildCallbacks = dynamic_cast<TamlCallbacks*>( pChildSimObject );

        // Do we have callbacks on the child?
        if ( pChildCallbacks != NULL )
        {
            // Yes, so perform callback.
            mpTaml->tamlAddParent( pChildCallbacks, pSimObject );
        }
    }
}

//-----------------------------------------------------------------------------

// Reads the custom-node count and parses each named node tree, then hands the
// whole set to the object via the tamlCustomRead callback.
void TamlBinaryReader::parseCustomElements( Stream& stream, TamlCallbacks* pCallbacks, TamlCustomNodes& customNodes, const U32 versionId )
{
    // Debug Profiling.
    PROFILE_SCOPE(TamlBinaryReader_ParseCustomElement);

    // Read custom node count.
    U32 customNodeCount;
    stream.read( &customNodeCount );

    // Finish if no custom nodes.
    if ( customNodeCount == 0 )
        return;

    // Iterate custom nodes.
    for ( U32 nodeIndex = 0; nodeIndex < customNodeCount; ++nodeIndex )
    {
        //Read custom node name.
        StringTableEntry nodeName = stream.readSTString();

        // Add custom node.
        TamlCustomNode* pCustomNode = customNodes.addNode( nodeName );

        // Parse the custom node.
        parseCustomNode( stream, pCustomNode, versionId );
    }

    // Do we have callbacks?
    if ( pCallbacks == NULL )
    {
        // No, so warn.
        Con::warnf( "Taml: Encountered custom data but object does not support custom data." );
        return;
    }

    // Custom read callback.
    mpTaml->tamlCustomRead( pCallbacks, customNodes );
}

//-----------------------------------------------------------------------------

// Recursively parses one custom node: either a proxy SimObject element, or a
// named node with text, nested child nodes, and (name, value) fields.
void TamlBinaryReader::parseCustomNode( Stream& stream, TamlCustomNode* pCustomNode, const U32 versionId )
{
    // Fetch if a proxy object.
    bool isProxyObject;
    stream.read( &isProxyObject );

    // Is this a proxy object?
    if ( isProxyObject )
    {
        // Yes, so parse proxy object.
        SimObject* pProxyObject = parseElement( stream, versionId );

        // Add child node.
        pCustomNode->addNode( pProxyObject );

        return;
    }

    // No, so read custom node name.
    StringTableEntry nodeName = stream.readSTString();

    // Add child node.
    TamlCustomNode* pChildNode = pCustomNode->addNode( nodeName );

    // Read child node text.
    char childNodeTextBuffer[MAX_TAML_NODE_FIELDVALUE_LENGTH];
    stream.readLongString( MAX_TAML_NODE_FIELDVALUE_LENGTH, childNodeTextBuffer );
    pChildNode->setNodeText( childNodeTextBuffer );

    // Read child node count.
    U32 childNodeCount;
    stream.read( &childNodeCount );

    // Do we have any children nodes?
    if ( childNodeCount > 0 )
    {
        // Yes, so parse children nodes.
        for( U32 childIndex = 0; childIndex < childNodeCount; ++childIndex )
        {
            // Parse child node.
            parseCustomNode( stream, pChildNode, versionId );
        }
    }

    // Read child field count.
    U32 childFieldCount;
    stream.read( &childFieldCount );

    // Do we have any child fields?
    if ( childFieldCount > 0 )
    {
        // Yes, so parse child fields.
        for( U32 childFieldIndex = 0; childFieldIndex < childFieldCount; ++childFieldIndex )
        {
            // Read field name.
            StringTableEntry fieldName = stream.readSTString();

            // Read field value.
            char valueBuffer[MAX_TAML_NODE_FIELDVALUE_LENGTH];
            stream.readLongString( MAX_TAML_NODE_FIELDVALUE_LENGTH, valueBuffer );

            // Add field.
            pChildNode->addField( fieldName, valueBuffer );
        }
    }
}
//+------------------------------------------------------------------------ // // Microsoft Windows // Copyright (C) Microsoft Corporation, 1993. // // File: bm_rrpc.hxx // // Contents: test class definition // // Classes: CRawRpc // // Functions: // // History: 19-July-93 t-martig Created // //-------------------------------------------------------------------------- #ifndef _BM_RAWRPC_HXX_ #define _BM_RAWRPC_HXX_ #include <bm_base.hxx> #include <rawrpc.h> // IRpcTest class CRawRpc : public CTestBase { public: virtual TCHAR *Name (); virtual SCODE Setup (CTestInput *input); virtual SCODE Run (); virtual SCODE Report (CTestOutput &OutputFile); virtual SCODE Cleanup (); private: LPTSTR m_pszStringBinding; handle_t m_hRpc; ULONG m_ulIterations; BOOL m_fAverage; ULONG m_ulVoidTime[TEST_MAX_ITERATIONS]; ULONG m_ulVoidRCTime[TEST_MAX_ITERATIONS]; ULONG m_ulDwordInTime[TEST_MAX_ITERATIONS]; ULONG m_ulDwordOutTime[TEST_MAX_ITERATIONS]; ULONG m_ulDwordInOutTime[TEST_MAX_ITERATIONS]; ULONG m_ulStringInTime[TEST_MAX_ITERATIONS]; ULONG m_ulStringOutTime[TEST_MAX_ITERATIONS]; ULONG m_ulStringInOutTime[TEST_MAX_ITERATIONS]; ULONG m_ulGuidInTime[TEST_MAX_ITERATIONS]; ULONG m_ulGuidOutTime[TEST_MAX_ITERATIONS]; }; #endif
/*=============================================================================
    Copyright (c) 2001-2011 Joel de Guzman

    Distributed under the Boost Software License, Version 1.0. (See accompanying
    file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
=============================================================================*/
#include <boost/config/warning_disable.hpp>
#include <boost/spirit/include/qi.hpp>
#include <boost/spirit/include/phoenix.hpp>
#include <boost/unordered_map.hpp>
#include <boost/algorithm/string/trim.hpp>
#include <boost/cstdint.hpp>
#include <boost/foreach.hpp>
#include <boost/array.hpp>
#include <boost/scoped_array.hpp>
#include <boost/range/iterator_range.hpp>

#include <iostream>
#include <iomanip>
#include <fstream>
#include <iterator>     // std::istream_iterator, std::back_inserter
#include <vector>
#include <algorithm>
#include <string>
#include <map>

// We place the data here. Each line comprises various fields
typedef std::vector<std::string> ucd_line;
typedef std::vector<ucd_line> ucd_vector;
typedef std::vector<ucd_line>::iterator ucd_iterator;

// spirit and phoenix using declarations
using boost::spirit::qi::parse;
using boost::spirit::qi::hex;
using boost::spirit::qi::char_;
using boost::spirit::qi::eol;
using boost::spirit::qi::rule;
using boost::spirit::qi::omit;
using boost::spirit::qi::_1;
using boost::spirit::qi::_val;
using boost::phoenix::push_back;
using boost::phoenix::ref;

// basic unsigned types
using boost::uint8_t;
using boost::uint16_t;
using boost::uint32_t;

// a char range
struct ucd_range
{
    ucd_range(uint32_t start, uint32_t finish)
        : start(start), finish(finish) {}

    // we need this so we can use ucd_range as a multimap key
    friend bool operator<(ucd_range const& a, ucd_range const& b)
    {
        return a.start < b.start;
    }

    uint32_t start;
    uint32_t finish;
};

// Parses one UCD data file (UnicodeData.txt, Scripts.txt, ...) into a vector
// of semicolon-separated fields, and lets callers fold selected fields into a
// flat per-code-point array via collect().
class ucd_info
{
public:

    // Reads and parses the whole file. On open failure it only reports to
    // stderr and leaves 'info' empty (collect() will then assert).
    ucd_info(char const* filename)
    {
        std::ifstream in(filename, std::ios_base::in);

        if (!in)
        {
            std::cerr << "Error: Could not open input file: "
                << filename << std::endl;
        }
        else
        {
            std::string data; // We will read the contents here.
            in.unsetf(std::ios::skipws); // No white space skipping!
            std::copy(
                std::istream_iterator<char>(in),
                std::istream_iterator<char>(),
                std::back_inserter(data));

            typedef std::string::const_iterator iterator_type;
            iterator_type f = data.begin();
            iterator_type l = data.end();

            // Grammar: '#' starts a comment running to end-of-line; fields are
            // separated by ';'; a line is one or more fields.
            rule<iterator_type> endl = -('#' >> *(char_-eol)) >> eol;
            rule<iterator_type, std::string()> field = *(char_-(';'|endl)) >> (';'|&endl);
            rule<iterator_type, ucd_line()> line = +(field-endl) >> endl;
            rule<iterator_type, std::vector<ucd_line>()> file = +(endl | line[push_back(_val, _1)]);

            parse(f, l, file, info);
        }
    }

    // Folds column 'field' of every parsed line into 'data', indexed by code
    // point. With collect_properties=true the mapped property bits are OR-ed
    // in; otherwise the field is parsed as a hex value (case mappings).
    template <typename Array>
    void collect(Array& data, int field, bool collect_properties = true) const
    {
        BOOST_ASSERT(!info.empty());
        ucd_vector::const_iterator current = info.begin();
        ucd_vector::const_iterator end = info.end();

        while (current != end)
        {
            std::string range = (*current)[0];
            boost::trim(range);

            std::string::const_iterator f = range.begin();
            std::string::const_iterator l = range.end();

            // get the code-point range (either "XXXX" or "XXXX..YYYY")
            uint32_t start;
            uint32_t finish;
            parse(f, l, hex[ref(start) = ref(finish) = _1] >> -(".." >> hex[ref(finish) = _1]));

            // special case for UnicodeData.txt ranges:
            // a "<..., First>" line is paired with a "<..., Last>" line that
            // carries the end of the range.
            if ((*current)[1].find("First>") != std::string::npos)
            {
                ++current;
                BOOST_ASSERT(current != end);
                BOOST_ASSERT((*current)[1].find("Last>") != std::string::npos);

                std::string range = (*current)[0];
                boost::trim(range);
                f = range.begin();
                l = range.end();
                parse(f, l, hex[ref(finish) = _1]);
            }

            std::string code;
            if (field < int(current->size()))
                code = (*current)[field];
            boost::trim(code);

            // Only collect properties we are interested in
            if (collect_properties) // code for properties
            {
                if (!ignore_property(code))
                {
                    for (uint32_t i = start; i <= finish; ++i)
                        data[i] |= map_property(code);
                }
            }
            else // code for actual numeric values
            {
                for (uint32_t i = start; i <= finish; ++i)
                {
                    if (code.empty())
                    {
                        data[i] = 0; // signal that this code maps to itself
                    }
                    else
                    {
                        f = code.begin();
                        l = code.end();
                        parse(f, l, hex, data[i]);
                    }
                }
            }
            ++current;
        }
    }

private:

    // True if 'p' is not one of the property names we encode.
    static bool ignore_property(std::string const& p)
    {
        // We don't handle all properties
        std::map<std::string, int>& pm = get_property_map();
        std::map<std::string, int>::iterator i = pm.find(p);
        return i == pm.end();
    }

    // Maps a known property name to its encoded value; asserts if unknown
    // (callers must filter with ignore_property first).
    static int map_property(std::string const& p)
    {
        std::map<std::string, int>& pm = get_property_map();
        std::map<std::string, int>::iterator i = pm.find(p);
        BOOST_ASSERT(i != pm.end());
        return i->second;
    }

    // Lazily-built name -> value table for every property we encode.
    static std::map<std::string, int>& get_property_map()
    {
        // The properties we are interested in:
        static std::map<std::string, int> map;
        if (map.empty())
        {
            // General_Category
            map["Lu"] = 0;  map["Ll"] = 1;  map["Lt"] = 2;  map["Lm"] = 3;  map["Lo"] = 4;
            map["Mn"] = 8;  map["Me"] = 9;  map["Mc"] = 10;
            map["Nd"] = 16; map["Nl"] = 17; map["No"] = 18;
            map["Zs"] = 24; map["Zl"] = 25; map["Zp"] = 26;
            map["Cc"] = 32; map["Cf"] = 33; map["Co"] = 34; map["Cs"] = 35; map["Cn"] = 36;
            map["Pd"] = 40; map["Ps"] = 41; map["Pe"] = 42; map["Pc"] = 43;
            map["Po"] = 44; map["Pi"] = 45; map["Pf"] = 46;
            map["Sm"] = 48; map["Sc"] = 49; map["Sk"] = 50; map["So"] = 51;

            // Derived Properties.
            map["Alphabetic"] = 64;
            map["Uppercase"] = 128;
            map["Lowercase"] = 256;
            map["White_Space"] = 512;
            map["Hex_Digit"] = 1024;
            map["Noncharacter_Code_Point"] = 2048;
            map["Default_Ignorable_Code_Point"] = 4096;

            // Script
            map["Arabic"] = 0;
            map["Imperial_Aramaic"] = 1;
            map["Armenian"] = 2;
            map["Avestan"] = 3;
            map["Balinese"] = 4;
            map["Bamum"] = 5;
            map["Bengali"] = 6;
            map["Bopomofo"] = 7;
            map["Braille"] = 8;
            map["Buginese"] = 9;
            map["Buhid"] = 10;
            map["Canadian_Aboriginal"] = 11;
            map["Carian"] = 12;
            map["Cham"] = 13;
            map["Cherokee"] = 14;
            map["Coptic"] = 15;
            map["Cypriot"] = 16;
            map["Cyrillic"] = 17;
            map["Devanagari"] = 18;
            map["Deseret"] = 19;
            map["Egyptian_Hieroglyphs"] = 20;
            map["Ethiopic"] = 21;
            map["Georgian"] = 22;
            map["Glagolitic"] = 23;
            map["Gothic"] = 24;
            map["Greek"] = 25;
            map["Gujarati"] = 26;
            map["Gurmukhi"] = 27;
            map["Hangul"] = 28;
            map["Han"] = 29;
            map["Hanunoo"] = 30;
            map["Hebrew"] = 31;
            map["Hiragana"] = 32;
            map["Katakana_Or_Hiragana"] = 33;
            map["Old_Italic"] = 34;
            map["Javanese"] = 35;
            map["Kayah_Li"] = 36;
            map["Katakana"] = 37;
            map["Kharoshthi"] = 38;
            map["Khmer"] = 39;
            map["Kannada"] = 40;
            map["Kaithi"] = 41;
            map["Tai_Tham"] = 42;
            map["Lao"] = 43;
            map["Latin"] = 44;
            map["Lepcha"] = 45;
            map["Limbu"] = 46;
            map["Linear_B"] = 47;
            map["Lisu"] = 48;
            map["Lycian"] = 49;
            map["Lydian"] = 50;
            map["Malayalam"] = 51;
            map["Mongolian"] = 52;
            map["Meetei_Mayek"] = 53;
            map["Myanmar"] = 54;
            map["Nko"] = 55;
            map["Ogham"] = 56;
            map["Ol_Chiki"] = 57;
            map["Old_Turkic"] = 58;
            map["Oriya"] = 59;
            map["Osmanya"] = 60;
            map["Phags_Pa"] = 61;
            map["Inscriptional_Pahlavi"] = 62;
            map["Phoenician"] = 63;
            map["Inscriptional_Parthian"] = 64;
            map["Rejang"] = 65;
            map["Runic"] = 66;
            map["Samaritan"] = 67;
            map["Old_South_Arabian"] = 68;
            map["Saurashtra"] = 69;
            map["Shavian"] = 70;
            map["Sinhala"] = 71;
            map["Sundanese"] = 72;
            map["Syloti_Nagri"] = 73;
            map["Syriac"] = 74;
            map["Tagbanwa"] = 75;
            map["Tai_Le"] = 76;
            map["New_Tai_Lue"] = 77;
            map["Tamil"] = 78;
            map["Tai_Viet"] = 79;
            map["Telugu"] = 80;
            map["Tifinagh"] = 81;
            map["Tagalog"] = 82;
            map["Thaana"] = 83;
            map["Thai"] = 84;
            map["Tibetan"] = 85;
            map["Ugaritic"] = 86;
            map["Vai"] = 87;
            map["Old_Persian"] = 88;
            map["Cuneiform"] = 89;
            map["Yi"] = 90;
            map["Inherited"] = 91;
            map["Common"] = 92;
            map["Unknown"] = 93;
        }
        return map;
    }

    ucd_vector info;
};

// Builds a two-stage lookup table over the full Unicode code-point span:
// stage1 maps (code point / block_size) to a block index; stage2 holds the
// deduplicated blocks of per-code-point values.
template <typename T, uint32_t block_size_ = 256>
class ucd_table_builder
{
public:

    static uint32_t const block_size = block_size_;
    static uint32_t const full_span = 0x110000;   // number of Unicode code points
    typedef T value_type;

    ucd_table_builder() : p(new T[full_span])
    {
        for (uint32_t i = 0; i < full_span; ++i)
            p[i] = 0;
    }

    // Folds one UCD file's data into the flat per-code-point array.
    void collect(char const* filename, int field, bool collect_properties = true)
    {
        std::cout << "collecting " << filename << std::endl;
        ucd_info info(filename);
        info.collect(p, field, collect_properties);
    }

    // Deduplicates identical blocks and emits the two-stage table.
    void build(std::vector<uint8_t>& stage1, std::vector<T const*>& stage2)
    {
        std::cout << "building tables" << std::endl;
        // Group equal blocks: key compares block contents lexicographically.
        std::map<block_ptr, std::vector<T const*> > blocks;
        for (T const* i = p.get(); i < (p.get() + full_span); i += block_size)
            blocks[block_ptr(i)].push_back(i);

        // Not enough bits to store the block indices.
        BOOST_ASSERT(blocks.size() < (1 << (sizeof(uint8_t) * 8)));

        // Re-key by block address so stage2 preserves first-occurrence order.
        typedef std::pair<block_ptr, std::vector<T const*> > blocks_value_type;
        std::map<T const*, std::vector<T const*> > sorted_blocks;
        BOOST_FOREACH(blocks_value_type const& val, blocks)
        {
            sorted_blocks[val.first.p] = val.second;
        }

        stage1.clear();
        stage1.reserve(full_span / block_size);
        stage1.resize(full_span / block_size);

        stage2.clear();
        stage2.reserve(blocks.size());

        typedef std::pair<T const*, std::vector<T const*> > sorted_blocks_value_type;
        BOOST_FOREACH(sorted_blocks_value_type const& val, sorted_blocks)
        {
            stage2.push_back(val.first);
            BOOST_FOREACH(T const* val2, val.second)
            {
                stage1[(val2 - p.get()) / block_size] = stage2.size() - 1;
            }
        }
    }

private:

    // Wraps a pointer to one block so std::map can compare block contents.
    struct block_ptr
    {
        block_ptr(T const* p) : p(p) {}

        friend bool operator<(block_ptr a, block_ptr b)
        {
            return std::lexicographical_compare(
                a.p, a.p + block_size, b.p, b.p + block_size);
        }

        T const* p;
    };

    boost::scoped_array<T> p;
};

// Emits 'tab' spaces.
template <typename Out>
void print_tab(Out& out, int tab)
{
    for (int i = 0; i < tab; ++i)
        out << ' ';
}

// Emits the container's values as a comma-separated, grouped table.
template <typename Out, typename C>
void print_table(Out& out, C const& c, bool trailing_comma, int width = 4, int group = 16)
{
    int const tab = 4;
    typename C::size_type size = c.size();
    BOOST_ASSERT(size > 1);

    print_tab(out, tab);
    out << std::setw(width) << int(c[0]);

    // FIX: 'C::size_type' is a dependent name and requires the 'typename'
    // disambiguator; without it this is ill-formed on conforming compilers.
    for (typename C::size_type i = 1; i < size; ++i)
    {
        out << ", ";
        if ((i % group) == 0)
        {
            out << std::endl;
            print_tab(out, tab);
        }
        out << std::setw(width) << int(c[i]);
    }

    if (trailing_comma)
        out << ", " << std::endl;
}

// Writes the generated file's license header and opening namespace.
template <typename Out>
void print_head(Out& out)
{
    out
        << "/*=============================================================================\n"
        << " Copyright (c) 2001-2011 Joel de Guzman\n"
        << "\n"
        << " Distributed under the Boost Software License, Version 1.0. (See accompanying\n"
        << " file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)\n"
        << "\n"
        << " AUTOGENERATED. DO NOT EDIT!!!\n"
        << "==============================================================================*/\n"
        << "#include <boost/cstdint.hpp>\n"
        << "\n"
        << "namespace boost { namespace spirit { namespace ucd { namespace detail\n"
        << "{"
        ;
}

// Writes the generated file's closing namespace.
template <typename Out>
void print_tail(Out& out)
{
    out
        << "\n"
        << "}}}} // namespace boost::spirit::unicode::detail\n"
        ;
}

// Maps sizeof(value_type) to the spelled-out boost integer type name used in
// the generated code.
char const* get_int_type_name(int size)
{
    switch (size)
    {
        case 1: return "::boost::uint8_t";
        case 2: return "::boost::uint16_t";
        case 4: return "::boost::uint32_t";
        // FIX: was 'case 5' — sizeof of an integer type is never 5;
        // a 64-bit value_type has sizeof 8, so the old code hit the
        // assert/NULL path for uint64_t tables.
        case 8: return "::boost::uint64_t";
        default: BOOST_ASSERT(false); return 0; // invalid size
    };
}

// Emits one complete generated header: stage1 array, stage2 blocks and the
// inline two-stage lookup function named '<name>_lookup'.
template <typename Out, typename Builder>
void print_file(Out& out, Builder& builder, int field_width, char const* name)
{
    std::cout << "Generating " << name << " tables" << std::endl;
    uint32_t const block_size = Builder::block_size;
    typedef typename Builder::value_type value_type;
    print_head(out);

    std::vector<uint8_t> stage1;
    std::vector<value_type const*> stage2;
    builder.build(stage1, stage2);
    std::cout << "Block Size: " << block_size << std::endl;
    std::cout << "Total Bytes: "
        << stage1.size()+(stage2.size()*block_size*sizeof(value_type))
        << std::endl;

    out
        << "\n"
        << " static const ::boost::uint8_t " << name << "_stage1[] = {\n"
        << "\n"
        ;

    print_table(out, stage1, false, 3);
    char const* int_name = get_int_type_name(sizeof(value_type));

    out
        << "\n"
        << " };"
        << "\n"
        << "\n"
        << " static const " << int_name << ' ' << name << "_stage2[] = {"
        ;

    int block_n = 0;
    for (int i = 0; i < int(stage2.size()); ++i)
    {
        value_type const* p = stage2[i];
        bool last = (i+1 == stage2.size());

        out << "\n\n // block " << block_n++ << std::endl;
        print_table(out, boost::iterator_range<value_type const*>(p, p+block_size), !last, field_width);
    }

    out
        << "\n"
        << " };"
        << "\n"
        ;

    out
        << "\n"
        << " inline " << int_name << ' ' << name << "_lookup(::boost::uint32_t ch)\n"
        << " {\n"
        << " ::boost::uint32_t block_offset = " << name << "_stage1[ch / " << block_size << "] * " << block_size << ";\n"
        << " return " << name << "_stage2[block_offset + ch % " << block_size << "];\n"
        << " }\n"
        ;

    print_tail(out);
}

int main()
{
    // The category tables
    {
        std::ofstream out("category_table.hpp");
        ucd_table_builder<uint16_t, 256> builder;
        builder.collect("UnicodeData.txt", 2);
        builder.collect("DerivedCoreProperties.txt", 1);
        builder.collect("PropList.txt", 1);
        print_file(out, builder, 4, "category");
    }

    // The script tables
    {
        std::ofstream out("script_table.hpp");
        ucd_table_builder<uint8_t, 256> builder;
        builder.collect("Scripts.txt", 1);
        print_file(out, builder, 3, "script");
    }

    // The lowercase tables
    {
        std::ofstream out("lowercase_table.hpp");
        ucd_table_builder<uint32_t, 256> builder;
        builder.collect("UnicodeData.txt", 13, false);
        print_file(out, builder, 6, "lowercase");
    }

    // The uppercase tables
    {
        std::ofstream out("uppercase_table.hpp");
        ucd_table_builder<uint32_t, 256> builder;
        builder.collect("UnicodeData.txt", 12, false);
        print_file(out, builder, 6, "uppercase");
    }

    return 0;
}
// Copyright (c) 2019 Bitcoin Association // Distributed under the Open BSV software license, see the accompanying file LICENSE. #include <clientversion.h> #include <config.h> #include <logging.h> #include <memusage.h> #include <mining/journal_change_set.h> #include <net/net.h> #include <policy/policy.h> #include <scheduler.h> #include <time_locked_mempool.h> #include <txn_validator.h> using namespace mining; CTimeLockedMempool::CTimeLockedMempool() { // Set some sane default values for config mMaxMemory = DEFAULT_MAX_NONFINAL_MEMPOOL_SIZE * ONE_MEBIBYTE; mPeriodRunFreq = DEFAULT_NONFINAL_CHECKS_FREQ; mPurgeAge = DEFAULT_NONFINAL_MEMPOOL_EXPIRY * SECONDS_IN_ONE_HOUR; } // Add or update a time-locked transaction void CTimeLockedMempool::addOrUpdateTransaction( const TxMempoolInfo& info, const TxInputDataSPtr& pTxInputData, CValidationState& state) { CTransactionRef txn { info.GetTx() }; std::unique_lock lock { mMtx }; // Update or new addition? std::set<CTransactionRef> updated { getTransactionsUpdatedByNL(txn) }; if(updated.empty()) { if(state.IsNonFinal()) { // New addition insertNL(info, state); } else { LogPrint(BCLog::MEMPOOL, "Non-final pool ignoring tx that doesn't finalise any we track: %s\n", txn->GetId().ToString()); } } else if(updated.size() == 1) { // Validate update const CTransactionRef& oldTxn { *updated.begin() }; bool finalised; if(validateUpdate(txn, oldTxn, state, finalised)) { // Remove old txn this new one updates removeNL(oldTxn); // Do we want to update to another non-final or are we ready to finalise? 
if(finalised) { LogPrint(BCLog::MEMPOOL, "Finalising non-final tx: %s\n", txn->GetId().ToString()); // For full belt-and-braces safety, resubmit newly final transaction for revalidation pTxInputData->SetTxSource(TxSource::finalised); pTxInputData->SetTxStorage(info.GetTxStorage()), pTxInputData->SetAcceptTime(GetTime()); state.SetResubmitTx(); } else { insertNL(info, state); } } else { LogPrint(BCLog::MEMPOOL, "Rejecting non-final tx which failed checks: %s\n", txn->GetId().ToString()); } } else { LogPrint(BCLog::MEMPOOL, "Rejecting non-final tx which wants to replace multiple txs: %s\n", txn->GetId().ToString()); state.DoS(10, false, REJECT_INVALID, "bad-txn-update"); } } // Get IDs of all held transactions std::vector<TxId> CTimeLockedMempool::getTxnIDs() const { std::vector<TxId> res {}; std::shared_lock lock { mMtx }; for(const auto& info : mTransactionMap.get<TagTxID>()) { res.emplace_back(info.GetTxId()); } return res; } // Does this finalise an existing time-locked transaction? bool CTimeLockedMempool::finalisesExistingTransaction(const CTransactionRef& txn) const { std::set<CTransactionRef> updated {}; { std::shared_lock lock { mMtx }; if(mTransactionMap.empty()) { // Can't be an update if we're not tracking any time-locked transactions return false; } // Check if this txn could update exactly 1 of our non-final txns and not anything else for(const CTxIn& in : txn->vin) { if(const auto& it { mUTXOMap.find(in.prevout) }; it != mUTXOMap.end()) { updated.emplace(it->second); } else { return false; } } } if(updated.size() == 1) { // Check every input finalises for(const CTxIn& txin : txn->vin) { if(txin.nSequence != CTxIn::SEQUENCE_FINAL) { return false; } } return true; } return false; } // Check the given transaction doesn't try to double spend any of our locked UTXOs. 
// Returns the set of tracked non-final txns whose locked UTXOs the given
// transaction would also spend (empty set => no conflict).
std::set<CTransactionRef> CTimeLockedMempool::checkForDoubleSpend(const CTransactionRef& txn) const
{
    std::shared_lock lock { mMtx };

    if(mUTXOMap.empty())
    {
        return {};
    }

    std::set<CTransactionRef> conflictsWith;

    for(const CTxIn& txin : txn->vin)
    {
        if(auto it = mUTXOMap.find(txin.prevout); it != mUTXOMap.end())
        {
            conflictsWith.insert(it->second);
        }
    }

    return conflictsWith;
}

// Is the given txn ID for one currently held?
bool CTimeLockedMempool::exists(const uint256& id) const
{
    std::shared_lock lock { mMtx };
    const auto& index { mTransactionMap.get<TagRawTxID>() };
    return index.find(id) != index.end();
}

// Is the given txn ID for one we held until recently?
// NOTE: backed by a bloom filter, so false positives are possible.
bool CTimeLockedMempool::recentlyRemoved(const uint256& id) const
{
    std::shared_lock lock { mMtx };
    return mRecentlyRemoved.contains(id);
}

// Fetch the full entry we have for the given txn ID
// Returns a default-constructed TxMempoolInfo if the ID is unknown.
TxMempoolInfo CTimeLockedMempool::getInfo(const uint256& id) const
{
    TxMempoolInfo info {};

    std::shared_lock lock { mMtx };
    const auto& index { mTransactionMap.get<TagRawTxID>() };
    if(const auto& it { index.find(id) }; it != index.end())
    {
        info = *it;
    }

    return info;
}

// Launch periodic checks for finalised txns
void CTimeLockedMempool::startPeriodicChecks(CScheduler& scheduler)
{
    scheduler.scheduleEvery(std::bind(&CTimeLockedMempool::periodicChecks, this), mPeriodRunFreq);
}

// Dump to disk
// Writes version + count + (txn, insertion time) pairs to a temp file, then
// atomically renames it over the previous dump. Failures are logged only.
void CTimeLockedMempool::dumpMempool() const
{
    int64_t start { GetTimeMicros() };

    std::shared_lock lock { mMtx };

    try
    {
        FILE* filestr { fsbridge::fopen(GetDataDir() / "non-final-mempool.dat.new", "wb") };
        if(!filestr)
        {
            throw std::runtime_error("Failed to create new non-final mempool dump file");
        }

        CAutoFile file { filestr, SER_DISK, CLIENT_VERSION };
        file << DUMP_FILE_VERSION;

        const auto& index { mTransactionMap.get<TagTxID>() };
        uint64_t numTxns { index.size() };
        file << numTxns;
        for(const auto& details : index)
        {
            file << *(details.GetTx());
            file << details.nTime;
        }

        FileCommit(file.Get());
        file.reset();
        RenameOver(GetDataDir() / "non-final-mempool.dat.new", GetDataDir() / "non-final-mempool.dat");

        int64_t last { GetTimeMicros() };
        LogPrintf("Dumped %d txns to non-final mempool: %.6fs to dump\n", numTxns,
            (last - start) * 0.000001);
    }
    catch(const std::exception& e)
    {
        LogPrintf("Failed to dump non-final mempool: %s. Continuing anyway.\n", e.what());
    }
}

// Load from disk
// Reads the dump written by dumpMempool(); each txn younger than the purge
// age is revalidated synchronously before re-entering the pool. Returns
// false on deserialization failure or early shutdown.
bool CTimeLockedMempool::loadMempool(const task::CCancellationToken& shutdownToken) const
{
    try
    {
        FILE* filestr { fsbridge::fopen(GetDataDir() / "non-final-mempool.dat", "rb") };
        CAutoFile file { filestr, SER_DISK, CLIENT_VERSION };
        if(file.IsNull())
        {
            throw std::runtime_error("Failed to open non-final mempool file from disk");
        }

        int64_t count {0};
        int64_t skipped {0};
        int64_t failed {0};
        int64_t nNow { GetTime() };

        uint64_t version {};
        file >> version;
        if(version != DUMP_FILE_VERSION)
        {
            throw std::runtime_error("Bad non-final mempool dump version");
        }

        // Number of saved txns
        uint64_t numTxns {0};
        file >> numTxns;

        // Take a reference to the validator.
        const auto& txValidator { g_connman->getTxnValidator() };
        // A pointer to the TxIdTracker.
        const TxIdTrackerWPtr& pTxIdTracker = g_connman->GetTxIdTracker();

        while(numTxns--)
        {
            CTransactionRef tx {};
            int64_t nTime {};
            file >> tx;
            file >> nTime;

            // Skip (count as expired) txns older than the configured purge age.
            if(nTime + mPurgeAge > nNow)
            {
                // Mempool Journal ChangeSet
                CJournalChangeSetPtr changeSet {
                    mempool.getJournalBuilder().getNewChangeSet(JournalUpdateReason::INIT)
                };
                std::string reason {};
                bool standard { IsStandardTx(GlobalConfig::GetConfig(), *tx, chainActive.Tip()->GetHeight() + 1, reason) };
                const CValidationState& state {
                    // Execute txn validation synchronously.
                    txValidator->processValidation(
                        std::make_shared<CTxInputData>(
                            pTxIdTracker, // a pointer to the TxIdTracker
                            tx,    // a pointer to the tx
                            TxSource::file, // tx source
                            standard ? TxValidationPriority::high : TxValidationPriority::low,
                            TxStorage::memory, // tx storage
                            nTime), // nAcceptTime
                        changeSet, // an instance of the mempool journal
                        true) // fLimitMempoolSize
                };
                // Check results
                if(state.IsValid())
                {
                    ++count;
                }
                else
                {
                    ++failed;
                }
            }
            else
            {
                ++skipped;
            }

            if(shutdownToken.IsCanceled())
            {
                // Abort early
                return false;
            }
        }

        LogPrintf("Imported non-final mempool transactions from disk: %i successes, %i "
                  "failed, %i expired\n", count, failed, skipped);
    }
    catch(const std::exception& e)
    {
        LogPrintf("Failed to deserialize non-final mempool data on disk: %s. Continuing anyway.\n", e.what());
        return false;
    }

    return true;
}

// Get number of txns we hold
size_t CTimeLockedMempool::getNumTxns() const
{
    std::shared_lock lock { mMtx };
    return mTransactionMap.get<TagTxID>().size();
}

// Estimate total memory usage
size_t CTimeLockedMempool::estimateMemoryUsage() const
{
    std::shared_lock lock { mMtx };
    return estimateMemoryUsageNL();
}

// Load or reload our config
void CTimeLockedMempool::loadConfig()
{
    std::unique_lock lock { mMtx };

    // Get max memory size in bytes
    mMaxMemory = gArgs.GetArgAsBytes("-maxmempoolnonfinal", DEFAULT_MAX_NONFINAL_MEMPOOL_SIZE, ONE_MEBIBYTE);

    // Get periodic checks run frequency
    mPeriodRunFreq = gArgs.GetArg("-checknonfinalfreq", DEFAULT_NONFINAL_CHECKS_FREQ);

    // Get configured purge age (convert hours to seconds)
    mPurgeAge = gArgs.GetArg("-mempoolexpirynonfinal", DEFAULT_NONFINAL_MEMPOOL_EXPIRY) * SECONDS_IN_ONE_HOUR;
}

// Fetch all transactions updated by the given new transaction.
// Caller holds mutex.
// Returns every tracked non-final txn that shares an input outpoint with
// 'txn' (i.e. the txns 'txn' would replace). Caller holds mutex.
std::set<CTransactionRef> CTimeLockedMempool::getTransactionsUpdatedByNL(const CTransactionRef& txn) const
{
    std::set<CTransactionRef> txns {};

    // Find all transactions we're tracking that have any of the same outpoints as this transaction
    for(const CTxIn& in : txn->vin)
    {
        if(const auto& it { mUTXOMap.find(in.prevout) }; it != mUTXOMap.end())
        {
            txns.emplace(it->second);
        }
    }

    return txns;
}

// Insert a new transaction
// Adds to the multi-index, records its locked UTXOs, and evicts it again
// immediately (marking the state invalid) if the pool memory cap is exceeded.
// Caller holds mutex.
void CTimeLockedMempool::insertNL(const TxMempoolInfo& info, CValidationState& state)
{
    CTransactionRef txn { info.GetTx() };

    // Put new txn in the main index
    auto& index { mTransactionMap.get<TagTxID>() };
    index.emplace(info);

    // Record UTXOs locked by this transaction
    for(const CTxIn& input : txn->vin)
    {
        mUTXOMap[input.prevout] = txn;
    }

    // Track memory used by this txn
    mTxnMemoryUsage += txn->GetTotalSize();

    // Check we haven't exceeded max memory
    size_t memUsage { estimateMemoryUsageNL() };
    if(memUsage > mMaxMemory)
    {
        LogPrint(BCLog::MEMPOOL, "Dropping non-final tx %s because mempool is full\n",
            txn->GetId().ToString());
        state.Invalid(false, REJECT_MEMPOOL_FULL, "non-final-pool-full");
        removeNL(txn);
    }
    else
    {
        LogPrint(BCLog::MEMPOOL, "Added non-final tx: %s, mem: %d\n",
            txn->GetId().ToString(), memUsage);
    }
}

// Remove an old transaction
// Erases it from the index and UTXO map, remembers its ID in the
// recently-removed bloom filter, and reduces the tracked memory figure.
// Caller holds mutex.
void CTimeLockedMempool::removeNL(const CTransactionRef& txn)
{
    // Remove from main index
    auto& index { mTransactionMap.get<TagTxID>() };
    index.erase(txn);

    // Track removal in bloom filter
    mRecentlyRemoved.insert(txn->GetId());

    // Remove UTXOs locked by that transaction
    for(const CTxIn& input : txn->vin)
    {
        if(mUTXOMap.erase(input.prevout) != 1)
        {
            LogPrint(BCLog::MEMPOOL, "Warning: Failed to find and remove UTXO from old non-final tx %s\n",
                txn->GetId().ToString());
        }
    }

    // Update memory used (clamp at zero to avoid unsigned underflow)
    auto txnSize { txn->GetTotalSize() };
    if(mTxnMemoryUsage <= txnSize)
    {
        mTxnMemoryUsage = 0;
    }
    else
    {
        mTxnMemoryUsage -= txnSize;
    }

    LogPrint(BCLog::MEMPOOL, "Removed old non-final tx: %s, mem: %d\n",
        txn->GetId().ToString(), estimateMemoryUsageNL());
}

// Perform checks on a transaction before allowing an update
// An update must spend exactly the same outpoints, never decrease any
// nSequence, and increase at least one. 'finalised' is set to whether every
// increased input reached SEQUENCE_FINAL.
bool CTimeLockedMempool::validateUpdate(
    const CTransactionRef& newTxn,
    const CTransactionRef& oldTxn,
    CValidationState& state,
    bool& finalised) const
{
    // Must have same number of inputs
    if(newTxn->vin.size() != oldTxn->vin.size())
    {
        LogPrint(BCLog::MEMPOOL, "Update to non-final txn has different number of inputs\n");
        state.DoS(10, false, REJECT_INVALID, "bad-txn-update");
        return false;
    }

    bool seenIncrease {false};
    finalised = true;

    // Check corresponding inputs on new and old
    for(unsigned i = 0; i < newTxn->vin.size(); ++i)
    {
        const CTxIn& newInput { newTxn->vin[i] };
        const CTxIn& oldInput { oldTxn->vin[i] };

        // Check each input spends the same outpoint
        if(newInput.prevout != oldInput.prevout)
        {
            LogPrint(BCLog::MEMPOOL, "Update to non-final txn has different inputs\n");
            state.DoS(10, false, REJECT_INVALID, "bad-txn-update");
            return false;
        }

        // Check sequence numbers are only ever going forward
        if(newInput.nSequence < oldInput.nSequence)
        {
            LogPrint(BCLog::MEMPOOL, "Update to non-final txn would decrease nSequence\n");
            state.DoS(10, false, REJECT_INVALID, "bad-txn-update");
            return false;
        }
        else if(newInput.nSequence > oldInput.nSequence)
        {
            seenIncrease = true;
            if(newInput.nSequence != CTxIn::SEQUENCE_FINAL)
            {
                // Still not finalised
                finalised = false;
            }
        }
    }

    // Finally, must have seen at least 1 increase in an nSequence number
    if(!seenIncrease)
    {
        LogPrint(BCLog::MEMPOOL, "Update to non-final txn didn't increase any nSequence\n");
        state.DoS(10, false, REJECT_INVALID, "bad-txn-update");
        return false;
    }

    return true;
}

// Estimate our memory usage
// Caller holds mutex.
size_t CTimeLockedMempool::estimateMemoryUsageNL() const
{
    size_t numElements { mTransactionMap.size() };

    // Experiment shows that the memory usage of the multi-index container can be
    // approximated as:
    // 24 bytes overhead (3 pointers) per index per (number of elements + 1)
    // + (sizeof(element) * (number of elements + 1))
    constexpr size_t numIndexes {3};
    constexpr size_t overhead { 3 * numIndexes * sizeof(void*) };
    size_t multiIndexUsage { (overhead * (numElements+1)) + (sizeof(TxnMultiIndex::value_type) * (numElements+1)) };

    multiIndexUsage += mTxnMemoryUsage;

    return memusage::MallocUsage(multiIndexUsage) + memusage::DynamicUsage(mUTXOMap);
}

// Do periodic checks for finalised txns and txns to purge
// Scheduled by startPeriodicChecks(). Finalised txns are removed and
// resubmitted for validation; txns older than the purge age are dropped.
void CTimeLockedMempool::periodicChecks()
{
    // Get current time
    int64_t now { GetTime() };
    const CBlockIndex* chainTip = chainActive.Tip();

    std::unique_lock lock { mMtx };

    // A pointer to the TxIdTracker.
    const TxIdTrackerWPtr& pTxIdTracker = g_connman->GetTxIdTracker();

    // Iterate over transactions in unlocking time order
    auto& index { mTransactionMap.get<TagUnlockingTime>() };
    auto it { index.begin() };
    while(it != index.end())
    {
        CTransactionRef txn { it->GetTx() };
        int64_t insertionTime { it->nTime };
        int64_t timeInPool { now - insertionTime };

        // Move iterator on so we don't have to care whether this txn gets removed
        ++it;

        // Lock time passed?
        if(IsFinalTx(*txn, chainTip->GetHeight() + 1, chainTip->GetMedianTimePast()))
        {
            LogPrint(BCLog::MEMPOOL, "Finalising non-final transaction %s at block height %d, mtp %d\n",
                txn->GetId().ToString(), chainTip->GetHeight() + 1, chainTip->GetMedianTimePast());
            removeNL(txn);

            // For full belt-and-braces safety, resubmit newly final transaction for revalidation
            // This revalidation is mandatory as some of the transactions might become frozen
            // in the meantime
            std::string reason {};
            bool standard { IsStandardTx(GlobalConfig::GetConfig(), *txn, chainTip->GetHeight() + 1, reason) };
            g_connman->EnqueueTxnForValidator(
                std::make_shared<CTxInputData>(
                    pTxIdTracker,
                    txn,
                    TxSource::finalised,
                    standard ? TxValidationPriority::high : TxValidationPriority::low,
                    TxStorage::memory,
                    GetTime()));
        }
        // Purge age passed?
        else if(timeInPool >= mPurgeAge)
        {
            LogPrint(BCLog::MEMPOOL, "Purging expired non-final transaction: %s\n",
                txn->GetId().ToString());
            removeNL(txn);
        }
    }
}
// Copyright Carl Philipp Reh 2006 - 2019. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) #include <sge/x11input/atom_name.hpp> #include <awl/backends/x11/display.hpp> #include <awl/backends/x11/display_ref.hpp> #include <awl/backends/x11/free.hpp> #include <fcppt/config/external_begin.hpp> #include <X11/Xlib.h> #include <fcppt/config/external_end.hpp> sge::x11input::atom_name::atom_name( awl::backends::x11::display_ref const _display, Atom const _atom) : name_(::XGetAtomName(_display.get().get(), _atom)) { } sge::x11input::atom_name::~atom_name() { awl::backends::x11::free(name_); } char const *sge::x11input::atom_name::get() const { return name_; }
#include "site-test.h"
#include <QNetworkCookie>
#include <QtTest>
#include "custom-network-access-manager.h"
#include "tags/tag.h"

// Per-test setup: registers the fixture source/site and builds fresh
// Profile/Source/Site objects backed by the on-disk test resources.
void SiteTest::init()
{
    setupSource("Danbooru (2.0)");
    setupSite("Danbooru (2.0)", "danbooru.donmai.us");

    m_profile = new Profile("tests/resources/");
    m_settings = m_profile->getSettings();
    m_source = new Source(m_profile, "tests/resources/sites/Danbooru (2.0)");
    m_site = new Site("danbooru.donmai.us", m_source);
}

// Per-test teardown: schedule the fixture objects for deletion on the event
// loop (deleteLater rather than delete, since they are QObjects).
void SiteTest::cleanup()
{
    m_profile->deleteLater();
    m_source->deleteLater();
    m_site->deleteLater();
}

// With "usedefault" disabled and all source slots blank, the site should fall
// back to its default API list (three APIs for this fixture).
void SiteTest::testDefaultApis()
{
    QSettings settings("tests/resources/sites/Danbooru (2.0)/danbooru.donmai.us/defaults.ini", QSettings::IniFormat);
    settings.setValue("sources/usedefault", false);
    settings.setValue("sources/source_1", "");
    settings.setValue("sources/source_2", "");
    settings.setValue("sources/source_3", "");
    settings.setValue("sources/source_4", "");

    Source source(m_profile, "tests/resources/sites/Danbooru (2.0)");
    Site site("danbooru.donmai.us", &source);
    QCOMPARE(site.getApis().count(), 3);
}

// Source slots set to names that resolve to no real API should yield an empty
// API list.
void SiteTest::testNoApis()
{
    QSettings settings("tests/resources/sites/Danbooru (2.0)/danbooru.donmai.us/defaults.ini", QSettings::IniFormat);
    settings.setValue("sources/usedefault", false);
    settings.setValue("sources/source_1", "1");
    settings.setValue("sources/source_2", "2");
    settings.setValue("sources/source_3", "3");
    settings.setValue("sources/source_4", "4");

    Source source(m_profile, "tests/resources/sites/Danbooru (2.0)");
    Site site("danbooru.donmai.us", &source);
    QCOMPARE(site.getApis().count(), 0);
}

// fixUrl: empty stays empty; http and scheme-relative URLs are upgraded to
// https.
void SiteTest::testFixUrlBasic()
{
    QCOMPARE(m_site->fixUrl(""), QUrl());
    QCOMPARE(m_site->fixUrl("http://test.com/dir/toto.jpg"), QUrl("https://test.com/dir/toto.jpg"));
    QCOMPARE(m_site->fixUrl("//test.com/dir/toto.jpg"), QUrl("https://test.com/dir/toto.jpg"));
}

// fixUrl: root-relative and bare paths are resolved against the site's own
// host.
void SiteTest::testFixUrlRoot()
{
    QCOMPARE(m_site->fixUrl("/dir/toto.jpg"), QUrl("https://danbooru.donmai.us/dir/toto.jpg"));
    QCOMPARE(m_site->fixUrl("dir/toto.jpg"), QUrl("https://danbooru.donmai.us/dir/toto.jpg"));
}

// fixUrl: with an explicit base URL, relative paths resolve against that base
// (directory vs. file bases behave like browser resolution).
void SiteTest::testFixUrlRelative()
{
    QCOMPARE(m_site->fixUrl("dir/toto.jpg", QUrl("http://test.com/dir/")), QUrl("http://test.com/dir/dir/toto.jpg"));
    QCOMPARE(m_site->fixUrl("toto.jpg", QUrl("http://test.com/dir/file.html")), QUrl("http://test.com/dir/toto.jpg"));
    QCOMPARE(m_site->fixUrl("toto.jpg", QUrl("http://test.com/dir/")), QUrl("http://test.com/dir/toto.jpg"));
}

// getFilteredSites returns only the sites that actually exist, preserving
// their URL and type; unknown names are silently dropped.
void SiteTest::testGetSites()
{
    QList<Site*> sites;

    sites = m_profile->getFilteredSites(QStringList() << "danbooru.donmai.us");
    QCOMPARE(sites.count(), 1);
    QCOMPARE(sites.first()->url(), QString("danbooru.donmai.us"));
    QCOMPARE(sites.first()->type(), QString("Danbooru (2.0)"));

    sites = m_profile->getFilteredSites(QStringList() << "test (does not exist)" << "danbooru.donmai.us");
    QCOMPARE(sites.count(), 1);
    QCOMPARE(sites.first()->url(), QString("danbooru.donmai.us"));
    QCOMPARE(sites.first()->type(), QString("Danbooru (2.0)"));
}

// loadTags(page, limit) emits finishedLoadingTags with the fetched tag list;
// the fixture page is expected to yield 20 tags whose first three names are
// checked below.
void SiteTest::testLoadTags()
{
    // Wait for tags
    qRegisterMetaType<QList<Tag>>();
    QSignalSpy spy(m_site, SIGNAL(finishedLoadingTags(QList<Tag>)));
    m_site->loadTags(3, 20);
    QVERIFY(spy.wait());

    // Get results
    QList<QVariant> arguments = spy.takeFirst();
    QVariantList variants = arguments.at(0).value<QVariantList>();

    // Convert results
    QVector<Tag> tags;
    QStringList tagsText;
    tags.reserve(variants.count());
    tagsText.reserve(variants.count());
    for (const QVariant &variant : variants)
    {
        Tag tag = variant.value<Tag>();
        tags.append(tag);
        tagsText.append(tag.text());
    }

    // Compare results
    // Only the first three tag names are pinned; the rest of the 20 are just
    // counted.
    tagsText = tagsText.mid(0, 3);
    QCOMPARE(tags.count(), 20);
    QCOMPARE(tagsText, QStringList() << "kameji_(tyariri)" << "the_king_of_fighterx_xiv" << "condom_skirt");
}

// Cookies written to the site's settings file are round-tripped back through
// loadConfig()/cookies().
void SiteTest::testCookies()
{
    QList<QNetworkCookie> cookies;
    cookies.append(QNetworkCookie("test_name_1", "test_value_1"));
    cookies.append(QNetworkCookie("test_name_2", "test_value_2"));

    // Settings store cookies in their raw serialized form.
    QList<QVariant> cookiesVariant;
    cookiesVariant.reserve(cookies.count());
    for (const QNetworkCookie &cookie : cookies)
    {
        cookiesVariant.append(cookie.toRawForm());
    }

    QSettings siteSettings("tests/resources/sites/Danbooru (2.0)/danbooru.donmai.us/defaults.ini", QSettings::IniFormat);
    siteSettings.setValue("cookies", cookiesVariant);
    siteSettings.sync();
    m_site->loadConfig();

    QList<QNetworkCookie> siteCookies(m_site->cookies());
    QCOMPARE(siteCookies.count(), cookies.count());
    QCOMPARE(siteCookies[0].name(), cookies[0].name());
    QCOMPARE(siteCookies[0].value(), cookies[0].value());
    QCOMPARE(siteCookies[1].name(), cookies[1].name());
    QCOMPARE(siteCookies[1].value(), cookies[1].value());
}

// A site configured with no usable login method reports LoginResult::Impossible.
void SiteTest::testLoginNone()
{
    // Prepare settings
    QSettings siteSettings("tests/resources/sites/Danbooru (2.0)/danbooru.donmai.us/defaults.ini", QSettings::IniFormat);
    siteSettings.setValue("login/parameter", true);
    m_site->loadConfig();

    // Wait for login
    QSignalSpy spy(m_site, SIGNAL(loggedIn(Site*, Site::LoginResult)));
    QTimer::singleShot(0, m_site, SLOT(login()));
    QVERIFY(spy.wait());

    // Get result
    QList<QVariant> arguments = spy.takeFirst();
    Site::LoginResult result = arguments.at(1).value<Site::LoginResult>();
    QCOMPARE(result, Site::LoginResult::Impossible);
}

// GET-based login against a canned response page; the fixture page does not
// produce the expected session cookie, so the result is LoginResult::Error.
void SiteTest::testLoginGet()
{
    // Prepare settings
    QSettings siteSettings("tests/resources/sites/Danbooru (2.0)/danbooru.donmai.us/defaults.ini", QSettings::IniFormat);
    siteSettings.setValue("auth/pseudo", "user");
    siteSettings.setValue("auth/password", "somepassword");
    siteSettings.setValue("login/type", "get");
    siteSettings.setValue("login/get/pseudo", "name");
    siteSettings.setValue("login/get/password", "password");
    siteSettings.setValue("login/get/url", "/session/new");
    siteSettings.setValue("login/get/cookie", "_danbooru_session");
    m_site->loadConfig();

    // Serve the canned login page instead of hitting the network.
    CustomNetworkAccessManager::NextFiles.enqueue("tests/resources/pages/danbooru.donmai.us/login.html");

    // Wait for login
    // A single-shot timer defers login(true) until the event loop (and the
    // signal spy) is running.
    QSignalSpy spy(m_site, SIGNAL(loggedIn(Site*, Site::LoginResult)));
    QTimer *timer = new QTimer(this);
    timer->setSingleShot(true);
    connect(timer, &QTimer::timeout, [=]() {
        m_site->login(true);
        timer->deleteLater();
    });
    timer->start(0);
    QVERIFY(spy.wait());

    // Get result
    QList<QVariant> arguments = spy.takeFirst();
    Site::LoginResult result = arguments.at(1).value<Site::LoginResult>();
    QCOMPARE(result, Site::LoginResult::Error);
}

// POST-based login; no canned response is queued, so the attempt fails with
// LoginResult::Error.
void SiteTest::testLoginPost()
{
    // Prepare settings
    QSettings siteSettings("tests/resources/sites/Danbooru (2.0)/danbooru.donmai.us/defaults.ini", QSettings::IniFormat);
    siteSettings.setValue("auth/pseudo", "user");
    siteSettings.setValue("auth/password", "somepassword");
    siteSettings.setValue("login/type", "post");
    siteSettings.setValue("login/post/pseudo", "name");
    siteSettings.setValue("login/post/password", "password");
    siteSettings.setValue("login/post/url", "/session");
    siteSettings.setValue("login/post/cookie", "_danbooru_session");
    m_site->loadConfig();

    // Wait for login
    QSignalSpy spy(m_site, SIGNAL(loggedIn(Site*, Site::LoginResult)));
    QTimer *timer = new QTimer(this);
    timer->setSingleShot(true);
    connect(timer, &QTimer::timeout, [=]() {
        m_site->login(true);
        timer->deleteLater();
    });
    timer->start(0);
    QVERIFY(spy.wait());

    // Get result
    QList<QVariant> arguments = spy.takeFirst();
    Site::LoginResult result = arguments.at(1).value<Site::LoginResult>();
    QCOMPARE(result, Site::LoginResult::Error);
}

// Registers the test with the project's test runner.
static SiteTest instance;
#include "ReflectiveMaterial.h"
// Forward slashes in include paths: backslashes are implementation-defined
// and break non-MSVC compilers.
#include "brdf/BRDF.h"
#include "brdf/GlossySpecular.h"

USING_RDPS
USING_FRWK

// Material with ambient/diffuse/specular/reflective components.
// Owns its three BRDF pointers (ambient is owned by MaterialBase); copies
// deep-clone them and the destructor releases them.

// Default: no component BRDFs; setters must populate them before use.
ReflectiveMaterial::ReflectiveMaterial() :
    MaterialBase(REFLECT_MATERIAL),
    diffuse(nullptr),
    specular(nullptr),
    reflect(nullptr)
{}

// Takes ownership of the four passed BRDFs.
ReflectiveMaterial::ReflectiveMaterial(BRDF *_ambient, BRDF *_diffuse,
                                       GlossySpecular *_specular, BRDF *_reflect) :
    MaterialBase(_ambient, REFLECT_MATERIAL),
    diffuse(_diffuse),
    specular(_specular),
    reflect(_reflect)
{}

// Convenience: builds all four components from one color and per-component
// coefficients.
// NOTE(review): the reflect BRDF is tagged AMBIENT, matching the original
// code -- presumably no dedicated REFLECT BRDF type exists; confirm.
ReflectiveMaterial::ReflectiveMaterial(const RT::Vec3f &color, const float ambient,
                                       const float diffuse, const float specular,
                                       const float intensity, const float reflect) :
    MaterialBase(new BRDF(color, ambient, AMBIENT), REFLECT_MATERIAL),
    diffuse(new BRDF(color, diffuse, DIFFUSE)),
    specular(new GlossySpecular(color, specular, intensity)),
    reflect(new BRDF(color, reflect, AMBIENT))
{}

// Deep copy. Null components are preserved as null (the original code
// dereferenced them unconditionally, crashing when copying a
// default-constructed material).
ReflectiveMaterial::ReflectiveMaterial(const ReflectiveMaterial &other) :
    MaterialBase(other),
    diffuse(other.diffuse ? other.diffuse->Clone() : nullptr),
    specular(other.specular ? other.specular->Clone() : nullptr),
    reflect(other.reflect ? other.reflect->Clone() : nullptr)
{}

ReflectiveMaterial::~ReflectiveMaterial()
{
    // delete on nullptr is a no-op, so no guards are needed.
    delete diffuse;
    delete specular;
    delete reflect;
}

// Deep copy assignment.
// Fixes two defects in the original: the previously owned BRDFs were
// overwritten without being deleted (memory leak), and self-assignment would
// clone from just-deleted objects.
ReflectiveMaterial &ReflectiveMaterial::operator=(const ReflectiveMaterial &other)
{
    if (this != &other)
    {
        MaterialBase::operator=(other);

        delete diffuse;
        delete specular;
        delete reflect;

        diffuse = other.diffuse ? other.diffuse->Clone() : nullptr;
        specular = other.specular ? other.specular->Clone() : nullptr;
        reflect = other.reflect ? other.reflect->Clone() : nullptr;
    }
    return (*this);
}

// Accessors return the owned components; callers must not delete them.
BRDF *ReflectiveMaterial::GetDiffuse() const
{
    return diffuse;
}

GlossySpecular *ReflectiveMaterial::GetSpecular() const
{
    return specular;
}

BRDF *ReflectiveMaterial::GetReflect() const
{
    return reflect;
}

// Polymorphic deep copy.
ReflectiveMaterial *ReflectiveMaterial::Clone() const
{
    return new ReflectiveMaterial(*this);
}

// Applies one color to every component that exists (null components are
// skipped instead of dereferenced).
ReflectiveMaterial &ReflectiveMaterial::SetColor(const RT::Vec3f &color)
{
    if (ambient)
        ambient->SetColor(color);
    if (diffuse)
        diffuse->SetColor(color);
    if (specular)
        specular->SetColor(color);
    if (reflect)
        reflect->SetColor(color);
    return (*this);
}

// Component-wise overload delegates to the Vec3f version for consistency.
ReflectiveMaterial &ReflectiveMaterial::SetColor(const float r, const float g, const float b)
{
    return SetColor(RT::Vec3f(r, g, b));
}

// Setters copy the argument's state; ownership of the argument stays with the
// caller. If the component was never created, a clone is made. (The original
// SetSpecular stored the caller's pointer when the member was null, creating
// a double-delete hazard; all three setters now follow the same copy/clone
// convention.)
ReflectiveMaterial &ReflectiveMaterial::SetDiffuse(BRDF *_diffuse)
{
    if (_diffuse)
    {
        if (diffuse)
            *diffuse = *_diffuse;
        else
            diffuse = _diffuse->Clone();
    }
    return (*this);
}

ReflectiveMaterial &ReflectiveMaterial::SetSpecular(GlossySpecular *_specular)
{
    if (_specular)
    {
        if (specular)
            *specular = *_specular;
        else
            specular = _specular->Clone();
    }
    return (*this);
}

ReflectiveMaterial &ReflectiveMaterial::SetReflect(BRDF *_reflect)
{
    if (_reflect)
    {
        if (reflect)
            *reflect = *_reflect;
        else
            reflect = _reflect->Clone();
    }
    return (*this);
}

// Coefficient setters; a missing component is ignored rather than
// dereferenced.
ReflectiveMaterial &ReflectiveMaterial::SetDiffuseK(const float _diffuse)
{
    if (diffuse)
        diffuse->SetK(_diffuse);
    return (*this);
}

ReflectiveMaterial &ReflectiveMaterial::SetSpecularK(const float _specular)
{
    if (specular)
        specular->SetK(_specular);
    return (*this);
}

// Specular exponent ("shininess").
ReflectiveMaterial &ReflectiveMaterial::SetIntensity(const float _intensity)
{
    if (specular)
        specular->SetExp(_intensity);
    return (*this);
}

ReflectiveMaterial &ReflectiveMaterial::SetReflectK(const float _reflect)
{
    if (reflect)
        reflect->SetK(_reflect);
    return (*this);
}
#include "HorizontalImgList.h"

#include <QPainter>
#include <QDebug>

#include "utils/utils.h"

namespace Ui
{

// Horizontal strip of clickable images (e.g. a star-rating row). Item
// geometry is cached per item during paintEvent and reused for mouse
// hit-testing, so hit-testing is only valid after the first paint.
HorizontalImgList::HorizontalImgList(const Options &_options, QWidget *_parent)
    : QWidget(_parent), options_(_options)
{
    // Mouse tracking is required so itemHovered fires without a button held.
    setMouseTracking(true);
    setCursor(QCursor(Qt::CursorShape::PointingHandCursor));
    connect(this, &HorizontalImgList::itemHovered, this, &HorizontalImgList::onItemHovered);
    connect(this, &HorizontalImgList::itemClicked, this, &HorizontalImgList::onItemClicked);
}

// Replaces the current contents with _count copies of the same image info
// (all items share the same size and default/highlighted pixmaps).
void HorizontalImgList::setIdenticalItems(const ImageInfo& _info, int _count)
{
    items_.clear();
    states_.clear();

    items_.reserve(_count);
    states_.reserve(_count);

    for (auto i = 0; i < _count; ++i)
    {
        items_.push_back(ImageInfo(_info.size_, _info.defaultImage_, _info.highlightedImage_));
        states_.push_back(ImageState());
    }

    // Widget height follows the (uniform) item height.
    setFixedHeight(_info.size_.height());
}

// Spacing around items; only the right margin is consulted when laying out.
void HorizontalImgList::setItemOffsets(const QMargins &_margins)
{
    itemOffsets_ = _margins;
}

int HorizontalImgList::itemsCount() const
{
    return items_.count();
}

// Preferred size: sum of item widths plus right offsets, tallest item height.
QSize HorizontalImgList::sizeHint() const
{
    if (!items_.empty())
    {
        auto width = 0;
        auto height = 0;

        for (const auto& item: items_)
        {
            width += item.size_.width();
            width += itemOffsets_.right();
            height = std::max(height,item.size_.height());
        }

        return { width, height };
    }

    return QWidget::sizeHint();
}

// Draws each item left-to-right and records its x-position/width/offset into
// states_ so mouse events can be mapped back to an item index.
void HorizontalImgList::paintEvent(QPaintEvent *_event)
{
    QWidget::paintEvent(_event);

    // draw stuff
    QPainter p(this);

    int x = 0, y = 0;

    for (int i = 0; i < items_.size(); ++i)
    {
        const auto &item = items_[i];

        // Cache geometry for hit-testing (see itemIndexForXPos / hasX).
        states_[i].currentXPos_ = x;
        states_[i].currentWidth_ = item.size_.width();
        states_[i].currentRightOffset_ = itemOffsets_.right();

        p.drawPixmap(x, y, item.size_.width(), item.size_.height(),
                     states_[i].highlighted_ ? item.highlightedImage_ : item.defaultImage_);

        x += item.size_.width();
        x += itemOffsets_.right();
    }
}

// Emits itemHovered for the item under the cursor, if any.
void HorizontalImgList::mouseMoveEvent(QMouseEvent *_event)
{
    auto index = itemIndexForXPos(_event->pos().x());
    if (index != -1)
    {
        Q_EMIT itemHovered(index);
    }

    QWidget::mouseMoveEvent(_event);
}

// Emits itemClicked for the item under the cursor, if any.
void HorizontalImgList::mouseReleaseEvent(QMouseEvent *_event)
{
    auto index = itemIndexForXPos(_event->pos().x());
    if (index != -1)
        Q_EMIT itemClicked(index);

    QWidget::mouseReleaseEvent(_event);
}

// On leave: either restore the remembered clicked selection or clear all
// hover highlighting.
void HorizontalImgList::leaveEvent(QEvent *)
{
    Q_EMIT mouseLeft();

    if (options_.rememberClickedHighlight_ && options_.hlClicked_ != -1)
    {
        highlightUserSelection(options_.hlClicked_);
        return;
    }

    for (auto &state: states_)
        state.highlighted_ = false;

    update();
}

// Hover handler: highlights items 0.._index (rating-bar style), subject to
// the configured thresholds; a remembered click at or beyond the hovered
// index takes precedence.
void HorizontalImgList::onItemHovered(int _index)
{
    if (!options_.highlightOnHover_ || _index == -1)
        return;

    if (options_.rememberClickedHighlight_ && options_.hlClicked_ != -1 && options_.hlClicked_ >= _index)
        return;

    for (int i = 0; i < states_.size(); ++i)
        states_[i].highlighted_ = (_index >= options_.hlOnHoverHigherThan_) && (i <= _index);

    update();
}

// Click handler: remembers the clicked index (when configured) and applies
// the persistent highlight.
void HorizontalImgList::onItemClicked(int _index)
{
    if (!options_.rememberClickedHighlight_)
        return;

    options_.hlClicked_ = _index;
    highlightUserSelection(_index);
}

// Maps an x coordinate to an item index using the geometry cached during the
// last paint; returns -1 when nothing matches (or before the first paint).
int HorizontalImgList::itemIndexForXPos(int _x)
{
    for (int i = 0; i < states_.size(); ++i)
    {
        if (states_[i].hasX(_x))
            return i;
    }

    return -1;
}

// Highlights items 0.._index inclusive and repaints.
void HorizontalImgList::highlightUserSelection(int _index)
{
    for (int i = 0; i < states_.size(); ++i)
        states_[i].highlighted_ = (i <= _index);

    update();
}

// ImageState

// True when _x falls inside this item's cached span (item width plus its
// right offset); false before the geometry has been cached by a paint.
bool HorizontalImgList::ImageState::hasX(int _x) const
{
    if (_x == -1 || currentXPos_ == -1 || currentWidth_ == -1)
        return false;

    return _x >= currentXPos_ && _x <= (currentXPos_ + currentWidth_ + currentRightOffset_);
}

}
#include <iostream>
#include <iomanip>
#include <omp.h>

using namespace std;

// Approximates pi by midpoint-rule integration of 4/(1+x^2) over [0,1],
// parallelized across iterations with an OpenMP reduction on the partial sums.
//
// @param num_steps  number of sub-intervals (default one million)
// @return           the pi approximation
double calcPi(int num_steps=1000000)
{
    const double step = 1. / (double)num_steps;
    double sum = 0.;

    // Declaring x inside the loop makes it loop-private by construction,
    // replacing the error-prone private(x) clause of the original.
    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < num_steps; ++i)
    {
        // Midpoint of the i-th sub-interval.
        const double x = (i + 0.5) * step;
        sum += 4. / (1 + x*x);
    }

    return sum * step;
}

int main()
{
    // Time the computation with OpenMP's wall clock.
    const double start = omp_get_wtime();
    const double pi = calcPi();
    const double et = omp_get_wtime() - start;

    cout << "Pi: " << setprecision(11) << pi << endl;
    cout << "Time: " << et << " seconds\n";
    return 0;
}
// Block String - Patrick Kubiak - 6/8/2015
// Put your text in a block!

#include <iostream>
#include <string>
#include <cstdlib>  // system(): the original relied on a transitive include

using namespace std;

// Prints str framed by a dashed border and '!' side markers.
void blockString(const string &str);

int main()
{
    // input
    string strInput;
    cout << "Block Text - Put your text in a block!" << endl;
    cout << "Text: ";
    getline(cin, strInput);

    // output
    blockString(strInput);

    // Windows-only console pause; kept for compatibility with the original.
    system("pause");
    return 0;
}

void blockString(const string &str)
{
    // std::string's fill constructor replaces the two hand-rolled dash loops;
    // the border is two characters wider than the text to cover the '!' frame.
    const string border(str.length() + 2, '-');

    cout << border << endl;
    cout << "!" << str << "!" << endl;
    cout << border << endl;
}
/**
 * Copyright 2019 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "tools/converter/legacy_optimizer/graph/set_unused_quant_param_to_default_pass.h"
#include "tools/common/tensor_util.h"
#include "src/common/log_util.h"

namespace mindspore::lite {
// Graph pass: strips calibration-only quantization metadata from every tensor
// in the exported graph. min/max/narrowRange are reset on all quant params;
// params that were never initialized are dropped, and tensors with no
// initialized params lose their quant-param list entirely.
STATUS SetUnusedQuantParamToDefaultPass::Run(schema::MetaGraphT *graph) {
  CHECK_NULL_RETURN(graph);
  for (auto &tensor : graph->allTensors) {
    bool has_quant_param = false;
    for (auto &quant_param : tensor->quantParams) {
      // Calibration min/max are only needed during conversion; zero them so
      // they don't bloat the serialized model.
      quant_param->min = 0.0;
      quant_param->max = 0.0;
      quant_param->narrowRange = true;
      if (quant_param->inited) {
        has_quant_param = true;
        // Clear the flag so the serialized model doesn't mark it inited.
        quant_param->inited = false;
      } else {
        // Release the uninitialized param (quant_param is a unique_ptr held
        // by the vector).
        // NOTE(review): when SOME params are inited, this leaves null entries
        // mixed into tensor->quantParams (only the all-uninited case clears
        // the vector below) -- presumably downstream consumers tolerate
        // nulls; confirm.
        quant_param = nullptr;
      }
    }
    if (!has_quant_param) {
      tensor->quantParams.clear();
    }
  }
  return RET_OK;
}
}  // namespace mindspore::lite