blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 4 201 | content_id stringlengths 40 40 | detected_licenses listlengths 0 85 | license_type stringclasses 2
values | repo_name stringlengths 7 100 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 260
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 11.4k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 80
values | src_encoding stringclasses 28
values | language stringclasses 1
value | is_vendor bool 1
class | is_generated bool 2
classes | length_bytes int64 8 9.86M | extension stringclasses 52
values | content stringlengths 8 9.86M | authors listlengths 1 1 | author stringlengths 0 119 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
95561fb1cc8b75fb9f0a480c7720f2bab01656d3 | e42d51e8da1e4454570b0c95e1a8c54fe0605e5f | /src/game.cpp | db5e2d344c4d82e50cf7e94d433bf992ea3a2112 | [] | no_license | Saito86/shooter_demo | d87f0ef1256ff6179ff0b2ed0bb7555bb37dd9f7 | 5a1b8acab71e15f4a512572e15b4800e60d74fd3 | refs/heads/master | 2021-01-10T18:50:50.662972 | 2015-02-18T12:29:27 | 2015-02-18T12:29:27 | 30,964,743 | 0 | 1 | null | null | null | null | ISO-8859-1 | C++ | false | false | 4,136 | cpp | ////////////////////////////////////////////
//////
//Simple First Person Shooter Demo
//////
//by Eugen Schäfer
//////
////////////////////////////////////////////
#include "game.h"
//GAME
// Game: all subsystem pointers start out empty; the real construction
// work happens later in init() and loadLevel().
Game::Game()
    : level(NULL),
      player(NULL),
      cam(NULL),
      hud(NULL)
{
}
// Tear down the game. Order matters here: shader programs are released
// first, then the heap-allocated actors/subobjects, and the static engine
// managers (decals, sound, particles, lights) are shut down last.
Game::~Game()
{
saiMaterial::cleanupShaders();
// These pointers may still be NULL if init() failed early;
// deleting NULL is a no-op, so no guards are needed.
delete level;
delete player;
delete cam;
delete hud;
// Enemies are owned by this class (created in loadLevel()).
for(unsigned int i=0;i<enemies.size();i++)
delete enemies[i];
saiDecal::cleanUp();
saiSoundManager::cleanUp();
saiParticleManager::cleanup();
saiLightManager::cleanUp();
}
// Bring up every engine subsystem in dependency order.
// Returns true on success; any false aborts program start-up.
bool Game::init()
{
//init engine framework; everything below depends on it
if(!fWork.init())
return false;
//init input
//doesn't do anything yet
if(!input.init())
return false;
//shaders need to be compiled at runtime
//at the beginning of the program
//after the framework has initialized
saiMaterial::compileShaders();
if(!saiSoundManager::init())
return false;
//loads a level
//hardcoded for now
if(!loadLevel("testroom2.smp"))
return false;
if(!saiParticleManager::init())
return false;
//actors are created last so they can use the loaded level
hud = new Hud();
cam = new saiCamera();
player = new Player(cam,hud);
player->setMap(level);
return true;
}
// Fixed-structure frame loop: timing -> input -> simulation updates ->
// 3D rendering -> 2D overlay -> buffer swap -> housekeeping. Runs until
// saiOptions::isRunning is cleared (quit event). Statement order inside
// the loop is significant (see the sky/camera note below).
void Game::startMainloop()
{
float deltaTime = 0.0;
int thisTime = 0;
int lastTime = 0;
while(saiOptions::isRunning)
{
//time since last frame in seconds
//is needed in the update functions
//for framerate-independent movement
thisTime = SDL_GetTicks();
deltaTime = (float)(thisTime - lastTime)/1000;
lastTime = thisTime;
//update key events
//returns false on Quit event
if(!input.update())
saiOptions::isRunning = false;
//updates the timer
fWork.update();
//bullet step simulation
physics.update(deltaTime);
//update enemies
for(unsigned int i=0;i<enemies.size();i++)
enemies[i]->update(deltaTime);
level->update(deltaTime);
player->update(deltaTime);
//clear buffer
fWork.beginScene();
//set perspective view matrix
fWork.set3d();
//update cam
cam->update();
//render sky
level->renderSky();
//transform the scene
//must be after render sky
//or sky won't work properly
cam->applyCameraMatrix();
//sort lights by distance to camera
//doesn't work properly yet
saiLightManager::setNearestLights(cam->pos);
//render map
level->render(deltaTime);
for(unsigned int i=0;i<enemies.size();i++)
enemies[i]->render(deltaTime);
player->render(deltaTime);
//render collision objects and mesh normals
if(saiOptions::bDebugView)
physics.render();
//render particles
saiParticleManager::render(deltaTime);
//set orthographic view
fWork.set2d();
//render gui
hud->render();
//swap buffers
fWork.endScene();
//reset stats on collision objects
physics.resetObjects();
//limit framerate to 60 fps
fWork.capFPS(60);
}
}
// Load a map file and spawn an actor for every enemy spawn point it
// declares. Returns false when the map could not be loaded.
bool Game::loadLevel(std::string fname)
{
    //load the map; NULL signals a parse/IO failure
    level = saiMap::load(fname);
    if(level == NULL)
        return false;

    //walk the map's spawn point list and create the matching actors
    for(unsigned int n = 0; n < level->spawnpoints.size(); n++)
    {
        saiSpawnpoint point = level->spawnpoints[n];
        if(point.id > 0 && point.Name == "enemySpawn")
        {
            Enemy* spawned = new Enemy(point.Position.x, point.Position.y, point.Position.z, point.Direction);
            enemies.push_back(spawned);
        }
    }
    return true;
}
//MAIN
// Program entry point: construct the game, initialise every subsystem,
// then hand control to the main loop. Returns 1 if initialisation failed.
int main(int argc,char* args[])
{
//create game objects
Game game;
//init game, which also inits all engine subsystems
if(!game.init())
return 1;
//start the mainloop
game.startMainloop();
return 0;
}
| [
"eugenschaefer@gmx.net"
] | eugenschaefer@gmx.net |
a013d9b12a39eb94a704e4c008cd6110147db5ae | d36a6eef776fc10c8e6328e8b9ab609003f84dc2 | /src/ferguson_control.cpp | 80f939d425446558089072fffa562a0f8ef25b20 | [] | no_license | dennisjosesilva/ferguson-patch-visualiser | 7bd6e007d9a2a5e766472e1f559fcb5544ee41c9 | c92bdfce2612233c9b01ae765936e72ae7fe71c7 | refs/heads/master | 2020-11-24T14:25:08.555970 | 2020-07-05T19:58:23 | 2020-07-05T19:58:23 | 228,192,271 | 2 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,222 | cpp | #include <ferguson_control.hpp>
#include <QVBoxLayout>
#include <QHBoxLayout>
// Build the "Ferguson Patch Control" side panel: a bold, centered title
// row on top and a "Show handlers" checkbox row underneath. The checkbox
// starts checked and drives handler visibility on the supplied renderer.
FergusonControl::FergusonControl(QWidget *parent, Renderer *renderer)
    : QWidget(parent), renderer_{renderer}
{
    QVBoxLayout *rootLayout = new QVBoxLayout();
    rootLayout->setAlignment(Qt::AlignTop);

    // Centered, bold caption at the top of the panel.
    QHBoxLayout *headerRow = new QHBoxLayout();
    headerRow->setAlignment(Qt::AlignCenter);
    titlelabel_ = new QLabel(tr("Ferguson Patch Control"), this);
    titlelabel_->setStyleSheet("font-weight: bold; font-size: 11pt;");
    titlelabel_->setContentsMargins(0, 2, 0, 15);
    headerRow->addWidget(titlelabel_);
    rootLayout->addLayout(headerRow);

    // Checkbox row toggling the patch-handler overlay.
    QHBoxLayout *checkboxRow = new QHBoxLayout();
    showHandlerschk_ = new QCheckBox("Show handlers");
    showHandlerschk_->setCheckState(Qt::Checked);
    QObject::connect(showHandlerschk_, &QCheckBox::stateChanged,
                     this, &FergusonControl::showHandlers_stateChanged);
    checkboxRow->addWidget(showHandlerschk_);
    rootLayout->addLayout(checkboxRow);

    setLayout(rootLayout);
}
// Slot for the "Show handlers" checkbox: forwards the new check state to
// the renderer. Qt::PartiallyChecked falls through with no action.
void FergusonControl::showHandlers_stateChanged(int state)
{
switch(state)
{
case Qt::Unchecked:
renderer_->hideHandlers();
break;
case Qt::Checked:
renderer_->showHandlers();
break;
}
} | [
"dennisjosesilva@gmail.com"
] | dennisjosesilva@gmail.com |
d7a9a2d36429e3f2e6777befc4aa2c341f0bcf1d | da3b4530148feaab0ac073eeb061a95d982b875c | /src/qt/bitcoin.cpp | 9c6236caf0352aa6b3170b3dcc3acebc28f91252 | [
"MIT"
] | permissive | ericnobody/GilbesCoin | 384121fdca27859810c6feb06120e3e53f6266ab | 3ae0be3f096ecdcb7b173237a472a6ea7300bcc4 | refs/heads/master | 2016-08-11T07:28:32.338508 | 2015-12-22T00:14:29 | 2015-12-22T00:14:29 | 48,361,094 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 10,998 | cpp | /*
* W.J. van der Laan 2011-2012
*/
#include "bitcoingui.h"
#include "clientmodel.h"
#include "walletmodel.h"
#include "optionsmodel.h"
#include "guiutil.h"
#include "guiconstants.h"
#include "init.h"
#include "ui_interface.h"
#include "qtipcserver.h"
#include <QApplication>
#include <QMessageBox>
#include <QTextCodec>
#include <QLocale>
#include <QTranslator>
#include <QSplashScreen>
#include <QLibraryInfo>
#include <boost/interprocess/ipc/message_queue.hpp>
#include <boost/algorithm/string/predicate.hpp>
#if defined(BITCOIN_NEED_QT_PLUGINS) && !defined(_BITCOIN_QT_PLUGINS_INCLUDED)
#define _BITCOIN_QT_PLUGINS_INCLUDED
#define __INSURE__
#include <QtPlugin>
Q_IMPORT_PLUGIN(qcncodecs)
Q_IMPORT_PLUGIN(qjpcodecs)
Q_IMPORT_PLUGIN(qtwcodecs)
Q_IMPORT_PLUGIN(qkrcodecs)
Q_IMPORT_PLUGIN(qtaccessiblewidgets)
#endif
// Need a global reference for the notifications to find the GUI
static BitcoinGUI *guiref;
static QSplashScreen *splashref;
// Core-to-GUI bridge: show a message box on the GUI thread, or fall back
// to stdout/stderr when the GUI has not been created (yet).
static void ThreadSafeMessageBox(const std::string& message, const std::string& caption, int style)
{
// Message from network thread
if(guiref)
{
bool modal = (style & CClientUIInterface::MODAL);
// in case of modal message, use blocking connection to wait for user to click OK
QMetaObject::invokeMethod(guiref, "error",
modal ? GUIUtil::blockingGUIThreadConnection() : Qt::QueuedConnection,
Q_ARG(QString, QString::fromStdString(caption)),
Q_ARG(QString, QString::fromStdString(message)),
Q_ARG(bool, modal));
}
else
{
// No GUI: duplicate the message on both stdout and stderr.
printf("%s: %s\n", caption.c_str(), message.c_str());
fprintf(stderr, "%s: %s\n", caption.c_str(), message.c_str());
}
}
// Ask the user to confirm paying nFeeRequired. Auto-accepts fees below the
// minimum / below the configured nTransactionFee and always accepts in
// daemon mode; otherwise blocks on a GUI prompt and returns the answer.
// Returns false when no GUI exists to ask.
static bool ThreadSafeAskFee(int64 nFeeRequired, const std::string& strCaption)
{
if(!guiref)
return false;
if(nFeeRequired < MIN_TX_FEE || nFeeRequired <= nTransactionFee || fDaemon)
return true;
bool payFee = false;
// Blocking connection: wait for the GUI thread to fill in payFee.
QMetaObject::invokeMethod(guiref, "askFee", GUIUtil::blockingGUIThreadConnection(),
Q_ARG(qint64, nFeeRequired),
Q_ARG(bool*, &payFee));
return payFee;
}
// Forward a received URI to the GUI thread; blocks until the GUI has
// processed it. Silently drops the URI when no GUI exists yet.
static void ThreadSafeHandleURI(const std::string& strURI)
{
    if(guiref)
    {
        QMetaObject::invokeMethod(guiref, "handleURI", GUIUtil::blockingGUIThreadConnection(),
                                  Q_ARG(QString, QString::fromStdString(strURI)));
    }
}
// Show a start-up progress string on the splash screen (when one is up)
// and pump the event queue so the new text actually gets painted.
static void InitMessage(const std::string &message)
{
    if(!splashref)
        return;
    splashref->showMessage(QString::fromStdString(message), Qt::AlignBottom|Qt::AlignHCenter, QColor(255,255,200));
    QApplication::instance()->processEvents();
}
// Request application exit by queueing quit() on the Qt event loop, so the
// shutdown path runs on the GUI thread.
static void QueueShutdown()
{
QMetaObject::invokeMethod(QCoreApplication::instance(), "quit", Qt::QueuedConnection);
}
/*
Translate string to current locale using Qt.
*/
static std::string Translate(const char* psz)
{
// Delegates to Qt's translation system under the "bitcoin-core" context.
return QCoreApplication::translate("bitcoin-core", psz).toStdString();
}
/* Handle runaway exceptions. Shows a message box with the problem and quits the program.
*/
// Log the exception (e may be NULL when the catch(...) handler fires),
// tell the user, then exit hard - program state may be corrupt here.
static void handleRunawayException(std::exception *e)
{
PrintExceptionContinue(e, "Runaway exception");
QMessageBox::critical(0, "Runaway exception", BitcoinGUI::tr("A fatal error occured. GilbesCoin can no longer continue safely and will quit.") + QString("\n\n") + QString::fromStdString(strMiscWarning));
exit(1);
}
#ifndef BITCOIN_QT_TEST
// Qt application entry point. Rough phases, in order:
//   1. URI hand-off: forward any "gilbescoin:" URI to a running instance
//      via the IPC message queue and exit if that succeeded.
//   2. Parse command line and bitcoin.conf; set QSettings identity.
//   3. Install Qt and application translators for the chosen locale.
//   4. Hook core->GUI signals, show the splash screen, run AppInit2().
//   5. Wire models to the main window and enter the Qt event loop.
//   6. Tear down models before calling Shutdown() on the core.
int main(int argc, char *argv[])
{
// TODO: implement URI support on the Mac.
#if !defined(MAC_OSX)
// Do this early as we don't want to bother initializing if we are just calling IPC
for (int i = 1; i < argc; i++)
{
if (boost::algorithm::istarts_with(argv[i], "gilbescoin:"))
{
const char *strURI = argv[i];
try {
boost::interprocess::message_queue mq(boost::interprocess::open_only, BITCOINURI_QUEUE_NAME);
if (mq.try_send(strURI, strlen(strURI), 0))
// if URI could be sent to the message queue exit here
exit(0);
else
// if URI could not be sent to the message queue do a normal Bitcoin-Qt startup
break;
}
catch (boost::interprocess::interprocess_exception &ex) {
// don't log the "file not found" exception, because that's normal for
// the first start of the first instance
if (ex.get_error_code() != boost::interprocess::not_found_error)
{
printf("main() - boost interprocess exception #%d: %s\n", ex.get_error_code(), ex.what());
break;
}
}
}
}
#endif
// Internal string conversion is all UTF-8
QTextCodec::setCodecForTr(QTextCodec::codecForName("UTF-8"));
QTextCodec::setCodecForCStrings(QTextCodec::codecForTr());
Q_INIT_RESOURCE(bitcoin);
QApplication app(argc, argv);
// Install global event filter that makes sure that long tooltips can be word-wrapped
app.installEventFilter(new GUIUtil::ToolTipToRichTextFilter(TOOLTIP_WRAP_THRESHOLD, &app));
// Command-line options take precedence:
ParseParameters(argc, argv);
// ... then bitcoin.conf:
if (!boost::filesystem::is_directory(GetDataDir(false)))
{
fprintf(stderr, "Error: Specified directory does not exist\n");
return 1;
}
ReadConfigFile(mapArgs, mapMultiArgs);
// Application identification (must be set before OptionsModel is initialized,
// as it is used to locate QSettings)
app.setOrganizationName("GilbesCoin");
app.setOrganizationDomain("we-have-no-domain-yet.nex");
if(GetBoolArg("-testnet")) // Separate UI settings for testnet
app.setApplicationName("gilbescoin-qt-testnet");
else
app.setApplicationName("gilbescoin-qt");
// ... then GUI settings:
OptionsModel optionsModel;
// Get desired locale (e.g. "de_DE") from command line or use system locale
QString lang_territory = QString::fromStdString(GetArg("-lang", QLocale::system().name().toStdString()));
QString lang = lang_territory;
// Convert to "de" only by truncating "_DE"
lang.truncate(lang_territory.lastIndexOf('_'));
QTranslator qtTranslatorBase, qtTranslator, translatorBase, translator;
// Load language files for configured locale:
// - First load the translator for the base language, without territory
// - Then load the more specific locale translator
// Load e.g. qt_de.qm
if (qtTranslatorBase.load("qt_" + lang, QLibraryInfo::location(QLibraryInfo::TranslationsPath)))
app.installTranslator(&qtTranslatorBase);
// Load e.g. qt_de_DE.qm
if (qtTranslator.load("qt_" + lang_territory, QLibraryInfo::location(QLibraryInfo::TranslationsPath)))
app.installTranslator(&qtTranslator);
// Load e.g. bitcoin_de.qm (shortcut "de" needs to be defined in bitcoin.qrc)
if (translatorBase.load(lang, ":/translations/"))
app.installTranslator(&translatorBase);
// Load e.g. bitcoin_de_DE.qm (shortcut "de_DE" needs to be defined in bitcoin.qrc)
if (translator.load(lang_territory, ":/translations/"))
app.installTranslator(&translator);
// Subscribe to global signals from core
uiInterface.ThreadSafeMessageBox.connect(ThreadSafeMessageBox);
uiInterface.ThreadSafeAskFee.connect(ThreadSafeAskFee);
uiInterface.ThreadSafeHandleURI.connect(ThreadSafeHandleURI);
uiInterface.InitMessage.connect(InitMessage);
uiInterface.QueueShutdown.connect(QueueShutdown);
uiInterface.Translate.connect(Translate);
// Show help message immediately after parsing command-line options (for "-lang") and setting locale,
// but before showing splash screen.
if (mapArgs.count("-?") || mapArgs.count("--help"))
{
GUIUtil::HelpMessageBox help;
help.showOrPrint();
return 1;
}
QSplashScreen splash(QPixmap(":/images/splash"), 0);
if (GetBoolArg("-splash", true) && !GetBoolArg("-min"))
{
splash.show();
splash.setAutoFillBackground(true);
splashref = &splash;
}
app.processEvents();
// Keep running after the last window closes; quit is driven explicitly
// via QueueShutdown()/app.exec() returning.
app.setQuitOnLastWindowClosed(false);
try
{
// Regenerate startup link, to fix links to old versions
if (GUIUtil::GetStartOnSystemStartup())
GUIUtil::SetStartOnSystemStartup(true);
BitcoinGUI window;
guiref = &window;
if(AppInit2())
{
{
// Put this in a block, so that the Model objects are cleaned up before
// calling Shutdown().
optionsModel.Upgrade(); // Must be done after AppInit2
if (splashref)
splash.finish(&window);
ClientModel clientModel(&optionsModel);
WalletModel walletModel(pwalletMain, &optionsModel);
window.setClientModel(&clientModel);
window.setWalletModel(&walletModel);
// If -min option passed, start window minimized.
if(GetBoolArg("-min"))
{
window.showMinimized();
}
else
{
window.show();
}
// TODO: implement URI support on the Mac.
#if !defined(MAC_OSX)
// Place this here as guiref has to be defined if we dont want to lose URIs
ipcInit();
// Check for URI in argv
for (int i = 1; i < argc; i++)
{
if (boost::algorithm::istarts_with(argv[i], "gilbescoin:"))
{
const char *strURI = argv[i];
try {
boost::interprocess::message_queue mq(boost::interprocess::open_only, BITCOINURI_QUEUE_NAME);
mq.try_send(strURI, strlen(strURI), 0);
}
catch (boost::interprocess::interprocess_exception &ex) {
printf("main() - boost interprocess exception #%d: %s\n", ex.get_error_code(), ex.what());
break;
}
}
}
#endif
// Blocks here for the whole session.
app.exec();
window.hide();
window.setClientModel(0);
window.setWalletModel(0);
guiref = 0;
}
// Shutdown the core and it's threads, but don't exit Bitcoin-Qt here
Shutdown(NULL);
}
else
{
return 1;
}
} catch (std::exception& e) {
handleRunawayException(&e);
} catch (...) {
handleRunawayException(NULL);
}
return 0;
}
#endif // BITCOIN_QT_TEST
| [
"karltehjarl@gmail.com"
] | karltehjarl@gmail.com |
c962633bde7e98c6253ab41536ec0dd3bcd80acb | 69cb4c8f4b1968e6bb434a080b3278482b03b8f7 | /arduino/cap_touch_LED/cap_touch_LED.ino | 760ae24f730893dc7a3bf04a25769f5b29f9f414 | [] | no_license | KAZOOSH/coloroton | 6572f1f30fffcb8468a8b85167faf563979e93f9 | bd44fa012cceeb28b5354afc1db8a0312c4ea68b | refs/heads/master | 2021-05-09T18:39:01.771432 | 2018-02-10T20:19:05 | 2018-02-10T20:19:05 | 119,170,348 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 3,587 | ino | /*********************************************************
This is a library for the MPR121 12-channel Capacitive touch sensor
Designed specifically to work with the MPR121 Breakout in the Adafruit shop
----> https://www.adafruit.com/products/
These sensors use I2C communicate, at least 2 pins are required
to interface
Adafruit invests time and resources providing this open source code,
please support Adafruit and open-source hardware by purchasing
products from Adafruit!
Written by Limor Fried/Ladyada for Adafruit Industries.
BSD license, all text above must be included in any redistribution
**********************************************************/
#include <Wire.h>
#include "Adafruit_MPR121.h"
#include "FastLED.h"
#define MAX_HELLIGKEIT 255
#define NUM_LEDS 10 // muss gerade sein!
#define DATA_PIN 3
#define COMM_PIN 5
// You can have up to 4 on one i2c bus but one is enough for testing!
Adafruit_MPR121 cap = Adafruit_MPR121();
// Keeps track of the last pins touched
// so we know when buttons are 'released'
uint16_t lasttouched = 0;
uint16_t currtouched = 0;
CRGB leds[NUM_LEDS+1];
CHSV color_hsv;
uint8_t growing=0;
long start_time;
uint8_t led_count;
// One-time initialisation: LED strip, serial console, MPR121 touch sensor
// (halts forever if the sensor is missing), blanked LEDs, and the
// communication pin driven LOW (= "not touched").
void setup() {
FastLED.addLeds<NEOPIXEL, DATA_PIN>(leds, NUM_LEDS);
while (!Serial); // needed to keep leonardo/micro from starting too fast!
Serial.begin(9600);
Serial.println("Adafruit MPR121 Capacitive Touch sensor test");
// Default address is 0x5A, if tied to 3.3V its 0x5B
// If tied to SDA its 0x5C and if SCL then 0x5D
if (!cap.begin(0x5A)) {
Serial.println("MPR121 not found, check wiring?");
while (1);
}
Serial.println("MPR121 found!");
// Start with all LEDs off and the grow/shrink counter at zero.
clear_display();
FastLED.show();
pinMode(COMM_PIN,OUTPUT);
digitalWrite(COMM_PIN,LOW);
led_count=0;
}
// Main loop: poll pad 0 of the MPR121. On a touch edge the COMM pin goes
// HIGH and the LED bar grows symmetrically from both ends toward the
// middle (one step per 100 ms); on release the pin goes LOW and the bar
// shrinks back the same way.
void loop() {
// Get the currently touched pads
//cap.setThresholds( 1, 1);
currtouched = cap.touched();
// Only pad 0 is monitored (loop bound is 1); _BV(i) selects its bit.
for (uint8_t i=0; i<1; i++) {
// it if *is* touched and *wasnt* touched before, alert!
if ((currtouched & _BV(i)) && !(lasttouched & _BV(i)) ) {
Serial.print(i); Serial.println(" touched");
digitalWrite(COMM_PIN,HIGH);
growing=1;
start_time=millis();
}
// if it *was* touched and now *isnt*, alert!
if (!(currtouched & _BV(i)) && (lasttouched & _BV(i)) ) {
Serial.print(i); Serial.println(" released");
digitalWrite(COMM_PIN,LOW);
growing=0;
start_time=millis();
}
}
// Animate one step every 100 ms: light (or clear) the next pixel pair
// mirrored around the strip's midpoint.
if (growing) {
if (millis()-start_time>100) {
if (led_count<NUM_LEDS/2) {
led_count++;
//Serial.print(led_count-1);
leds[led_count-1]=CRGB::White;
leds[NUM_LEDS-led_count]=CRGB::White;
FastLED.show();
}
start_time=millis();
}
}
else {
if (millis()-start_time>100){
if (led_count>0) {
led_count--;
//Serial.print(led_count);
leds[led_count]=CRGB::Black;
leds[NUM_LEDS-led_count-1]=CRGB::Black;
FastLED.show();
}
start_time=millis();
}
}
// reset our state
lasttouched = currtouched;
// hier gibts es die Rohwerte des Sensorwertes und der Baseline aus wenn man das sehen will
/*
// debugging info
for (uint8_t i=0; i<1; i++) {
Serial.print(cap.filteredData(i)); Serial.print("\t");
Serial.print(" ");
Serial.print(cap.baselineData(i)); Serial.print("\t");
Serial.print(" ");
}
Serial.println();
delay(50);
*/
}
// Switch every pixel in the local framebuffer off. Does not push to the
// strip; the caller must invoke FastLED.show() afterwards.
// (The unused local variable ct_led from the original was removed.)
void clear_display()
{
    for (int ct_x = 0; ct_x < NUM_LEDS; ct_x++)
    {
        leds[ct_x] = CRGB::Black;
    }
}
| [
"hi@brinoausrino.com"
] | hi@brinoausrino.com |
a6932d888bed5d7f34b29f5515a621e8771f0108 | b7178a60b76d07df30269b76768cb5e8be407efb | /maxmind/src/main/cpp/maxmind.cpp | 3e43b061bbfa57e9a33e1143a4fa064f11a7fde6 | [] | no_license | huzongyao/AndroidMaxMind | 512259a8eb233a51d83474c4133e64e2b3e7f2e6 | 52835e96936ee751136ae77e035c86e501a37da6 | refs/heads/master | 2020-09-17T07:01:50.035838 | 2018-12-03T13:18:32 | 2018-12-03T13:18:32 | 94,491,839 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,689 | cpp | #include <jni.h>
#include <unistd.h>
#include "src/ndkhelper.h"
#include "src/GeoLite2PP.hpp"
#ifdef __cplusplus
extern "C" {
#endif
using namespace GeoLite2PP;
DB *mmdb;
// Open the GeoLite2 database file at dbPath_ into the global handle mmdb.
// Returns 0 on success, 1 if the file is not readable.
// NOTE(review): a previously opened mmdb is not freed before being
// overwritten here - confirm callers always close() first.
JNIEXPORT jint JNICALL
Java_com_hzy_maxmind_MaxMindApi_open(JNIEnv *env, jclass type, jstring dbPath_) {
const char *dbPath = env->GetStringUTFChars(dbPath_, 0);
int ret = 1; // pessimistic default: failure
if (0 != access(dbPath, R_OK)) {
LOGE("db file access failed[%s]!", dbPath);
} else {
mmdb = new DB(dbPath);
ret = 0;
}
// Always release the borrowed JNI chars, on both paths.
env->ReleaseStringUTFChars(dbPath_, dbPath);
return ret;
}
// Release the database handle opened by open().
// Fix: reset the pointer to NULL after deleting it, so a second close()
// call is a safe no-op instead of a double delete on a dangling pointer.
JNIEXPORT void JNICALL
Java_com_hzy_maxmind_MaxMindApi_close(JNIEnv *env, jclass type) {
    if (mmdb != NULL) {
        delete (mmdb);
        mmdb = NULL;
    }
}
// Look up ipAddr_ in the database and return the record string produced by
// GeoLite2PP::DB::lookup(). Requires a successful open() first (mmdb must
// be non-NULL; no guard here).
JNIEXPORT jstring JNICALL
Java_com_hzy_maxmind_MaxMindApi_lookupIpString(JNIEnv *env, jclass type, jstring ipAddr_) {
const char *ipAddr = env->GetStringUTFChars(ipAddr_, 0);
std::string result = mmdb->lookup(ipAddr);
// Release the borrowed UTF chars before building the Java result string.
env->ReleaseStringUTFChars(ipAddr_, ipAddr);
return env->NewStringUTF(result.c_str());
}
// Report the underlying libmaxminddb version as a Java string. The
// temporary std::string lives until the end of the full expression, so
// handing its c_str() straight to NewStringUTF is safe.
JNIEXPORT jstring JNICALL
Java_com_hzy_maxmind_MaxMindApi_getLibVersion(JNIEnv *env, jclass type) {
    return env->NewStringUTF(mmdb->get_lib_version_mmdb().c_str());
}
// Report the GeoLite2++ wrapper version as a Java string. The temporary
// std::string outlives the NewStringUTF call (end of full expression).
JNIEXPORT jstring JNICALL
Java_com_hzy_maxmind_MaxMindApi_getLib2PPVersion(JNIEnv *env, jclass type) {
    return env->NewStringUTF(mmdb->get_lib_version_geolite2pp().c_str());
}
// Return the open database's metadata string. Requires a prior open();
// the temporary std::string stays valid for the whole expression.
JNIEXPORT jstring JNICALL
Java_com_hzy_maxmind_MaxMindApi_getMetaData(JNIEnv *env, jclass type) {
    return env->NewStringUTF(mmdb->get_metadata().c_str());
}
#ifdef __cplusplus
}
#endif | [
"hzy3774@gmail.com"
] | hzy3774@gmail.com |
92290fb914c8420942efd805c1ed0f7c8bc7ab05 | b16cf0800ca9e824d2d57abb7d044e976ada562d | /src/range_visualizer/src/new_range.cpp | a6ec7e8d3c331fecaa90f268a8a7d12b9f098dfc | [] | no_license | cityzmaster/abdallah-rtech | 08e33000cb9049d773a3340906b152412e7ca5a0 | e8fcad38d92cae0362b92018d1b28547f268092f | refs/heads/master | 2020-04-24T09:05:19.841052 | 2019-05-30T15:04:41 | 2019-05-30T15:04:41 | 171,852,020 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 763 | cpp | #include "ros/ros.h"
#include "sensor_msgs/Range.h"
sensor_msgs::Range msg;
// Subscriber callback for ultrasonic/raw: stores half of the reported
// range in the outgoing message. Only .range is copied; the incoming
// header and limits are ignored. (Presumably the raw value is a
// round-trip distance - TODO confirm against the sensor driver.)
void sensorCallback(sensor_msgs::Range sensor)
{
msg.range =sensor.range/2;
}
// Node "range_publisher": subscribes to ultrasonic/raw, halves each
// reading (see sensorCallback) and republishes the latest value on
// ultrasonic/filtered at 10 Hz.
// (The unused, write-only loop counter from the template was removed.)
int main(int argc, char **argv)
{
    // Static fields of the outgoing message; only msg.range changes at runtime.
    msg.header.frame_id = "ultrasonic/raw";
    msg.range = 0;
    msg.min_range = 0.02;
    msg.max_range = 4;
    msg.field_of_view = 0.785;

    ros::init(argc, argv, "range_publisher");
    ros::NodeHandle n;
    ros::Subscriber sub = n.subscribe("ultrasonic/raw", 1000, sensorCallback);
    ros::Publisher range_publisher = n.advertise<sensor_msgs::Range>("ultrasonic/filtered", 1000);
    ros::Rate loop_rate(10);

    // Publish the most recent filtered reading until shutdown.
    while (ros::ok())
    {
        range_publisher.publish(msg);
        ros::spinOnce();
        loop_rate.sleep();
    }
    return 0;
}
| [
"cityzmaster38@gmail.com"
] | cityzmaster38@gmail.com |
f8cbb421aa83d65023533e7961904d37ee3e72a3 | 59d27bbde052940b1af6be9f6f96fb97a808afaf | /mfc-exam-full-win/CenticFirewall/SystemGraph.cpp | b72ccee235066384205b6a52d0d743a80b65cb47 | [] | no_license | eslinux/Windows | 92c4d988b1ce2e0091d25193509a42822ad245cb | 13f24b4507a9929c8f8a334ca01228c99786894f | refs/heads/master | 2022-05-23T10:07:24.298878 | 2022-04-02T03:50:04 | 2022-04-02T03:50:04 | 43,340,447 | 0 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 5,401 | cpp | // SystemGraph.cpp : implementation file
//
#include "stdafx.h"
#include "CenticFirewall.h"
#include "SystemGraph.h"
// SystemGraph
IMPLEMENT_DYNAMIC(SystemGraph, CStatic)
// Default to the one-hour view. The GDI resources (2px black axis pen,
// 1px red graph pen, button-face background brush) are created once here
// and reused for every repaint.
SystemGraph::SystemGraph()
: m_graphMode(GRAPH_MODE_HOUR)
{
m_dataList = NULL; // supplied later via SetGraphData()
m_axisPen.CreatePen(PS_SOLID, 2, RGB(0, 0, 0));
m_graphPen.CreatePen(PS_SOLID, 1, RGB(255, 0, 0));
m_hbrBkGnd.CreateSolidBrush(GetSysColor(COLOR_BTNFACE));
}
// Nothing to free explicitly: the pen/brush members clean up in their own
// destructors, and m_dataList is not owned by this control.
SystemGraph::~SystemGraph()
{
}
BEGIN_MESSAGE_MAP(SystemGraph, CStatic)
ON_WM_PAINT()
ON_WM_ERASEBKGND()
END_MESSAGE_MAP()
// SystemGraph message handlers
static const char *weekStr[] = {{"Mon"}, {"Tue"}, {"Wed"}, {"Thu"}, {"Fri"}, {"Sat"}, {"Sun"}};
static const char *monthStr[] = {{"Jan"}, {"Feb"}, {"Mar"}, {"Apr"}, {"May"}, {"Jun"}, {"Jul"}, {"Aug"}, {"Sep"}, {"Oct"}, {"Nov"}, {"Dec"}};
// (Re)draws the whole graph: clears the client area, draws the X/Y axes,
// the Y-axis percentage labels (0..100% in steps of 20), the X-axis
// labels for the current m_graphMode, and finally the data polyline from
// m_dataList. Called from OnPaint(). (NB: "Craete" is a historical typo
// kept because the name is part of the class interface.)
// NOTE(review): m_dataList is dereferenced without a NULL check - callers
// must have called SetGraphData() before the first paint; confirm.
void SystemGraph::CraeteGraph(void)
{
CPaintDC dc(this); // device context for painting
int i = 0;
int stepX =0, stepY = 0;
int numStep =0;
//TRACE(L"SystemGraph::CraeteGraph %d\n", m_graphMode);
#if 0
COLORREF qCircleColor = RGB(255, 0, 0);
CPen qCirclePen(PS_SOLID, 1, qCircleColor);
dc.SelectObject(&qCirclePen);
dc.Ellipse(0, 0, 200, 100);
COLORREF qLineColor = RGB(0, 0, 0);
CPen qLinePen(PS_SOLID, 4, qLineColor);
dc.SelectObject(&qLinePen);
dc.MoveTo(0,100 );
dc.LineTo(0 ,0);
dc.MoveTo(0,100 );
dc.LineTo(100 ,100);
CString szBuffer(_T("hello"));
dc.SetBkColor(GetSysColor(COLOR_BTNFACE));
dc.TextOutW(10,10, szBuffer);
dc.TextOutW(10,110, szBuffer);
#endif
//clean old screen
m_nSave = dc.SaveDC();
GetClientRect(&m_rectGraphDim);
dc.FillRect(&m_rectGraphDim,&m_hbrBkGnd);
//draw XY coordinate
dc.SelectObject(&m_axisPen);
dc.MoveTo(GRAPH_X_COORD, GRAPH_Y_COORD - GRAPH_Y_AXIS_LENGTH);
dc.LineTo(GRAPH_X_COORD , GRAPH_Y_COORD);
dc.LineTo(GRAPH_X_COORD + GRAPH_X_AXIS_LENGTH , GRAPH_Y_COORD);
//text on Y axis: 0 20 40 60 80 100% (6 point)
dc.SetBkColor(GetSysColor(COLOR_BTNFACE));
CString szBuffer;
numStep = 6;
stepX = -40;
stepY = - GRAPH_Y_AXIS_LENGTH / 5;
for(i = 0; i < numStep; i++){
szBuffer.Format(_T("%d%s"), 20 * i, "%");
dc.TextOutW(GRAPH_X_COORD + stepX, GRAPH_Y_COORD + stepY *i - 10, szBuffer);
szBuffer.Empty();
}
//text on X axis
//m_graphMode = TIME_YEAR;
// Each mode sets numStep/stepX, which the polyline drawing below reuses.
switch(m_graphMode){
case GRAPH_MODE_HOUR:
//0 10 20 ... 60 (7 point)
numStep = 7;
stepX = GRAPH_X_AXIS_LENGTH/(numStep-1);
stepY = 10;
for(i = 0; i < numStep; i++){
szBuffer.Format(_T("%d"), 10 * i);
dc.TextOutW(GRAPH_X_COORD + stepX * i, GRAPH_Y_COORD + stepY, szBuffer);
szBuffer.Empty();
}
break;
case GRAPH_MODE_DAY:
//0 2 4 ... 24 Hour ( 13 point)
numStep = 13;
stepX = GRAPH_X_AXIS_LENGTH/(numStep-1);
stepY = 10;
for(i = 0; i < numStep; i++){
szBuffer.Format(_T("%d"), 2 * i);
dc.TextOutW(GRAPH_X_COORD + stepX * i, GRAPH_Y_COORD + stepY, szBuffer);
szBuffer.Empty();
}
break;
case GRAPH_MODE_WEEK:
//Mon Tue Wed Thu Fri Sat Sun ( 7 point)
numStep = 7;
stepX = GRAPH_X_AXIS_LENGTH/(numStep-1);
stepY = 10;
for(i = 0; i < numStep; i++){
//TRACE(_T("%s \n"), "mom");
//TRACE(traceAppMsg, 0, "%s \n", weekStr[i]);
//szBuffer.Format(_T("%s"), weekStr[i]);
szBuffer = CString( weekStr[i] );
dc.TextOutW(GRAPH_X_COORD + stepX * i, GRAPH_Y_COORD + stepY, szBuffer);
szBuffer.Empty();
}
break;
case GRAPH_MODE_MONTH:
//Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec (12 point)
numStep = 12;
stepX = GRAPH_X_AXIS_LENGTH/(numStep-1);
stepY = 10;
for(i = 0; i < numStep; i++){
//TRACE(_T("%s \n"), "mom");
//TRACE(traceAppMsg, 0, "%s \n", weekStr[i]);
//szBuffer.Format(_T("%s"), weekStr[i]);
szBuffer = CString( monthStr[i] );
dc.TextOutW(GRAPH_X_COORD + stepX * i, GRAPH_Y_COORD + stepY, szBuffer);
szBuffer.Empty();
}
break;
case GRAPH_MODE_YEAR:
//2015 2016 ... 2020 (6 point)
numStep = 6;
stepX = GRAPH_X_AXIS_LENGTH/(numStep-1);
stepY = 10;
for(i = 0; i < numStep; i++){
szBuffer.Format(_T("%d"), 2015 + i);
dc.TextOutW(GRAPH_X_COORD + stepX * i, GRAPH_Y_COORD + stepY, szBuffer);
szBuffer.Empty();
}
break;
default:
break;
}
//draw graph
SysGraphDataIt_t it=m_dataList->begin();
dc.SelectObject(&m_graphPen);
if(it != m_dataList->end()){
dc.MoveTo(GRAPH_X_COORD, GRAPH_Y_COORD - (*it)*GRAPH_Y_AXIS_LENGTH/100);
}
for(i = 0; i < numStep; i++){
if(it != m_dataList->end()){
// NOTE(review): the hard-coded 150 looks like it should be
// GRAPH_Y_AXIS_LENGTH (used for the first point above) - confirm.
dc.LineTo(GRAPH_X_COORD + stepX * i, GRAPH_Y_COORD - (*it)*150/100);
it++;
}
}
dc.RestoreDC(m_nSave);
}
// WM_PAINT handler - all drawing is delegated to CraeteGraph(), which
// constructs its own CPaintDC (so none is created here).
void SystemGraph::OnPaint()
{
//CPaintDC dc(this); // device context for painting
// TODO: Add your message handler code here
// Do not call CStatic::OnPaint() for painting messages
//TRACE(_T("%s<%d>"),__FUNCTION__,__LINE__);
//TRACE(L"SystemGraph::OnPaint \n");
CraeteGraph();
}
// WM_ERASEBKGND - default erase is fine, since CraeteGraph() fills the
// whole client rect on every repaint anyway.
BOOL SystemGraph::OnEraseBkgnd(CDC* pDC)
{
// TODO: Add your message handler code here and/or call default
return CStatic::OnEraseBkgnd(pDC);
}
// Switch the X-axis time scale (hour/day/week/month/year). Takes effect on
// the next repaint; no Invalidate() is issued here, so the caller must
// trigger the redraw.
void SystemGraph::ChangeMode(enum SysGraphMode_t mode)
{
m_graphMode = mode;
TRACE(L"SystemGraph::ChangeMode %d\n", m_graphMode);
}
// Point the control at the data series to plot. The list is borrowed, not
// copied - it must outlive this control (the destructor does not free it).
void SystemGraph::SetGraphData(SysGraphData_t * data)
{
m_dataList = data;
}
| [
"luongduyninh@gmail.com"
] | luongduyninh@gmail.com |
60c307cdcd86cfdf086eca24b08da795001a44c6 | 5f873e51f3d2e9a488ca9b8117528099ee657ce2 | /helper.cpp | 339f99030e83e244e64edca8cc70aa421ef0483a | [] | no_license | ruangroc/PropertyTycoonGame | 701c9629f1d15780f42e484a00183b4e068be54c | c7cee15af817495ba077424287b1c508a82fb99a | refs/heads/master | 2022-11-06T14:02:37.538755 | 2020-06-22T18:43:02 | 2020-06-22T18:43:02 | 274,049,730 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,748 | cpp | #include "helper.h"
using namespace std;
/************************************************************************************
 * Function: check_option
 * Description: validates that the user entered one of the menu options 1-4
 * Parameters: string input
 * Pre-conditions: input must be a string
 * Post-conditions: returns true only for the one-character strings "1".."4"
 * *********************************************************************************/
bool check_option(std::string input) {
    if (input.length() != 1) {
        return false;
    }
    return input == "1" || input == "2" || input == "3" || input == "4";
}
/*********************************************************************************
 * Function: check_numbers
 * Description: checks that the string is non-empty and contains only the
 *              decimal digits '0'-'9'
 * Parameters: string input
 * Pre-conditions: input must be a string
 * Post-conditions: returns true iff input is a non-empty run of digits
 * ******************************************************************************/
bool check_numbers(std::string input) {
    if (input.empty()) {
        return false;
    }
    // size_type avoids the signed/unsigned comparison of the old int
    // counter, and isdigit() replaces the hand-coded ASCII range check.
    // Casting to unsigned char keeps isdigit() well-defined for high-bit
    // characters.
    for (std::string::size_type i = 0; i < input.length(); ++i) {
        if (!std::isdigit(static_cast<unsigned char>(input[i]))) {
            return false;
        }
    }
    return true;
}
/**********************************************************************************
 * Function: string_to_int
 * Description: converts a digit string into an int
 * Parameters: string input
 * Pre-conditions: input contains only decimal digits (validate with
 *                 check_numbers first) and the value fits in an int
 * Post-conditions: returns the decoded non-negative int (0 for "")
 * *******************************************************************************/
int string_to_int(std::string input) {
    // Horner's rule: value = value*10 + digit. This replaces the old
    // pow()-based place-value computation, which went through floating
    // point (truncation of an inexact pow() result can corrupt digits)
    // and paid for a pow() call per character.
    int value = 0;
    for (std::string::size_type i = 0; i < input.length(); ++i) {
        value = value * 10 + (input[i] - '0');
    }
    return value;
}
| [
"ruangroc@oregonstate.edu"
] | ruangroc@oregonstate.edu |
ca4c7ccdb45e0b2a4910045c0db576365270f2fd | 3f7b07a12d74629752012b5c21dbab6c3d62f22a | /3rd/plog/Appenders/ConsoleAppender.h | a7e4369ec81215e0d54a973a320f0b1de5bfcb46 | [
"MIT"
] | permissive | openkitchen/epp | b715062ba48c73d0724eb0a87b447fd792ae185b | ee92ec67235a978957db8aef99f5c996b31b9d7c | refs/heads/master | 2021-06-06T21:54:48.808198 | 2018-09-15T16:56:49 | 2018-09-15T16:56:49 | 148,861,593 | 0 | 0 | MIT | 2020-05-01T08:14:55 | 2018-09-15T02:25:36 | C++ | UTF-8 | C++ | false | false | 764 | h | #pragma once
#include <iostream>
#include <plog/Util.h>
namespace plog
{
// Appender that writes each formatted log record to the console:
// std::wcout on Windows, std::cout elsewhere. Concurrent write() calls
// are serialised by an internal mutex.
template<class Formatter>
class ConsoleAppender : public IAppender
{
public:
ConsoleAppender()
{
#ifdef _WIN32
// Adopt the user's locale so wide-character output converts correctly.
::setlocale(LC_ALL, "");
#endif
}
// Format the record first (outside the lock), then emit it while
// holding the mutex.
virtual void write(const Record& record)
{
util::nstring str = Formatter::format(record);
util::MutexLock lock(m_mutex);
writestr(str);
}
protected:
// Emit one already-formatted string; flushes after every write so
// messages appear immediately.
void writestr(const util::nstring& str)
{
#ifdef _WIN32
std::wcout << str << std::flush;
#else
std::cout << str << std::flush;
#endif
}
protected:
util::Mutex m_mutex; // guards console output in write()
};
}
| [
"winston2tim@gmail.com"
] | winston2tim@gmail.com |
34d775188b22be19742c2dc591e9ac1ccae4e65b | c8396fce9c9e6d9312707cd44b274670ff67c7d4 | /framesender.h | 6e5b94c4127dc51ef1be9bc3ead9e3f87209fc2b | [] | no_license | pipi95/FrameOperator | c86618a7ab8b30185958dfac519aecef5ed3741e | 655af0a2bbc8cb85f0f85a26276c7a442858808d | refs/heads/master | 2020-03-28T22:31:37.009065 | 2018-09-18T08:25:10 | 2018-09-18T08:25:10 | 149,239,588 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 768 | h | #ifndef FRAMESENDER_H
#define FRAMESENDER_H
#include "equipframe.h"
#include "frameoperator_global.h"
#include <QHostAddress>
#include <QObject>
#include <QtNetwork/QTcpSocket>
#include <QtSerialPort/QSerialPort>
namespace FrameOperatorSpace {
class FRAMEOPERATORSHARED_EXPORT FrameSender : public QObject {
Q_OBJECT
public:
enum SourceType {
SerialportType,
TcpType,
MulticastType
};
FrameSender(SourceType type, QIODevice* device, QObject* parent = nullptr);
bool IsSourceValid();
~FrameSender();
signals:
public slots:
void Send(const EquipFrame& fr);
void Send(const QByteArray& content);
private:
bool isSourceValid;
SourceType sourceType;
QIODevice* source;
};
}
#endif // FRAMESENDER_H
| [
"pipi95@163.com"
] | pipi95@163.com |
41d8906e1ad48a326ae45aee830d9b582541615c | 50c2e632e04c86aca1be33dfe2b2671468456595 | /fb_hacker_cup/price/subarray.cpp | c1b36586a51f689820943b652115f28d78d47466 | [] | no_license | rohith2506/Algo-world | b413b4c4b572ec1d0c9539af526fa7edfa584444 | 064241d0c8f3ec43f9abc7905cab75214db1b9e0 | refs/heads/master | 2022-08-03T13:37:56.406913 | 2019-08-16T12:23:28 | 2019-08-16T12:23:28 | 24,947,086 | 0 | 0 | null | 2022-07-21T22:32:16 | 2014-10-08T15:28:36 | Python | UTF-8 | C++ | false | false | 1,896 | cpp | #include <iostream>
#include <stdio.h>
#include <vector>
#include <cmath>
using namespace std;
int ceilSearch(vector<long long int> arr, long long int low, long long int high, long long int x) {
long long int mid;
if(x <= arr[low]) return low;
if(x > arr[high]) return -1;
mid = (low + high)/2;
if(arr[mid] == x) return mid;
else if(arr[mid] < x) {
if(mid + 1 <= high && x <= arr[mid+1]) return mid + 1;
else return ceilSearch(arr, mid+1, high, x);
}
else {
if(mid - 1 >= low && x > arr[mid-1]) return mid;
else return ceilSearch(arr, low, mid - 1, x);
}
}
void print_vector(vector<long long int> v) {
for(int i=0; i<v.size(); i++)
cout << v[i] << " ";
cout << endl;
}
long long int calc(vector<long long int> v, long long int val) {
vector<long long int> precompute, hash_value;
precompute.push_back(v[0]);
for(int i=1; i<v.size(); i++) precompute.push_back(precompute[i-1] + v[i]);
long long int result = 0;
for(int i=0; i<precompute.size(); i++) {
int index = ceilSearch(precompute, 0, i-1, (precompute[i] - val));
hash_value.push_back(index);
}
for(int i=0; i<precompute.size(); i++) {
long long int temp_res = precompute[i];
if(precompute[i] <= val) result += (i+1);
else {
int index = hash_value[i];
if(index != -1) result += (i - index);
}
}
return result;
}
int main() {
int test_cases;
cin >> test_cases;
for(int tst=1; tst <= test_cases; tst++) {
long long int n, p;
cin >> n >> p;
vector<long long int> v;
for(int i=0; i<n; i++) {
long long int value;
cin >> value;
v.push_back(value);
}
long long int result = calc(v, p);
cout << "Case #" << tst << ": " << result << endl;
}
return 0;
}
| [
"rohith.uppala369@gmail.com"
] | rohith.uppala369@gmail.com |
6e115f4d577346cbfc3a1420a5c0d9cb4bf4c4e4 | 05f7573db159e870fb26c847991c4cb8c407ed4c | /VBF/Source/VBF_CORE4.0/VBF_Interface/VBF_Plot/IVBF_MarkModelCreator.h | 5a152cb8d2f1ee88ef367bda701d5238e4eefafa | [] | no_license | riyue625/OneGIS.ModelingTool | e126ef43429ce58d22c65832d96dbd113eacbf85 | daf3dc91584df7ecfed6a51130ecdf6671614ac4 | refs/heads/master | 2020-05-28T12:12:43.543730 | 2018-09-06T07:42:00 | 2018-09-06T07:42:00 | null | 0 | 0 | null | null | null | null | GB18030 | C++ | false | false | 931 | h | //*******************************************************************
// FileName:IVBF_MarkModelCreator.h
// Function:标图实体模型创建接口
// Author:
// Date:
//*******************************************************************
#ifndef __IVBF_MARK_MODEL_CREATOR_H__
#define __IVBF_MARK_MODEL_CREATOR_H__
#include <VBF_Plot/IVBF_MarkModel.h>
#include <string>
// 定义3D模块中专有接口的ID号
const char VBF_INTERFACE_3DPLOT_MODEL_CREATOR[] = "三维标图模型创建接口";
//--------------------------------------------------------------------
// 定义接口:IVBF_MarkModelCreator
// 接口描述:实体模型创建器
//--------------------------------------------------------------------
class IVBF_MarkModelCreator
{
public:
virtual ~IVBF_MarkModelCreator() {}
public:
virtual IVBF_MarkModel* CreateMarkModel(unsigned int nMarkType, const std::string& strMarkID)=0;
};
#endif
| [
"robertsam@126.com"
] | robertsam@126.com |
a28b084eab5320f06e75baf7d42a6dd78e66cf6e | b5b56ce3eb1dfe324eafbda3e0e5f338c5dd72e2 | /Shared/include/ClipUtil.h | e7e5246893519d3cd8818fcdd71497798eee3376 | [] | no_license | wayfinder/Wayfinder-Server | 5cb91281b33cea6d8f6d74550b6564a71c4be1d7 | a688546589f246ee12a8a167a568a9c4c4ef8151 | refs/heads/master | 2021-01-22T22:39:08.348787 | 2012-03-31T11:34:42 | 2012-03-31T11:34:42 | 727,490 | 8 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 7,578 | h | /*
Copyright (c) 1999 - 2010, Vodafone Group Services Ltd
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the Vodafone Group Services Ltd nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef CLIP_UTIL_H
#define CLIP_UTIL_H
#include "config.h"
#include <vector>
#include "MC2BoundingBox.h"
#include "MC2Point.h"
#include "MC2Coordinate.h"
class ClipUtil {
public:
template<class ITERATOR>
static int clipPolyToBBoxFast( const MC2BoundingBox& bbox,
vector<MC2Coordinate>& result,
ITERATOR begin,
ITERATOR end ) {
vector<MC2Point> pointVec;
pointVec.clear();
{
for ( ITERATOR it = begin;
it != end;
++it ) {
pointVec.push_back( MC2Point( it->lon, it->lat) );
}
}
int resultBool = clipPolyToBBoxFast( bbox, pointVec );
if ( resultBool ) {
result.clear();
result.reserve( pointVec.size() );
for( vector<MC2Point>::iterator it = pointVec.begin();
it != pointVec.end();
++it ) {
result.push_back( MC2Coordinate( it->getY(), it->getX() ) );
}
}
return resultBool;
}
/**
* Reduces the number of vertices outside the boundingbox and
* computes the Cohen-Sutherland outcodes for each of those
* vertices.
* <br />
* The XYHELPER is a class that should have the functions
* getX(POINT_SEQUENCE::value_type), getY(POINT_SEQUENCE::value_type)
* and getX(ITERATABLE::value_type), getY(ITERATABLE::value_type).
* If the value types are the same, then just two functions are
* needed.
* @see GfxUtility.
*
* @param bbox The boundingbox to reduce the polygon.
* @param vertices The input polygon.
* @param reducedVertices [OUT] The resulting reduced polygon.
* @param outcodes The Cohen-Sutherland outcodes of the
* reduced vertices are added to this
* vector<byte> if not NULL.
* @return True if the vectors got filled with any vertices /
* outcodes, false otherwise.
*/
template<class XYHELPER, class ITERATABLE, class POINT_SEQUENCE >
inline static bool reduceByBBox( const XYHELPER& xyhelper,
const MC2BoundingBox& bbox,
const ITERATABLE& vertices,
POINT_SEQUENCE& reducedVertices,
vector<uint8>* outcodes ) {
if ( vertices.size() < 3 ) {
return false;
}
// Initialize the vertice and outcode vector.
uint32 prevOutcode;
uint32 firstOutcode;
typename ITERATABLE::value_type currVertex;
typename POINT_SEQUENCE::value_type nextVertex;
// Add all of the polygon
typename POINT_SEQUENCE::const_iterator it = vertices.begin();
currVertex = *it;
prevOutcode = firstOutcode =
bbox.getCohenSutherlandOutcode(xyhelper.getY(currVertex),
xyhelper.getX(currVertex) );
// Add the first one.
reducedVertices.push_back( currVertex );
if ( outcodes ) {
outcodes->push_back( prevOutcode );
}
++it;
currVertex = *it;
uint32 currOutcode =
bbox.getCohenSutherlandOutcode(xyhelper.getY(currVertex),
xyhelper.getX(currVertex) );
++it;
for ( ; it != vertices.end(); ++it ) {
nextVertex = *it;
byte nextOutcode =
bbox.getCohenSutherlandOutcode(xyhelper.getY(nextVertex),
xyhelper.getX(nextVertex));
if ((prevOutcode & currOutcode & nextOutcode) == 0) {
reducedVertices.push_back( currVertex );
if ( outcodes ) {
outcodes->push_back( currOutcode );
}
prevOutcode = currOutcode;
}
currOutcode = nextOutcode;
currVertex = nextVertex;
}
// Check the last one.
if ((prevOutcode & currOutcode & firstOutcode) == 0) {
reducedVertices.push_back( currVertex );
if ( outcodes ) {
outcodes->push_back( currOutcode );
}
}
if ( xyhelper.getX(reducedVertices.back()) !=
xyhelper.getX(reducedVertices.front()) ||
xyhelper.getY(reducedVertices.back()) !=
xyhelper.getY(reducedVertices.front()) ) {
// Add first coordinate last again.
reducedVertices.push_back( reducedVertices.front() );
}
// If we end up with less than three coordinates then it's not a
// valid closed polygon anymore.
bool retVal;
if (reducedVertices.size() < 3) {
if ( outcodes ) {
outcodes->clear();
}
retVal = false;
} else {
retVal = true;
}
return (retVal);
}
private:
static void clipSegment( const MC2Point& prevVertex,
const MC2Point& currVertex,
int prevInside,
int currInside,
byte currOutcode,
const MC2BoundingBox* bbox,
byte boundaryOutcode,
vector<byte>& resOutcodes,
vector<MC2Point>& resVertices );
static int clipToBoundary( const byte boundaryOutcode,
const MC2BoundingBox* bbox,
vector<MC2Point>& vertices,
vector<byte>& outcodes,
vector<MC2Point>& resVertices,
vector<byte>& resOutcodes );
int static clipPolyToBBoxFast( const MC2BoundingBox& bbox,
vector<MC2Point>& vertices );
};
#endif
| [
"daniel.n.pettersson@gmail.com"
] | daniel.n.pettersson@gmail.com |
36c1f589c6e3023f2cd98bd8c9a22b0ee92d3372 | 0006f89c8d952bcf14a6150e9c26c94e47fab040 | /src/trace/D3DDriver/AD3D9/AIPixelShaderImp_9.cpp | 52034e0231acf86dc4227e4f77f731cc092fa939 | [
"BSD-3-Clause"
] | permissive | cooperyuan/attila | eceb5d34b8c64c53ffcc52cd96b684d4f88b706f | 29a0ceab793b566c09cf81af26263e4855842c7a | refs/heads/master | 2016-09-05T18:55:56.472248 | 2013-06-29T14:42:02 | 2013-06-29T14:42:02 | 10,222,034 | 8 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 3,927 | cpp | /**************************************************************************
*
* Copyright (c) 2002 - 2011 by Computer Architecture Department,
* Universitat Politecnica de Catalunya.
* All rights reserved.
*
* The contents of this file may not be disclosed to third parties,
* copied or duplicated in any form, in whole or in part, without the
* prior permission of the authors, Computer Architecture Department
* and Universitat Politecnica de Catalunya.
*
*/
#include "Common.h"
#include "AIDeviceImp_9.h"
#include "AIPixelShaderImp_9.h"
#include "AD3D9State.h"
#include <cstring>
AIPixelShaderImp9::AIPixelShaderImp9(AIDeviceImp9* _i_parent, CONST DWORD* pFunction):
i_parent(_i_parent)
{
//AD3D9State::instance().addPixelShader(this, pFunction);
UINT programLength;
// Build the shader intermediate representation and get the shader length.
programLength = ShaderTranslator::get_instance().buildIR(pFunction, programIR);
// Allocate an array to store the program.
program = new DWORD[programLength];
// Copy the content of the program
memcpy(program, pFunction, programLength * sizeof(DWORD));
acdPixelShader = new acdlib::ACDShaderProgram* [8 * 2];
for (UINT i = 0; i < (8 * 2); i++)
acdPixelShader[i] = NULL;
nativePixelShader = new NativeShader* [8 * 2];
for (UINT i = 0; i < (8 * 2); i++)
nativePixelShader[i] = NULL;
refs = 0;
}
AIPixelShaderImp9::AIPixelShaderImp9()
{
///@note Used to differentiate when using as singleton cover
i_parent = 0;
}
AIPixelShaderImp9& AIPixelShaderImp9::getInstance()
{
static AIPixelShaderImp9 instance;
return instance;
}
HRESULT D3D_CALL AIPixelShaderImp9::QueryInterface(REFIID riid, void** ppvObj)
{
D3D9_CALL(false, "AIPixelShaderImp9::QueryInterface")
* ppvObj = cover_buffer_9;
HRESULT ret = static_cast< HRESULT >(0);
return ret;
}
ULONG D3D_CALL AIPixelShaderImp9::AddRef()
{
D3D9_CALL(false, "AIPixelShaderImp9::AddRef")
if(i_parent != 0) {
refs ++;
return refs;
}
else return 0;
}
ULONG D3D_CALL AIPixelShaderImp9::Release()
{
D3D9_CALL(true, "AIPixelShaderImp9::Release")
if(i_parent != 0) {
refs--;
if(refs == 0) {
// Remove state
/*StateDataNode* parent = state->get_parent();
parent->remove_child(state);
delete state;
state = 0;*/
}
return refs;
}
else {
// Object is used as singleton "cover"
return 0;
}
}
HRESULT D3D_CALL AIPixelShaderImp9::GetDevice(IDirect3DDevice9** ppDevice)
{
D3D9_CALL(true, "AIPixelShaderImp9::GetDevice")
*ppDevice = i_parent;
return D3D_OK;;
}
HRESULT D3D_CALL AIPixelShaderImp9::GetFunction(void* pData, UINT* pSizeOfData)
{
D3D9_CALL(false, "AIPixelShaderImp9::GetFunction")
HRESULT ret = static_cast< HRESULT >(0);
return ret;
}
acdlib::ACDShaderProgram* AIPixelShaderImp9::getAcdPixelShader(D3DCMPFUNC alpha, bool fogEnable)
{
int i = (alpha - 1) * 2 + (fogEnable ? 1 : 0);
if (acdPixelShader[i] == NULL)
{
if (nativePixelShader[i] == NULL)
nativePixelShader[i] = ShaderTranslator::get_instance().translate(programIR, alpha, fogEnable);
acdPixelShader[i] = AD3D9State::instance().createShaderProgram();
//printf("Native Shader :\n");
//printf("%s\n", nativePixelShader[i]->debug_assembler.c_str());
acdPixelShader[i]->setCode(nativePixelShader[i]->bytecode, nativePixelShader[i]->lenght);
}
return acdPixelShader[i];
}
NativeShader* AIPixelShaderImp9::getNativePixelShader(D3DCMPFUNC alpha, bool fogEnable)
{
int i = (alpha - 1) * 2 + (fogEnable ? 1 : 0);
if (nativePixelShader[i] == NULL)
nativePixelShader[i] = ShaderTranslator::get_instance().translate(programIR, alpha, fogEnable);
return nativePixelShader[i];
}
| [
"cooperyuan@gmail.com"
] | cooperyuan@gmail.com |
8553a7879103a8a0d6f0e372847fb6c75ffca1dd | 0bcf83fe35f9be2d8d24b314b97cd78970e12406 | /CsvWriter.h | b3895117d97e6707d4f7217c131b0a3969fe375d | [] | no_license | Kaeseknacker/CSVConverter | 972cb8797ad8b55088422cbcbc35991fcad1d4bb | ba1fd7dc38b813f04be0295de632c26460f52f40 | refs/heads/master | 2021-06-10T11:57:24.488380 | 2021-06-06T14:28:13 | 2021-06-06T14:28:13 | 156,982,821 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 248 | h | #ifndef CSVWRITER_H
#define CSVWRITER_H
#include <QString>
#include <QList>
#include <AccountingEntry.h>
class CsvWriter
{
public:
CsvWriter();
void writeFile(QString filePath, QList<AccountingEntry> entries);
};
#endif // CSVWRITER_H
| [
"Raphaelspraul@hotmail.de"
] | Raphaelspraul@hotmail.de |
d2590f195bf599d8728500e488bf3fa3104ad33c | 0434af05fc70b33562c488639652ddc5ebb763bd | /1022.sum-of-root-to-leaf-binary-numbers.cpp | 22f76d3a90a03f284dfc61698aa55c55d0b57685 | [] | no_license | huyinhou/leetcode | 4d131dc4cb915c6556db284e1359e0fa5955b5d9 | 5ff8d793f3180b674120f62363c80325ed5f1b80 | refs/heads/master | 2022-01-21T17:38:34.271552 | 2019-06-22T12:48:08 | 2019-06-22T12:48:08 | 113,725,393 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,766 | cpp |
// #define LEETCODE
#ifdef LEETCODE
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <string>
#include <ostream>
#include <iterator>
#include <iostream>
#include <vector>
#include <numeric>
using namespace std;
struct TreeNode {
int val;
TreeNode *left;
TreeNode *right;
TreeNode(int x) : val(x), left(NULL), right(NULL) {}
};
#endif
class Solution {
public:
int sumRootToLeaf(TreeNode* root) {
return sumRootToLeaf(root, 0);
}
int sumRootToLeaf(TreeNode *node, unsigned base) {
base <<= 1;
// cout<<base<<" "<<node->val<<std::endl;
if (node->val) {
base |= 1;
}
int ret = 0;
if (node->left) {
ret += sumRootToLeaf(node->left, base);
}
if (node->right) {
ret += sumRootToLeaf(node->right, base);
}
if (ret > 0) {
return ret;
}
return base;
}
};
#ifdef LEETCODE
TreeNode *buildTree(vector<int> &vals) {
vector<TreeNode*> nodes(vals.size());
for (int i = vals.size() - 1; i >= 0; i--) {
// cout<<i<<" ";
if (vals[i] < 0) {
continue;
}
TreeNode *node = new TreeNode(vals[i]);
node->val = vals[i];
int child = 2 * i + 1;
if (child < vals.size() && vals[child] >= 0) {
node->left = nodes[child];
}
child++;
if (child < vals.size() && vals[child] >= 0) {
node->right = nodes[child];
}
nodes[i] = node;
}
return nodes[0];
}
int main(int argc, char *argv[]) {
Solution s;
vector<int> v1{1,0,1,0,1,0,1};
TreeNode *tree = buildTree(v1);
assert(s.sumRootToLeaf(tree) == 22);
return 0;
}
#endif
| [
"huyinhou@baidu.com"
] | huyinhou@baidu.com |
cb5daff1abefd4c8447923a9a875f37bd263880e | e8a3c0b3722cacdb99e15693bff0a4333b7ccf16 | /Intra University Contest/MBSTU Team Contest 04 nov 2016/H - Lexicographically Smallest FPIS.cpp | 74fe6fa842b61b3b3f28fe06c347cd84f46dc58e | [] | no_license | piyush1146115/Competitive-Programming | 690f57acd374892791b16a08e14a686a225f73fa | 66c975e0433f30539d826a4c2aa92970570b87bf | refs/heads/master | 2023-08-18T03:04:24.680817 | 2023-08-12T19:15:51 | 2023-08-12T19:15:51 | 211,923,913 | 5 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 598 | cpp | #include<bits/stdc++.h>
using namespace std;
int main()
{
int n, freq[30], i;
string s;
scanf("%d",&n);
while(n--){
cin >> s;
memset(freq, 0, sizeof(freq));
for(i = 0; i < s.size(); i++)
{
freq[s[i] - 'a']++;
}
sort(s.begin(), s.end());
for(i = 1; i < s.size(); i++){
if(freq[s[i] - 'a'] > 1){
freq[s[i]- 'a']--;
s[i] = s[0];
}
}
sort(s.begin(), s.end());
cout << s << endl;
}
return 0;
}
| [
"piyush123kantidas@gmail.com"
] | piyush123kantidas@gmail.com |
33dd89d8f0af4f8c48b4aad4a49a0f4bb51d781f | 3f44cb39bedadd315e8adad341b417205cb9c098 | /include/Classes.h | 34785d437acb93c037251338d18f2fd3eadb83fc | [
"LicenseRef-scancode-cecill-b-en"
] | permissive | etrange02/Fux | dc10e6246a7cd8a4f420cb2c6ceb4d24317958fd | 2f49bcd8ec82eb521092c9162e01c8b0d00222ab | refs/heads/master | 2020-12-24T07:42:13.122844 | 2017-06-18T09:20:02 | 2017-06-18T09:20:02 | 10,109,121 | 2 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 139 | h | #ifndef CLASSES_H_INCLUDED
#define CLASSES_H_INCLUDED
class FichierListe;
class Parametre;
class SliderSon;
#endif // CLASSES_H_INCLUDED
| [
"david.lecoconnier@free.fr"
] | david.lecoconnier@free.fr |
3844acd136e04f77fcf55f32c1bb1959cf72d07e | 2999c075c4e39d2f6d84e2281c90e8d925c800ee | /R-Type/Network/Headers/UDPSocket.h | dd02b2e4ad8595329ab7add02e1dd4e54921eb77 | [] | no_license | lelabo-m/R-Type | b69c31aae2c3451a508059e5980c8e3b5fb572a3 | 68933e5b979b6ae3ef0d9d2bc1b066212cc58391 | refs/heads/master | 2021-09-28T02:48:51.492494 | 2015-04-25T22:03:01 | 2015-04-25T22:03:01 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 857 | h | #ifndef _UDPSOCKET_H_
# define _UDPSOCKET_H_
# define UDP_PORT 6666
#ifdef _WIN32
#define _WINSOCK_DEPRECATED_ON_WARNINGS
#include <Winsock2.h>
#pragma comment(lib, "Ws2_32.lib")
#else
#include <unistd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#endif
#include <stdexcept>
class UDPSocket
{
public:
explicit UDPSocket(unsigned short port, bool doBind = true);
~UDPSocket();
void send(struct sockaddr_in *dest, char *buffer, unsigned int length);
void recv(char *buffer, unsigned int length);
void shutdown_udp();
#ifdef _WIN32
SOCKET getSocket() const;
#else
int getSocket() const;
#endif
private:
struct sockaddr_in _self;
/////
/////
#ifdef _WIN32 // Sockets Windows
private:
SOCKET _socket;
WSADATA _wsa;
#else // Sockets Unix
private:
int _socket;
#endif
/////
/////
};
#endif
| [
"christopher.millon@outlook.com"
] | christopher.millon@outlook.com |
1fe789ea17dda77d87cb6b80d6e0dd7954c30d4a | 31d3432b94eca3d9f44fe7883971b8de5ef89338 | /include/elemental/lapack-like/ApplyPackedReflectors.hpp | 11c334eb7fdaa06618ba395c125cbfa1130a9639 | [] | no_license | ahmadia/elemental | 2ffa196b7e7ce26d21606cfe85159ea7be7ee5fc | 5e69b2dd00383b669857b0ee42782cf8efc2ce7b | refs/heads/master | 2016-09-05T14:57:24.314870 | 2012-05-05T22:05:18 | 2012-05-05T22:05:18 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 8,964 | hpp | /*
Copyright (c) 2009-2012, Jack Poulson
All rights reserved.
This file is part of Elemental.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the owner nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
#include "./ApplyPackedReflectors/UTUtil.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsLLHB.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsLLHF.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsLLVB.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsLLVF.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsLUHB.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsLUHF.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsLUVB.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsLUVF.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsRLHB.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsRLHF.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsRLVB.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsRLVF.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsRUHB.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsRUHF.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsRUVB.hpp"
#include "./ApplyPackedReflectors/ApplyPackedReflectorsRUVF.hpp"
namespace elem {
template<typename R>
inline void
ApplyPackedReflectors
( LeftOrRight side, UpperOrLower uplo,
VerticalOrHorizontal dir, ForwardOrBackward order,
int offset,
const DistMatrix<R,MC,MR>& H,
DistMatrix<R,MC,MR>& A )
{
#ifndef RELEASE
PushCallStack("ApplyPackedReflectors");
#endif
// Since the complex version does not have the same argument list, there is
// currently no good way to ensure that this version is not called with
// complex datatypes. Until C++11 compilers are commonplace, we cannot
// use static_assert either.
if( IsComplex<R>::val )
throw std::logic_error("Called real routine with complex datatype");
if( side == LEFT )
{
if( uplo == LOWER )
{
if( dir == VERTICAL && order == FORWARD )
internal::ApplyPackedReflectorsLLVF( offset, H, A );
else if( dir == VERTICAL )
internal::ApplyPackedReflectorsLLVB( offset, H, A );
else if( order == FORWARD )
internal::ApplyPackedReflectorsLLHF( offset, H, A );
else
internal::ApplyPackedReflectorsLLHB( offset, H, A );
}
else
{
if( dir == VERTICAL && order == FORWARD )
internal::ApplyPackedReflectorsLUVF( offset, H, A );
else if( dir == VERTICAL )
internal::ApplyPackedReflectorsLUVB( offset, H, A );
else if( order == FORWARD )
internal::ApplyPackedReflectorsLUHF( offset, H, A );
else
internal::ApplyPackedReflectorsLUHB( offset, H, A );
}
}
else
{
if( uplo == LOWER )
{
if( dir == VERTICAL && order == FORWARD )
internal::ApplyPackedReflectorsRLVF( offset, H, A );
else if( dir == VERTICAL )
internal::ApplyPackedReflectorsRLVB( offset, H, A );
else if( order == FORWARD )
internal::ApplyPackedReflectorsRLHF( offset, H, A );
else
internal::ApplyPackedReflectorsRLHB( offset, H, A );
}
else
{
if( dir == VERTICAL && order == FORWARD )
internal::ApplyPackedReflectorsRUVF( offset, H, A );
else if( dir == VERTICAL )
internal::ApplyPackedReflectorsRUVB( offset, H, A );
else if( order == FORWARD )
internal::ApplyPackedReflectorsRUHF( offset, H, A );
else
internal::ApplyPackedReflectorsRUHB( offset, H, A );
}
}
#ifndef RELEASE
PopCallStack();
#endif
}
template<typename R> // representation of a real number
inline void
ApplyPackedReflectors
( LeftOrRight side, UpperOrLower uplo,
VerticalOrHorizontal dir, ForwardOrBackward order,
Conjugation conjugation,
int offset,
const DistMatrix<Complex<R>,MC,MR >& H,
const DistMatrix<Complex<R>,MD,STAR>& t,
DistMatrix<Complex<R>,MC,MR >& A )
{
#ifndef RELEASE
PushCallStack("ApplyPackedReflectors");
#endif
if( side == LEFT )
{
if( uplo == LOWER )
{
if( dir == VERTICAL && order == FORWARD )
internal::ApplyPackedReflectorsLLVF
( conjugation, offset, H, t, A );
else if( dir == VERTICAL )
internal::ApplyPackedReflectorsLLVB
( conjugation, offset, H, t, A );
else if( order == FORWARD )
internal::ApplyPackedReflectorsLLHF
( conjugation, offset, H, t, A );
else
internal::ApplyPackedReflectorsLLHB
( conjugation, offset, H, t, A );
}
else
{
if( dir == VERTICAL && order == FORWARD )
internal::ApplyPackedReflectorsLUVF
( conjugation, offset, H, t, A );
else if( dir == VERTICAL )
internal::ApplyPackedReflectorsLUVB
( conjugation, offset, H, t, A );
else if( order == FORWARD )
internal::ApplyPackedReflectorsLUHF
( conjugation, offset, H, t, A );
else
internal::ApplyPackedReflectorsLUHB
( conjugation, offset, H, t, A );
}
}
else
{
if( uplo == LOWER )
{
if( dir == VERTICAL && order == FORWARD )
internal::ApplyPackedReflectorsRLVF
( conjugation, offset, H, t, A );
else if( dir == VERTICAL )
internal::ApplyPackedReflectorsRLVB
( conjugation, offset, H, t, A );
else if( order == FORWARD )
internal::ApplyPackedReflectorsRLHF
( conjugation, offset, H, t, A );
else
internal::ApplyPackedReflectorsRLHB
( conjugation, offset, H, t, A );
}
else
{
if( dir == VERTICAL && order == FORWARD )
internal::ApplyPackedReflectorsRUVF
( conjugation, offset, H, t, A );
else if( dir == VERTICAL )
internal::ApplyPackedReflectorsRUVB
( conjugation, offset, H, t, A );
else if( order == FORWARD )
internal::ApplyPackedReflectorsRUHF
( conjugation, offset, H, t, A );
else
internal::ApplyPackedReflectorsRUHB
( conjugation, offset, H, t, A );
}
}
#ifndef RELEASE
PopCallStack();
#endif
}
template<typename R> // representation of a real number
inline void
ApplyPackedReflectors
( LeftOrRight side, UpperOrLower uplo,
VerticalOrHorizontal dir, ForwardOrBackward order,
Conjugation conjugation,
int offset,
const DistMatrix<Complex<R>,MC, MR >& H,
const DistMatrix<Complex<R>,STAR,STAR>& t,
DistMatrix<Complex<R>,MC, MR >& A )
{
#ifndef RELEASE
PushCallStack("ApplyPackedReflectors");
#endif
DistMatrix<Complex<R>,MD,STAR> tDiag(A.Grid());
tDiag.AlignWithDiagonal( A, offset );
tDiag = t;
ApplyPackedReflectors
( side, uplo, dir, order, conjugation, offset, H, tDiag, A );
#ifndef RELEASE
PopCallStack();
#endif
}
} // namespace elem
| [
"jack.poulson@gmail.com"
] | jack.poulson@gmail.com |
5cbd11c3544d62cb3c772deb9be11876d18aef8c | 8a4941d24095922cb179e33075db857250257c0a | /sketch_apr27a.ino | 38d1c7f6cac1b3ab504f2803ce1baa6eaaab940a | [] | no_license | TerryBluemix/LoRaWithArduino | 0fe829863a77d4264218855b87669d0194dce88b | 1d5827adac2959f6d59217eae33c3cf1a212446b | refs/heads/master | 2021-01-01T05:28:02.109629 | 2016-05-11T05:04:17 | 2016-05-11T05:04:17 | 58,332,473 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 881 | ino | #include <SoftwareSerial.h> // 引用程式庫
// 定義連接LoRa模組的序列埠
SoftwareSerial LoRa(10, 11); // 接收腳, 傳送腳
char val; // 儲存接收資料的變數
void setup() {
Serial.begin(9600); // 與電腦序列埠連線
Serial.println("LoRa is ready!");
Serial.write('\r');
// 設定LoRa模組的連線速率
LoRa.begin(9600);
//用print 的方式自動下達AT command
}
void loop() {
//送出測試資料 lat: 25.045361, long:121.522544, batteryLevel:200, temp: 30, state: 10(00001010)
// LoRa.print("AT+DTX=22,073E4970017E2971C81E0A\r");
// delay(1500*60);
//自行輸入AT command
if (Serial.available()) {
val = Serial.read();
LoRa.print(val);
}
// 若收到LoRa模組的資料,則送到「序列埠監控視窗」
;
if (LoRa.available() ) {
val = LoRa.read();
Serial.print(val);
}
}
| [
"terrybluemix@gmail.com"
] | terrybluemix@gmail.com |
1dd8107e8f4dabedb3c7b7cfac5e48bd9c0558f6 | 27e1a0831fa730f710c7f48125092b8bfa98c8c6 | /src/runtime/CL/CLTuner.cpp | 5f82cd3fbeff09a02c41b83508d8c994238544a7 | [
"MIT"
] | permissive | adityagupta1089/ComputeLibrary | ff9c57f4f69b02d3789f72b5223bc9c1f28ad777 | 39945fde9bbb805e76c55baf3ca536a376fb00f4 | refs/heads/master | 2021-06-22T06:54:52.030052 | 2021-01-03T14:04:39 | 2021-01-03T14:04:39 | 158,011,217 | 2 | 1 | MIT | 2018-11-17T18:07:24 | 2018-11-17T18:07:24 | null | UTF-8 | C++ | false | false | 9,455 | cpp | /*
* Copyright (c) 2017-2018 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "arm_compute/runtime/CL/CLTuner.h"
#include "arm_compute/core/CL/ICLKernel.h"
#include "arm_compute/core/Error.h"
#include "arm_compute/runtime/CL/CLScheduler.h"
#include <cerrno>
#include <fstream>
#include <iostream>
#include <limits>
#include <string>
using namespace arm_compute;
CLTuner::CLTuner(bool tune_new_kernels)
: real_clEnqueueNDRangeKernel(nullptr), _lws_table(), _queue(), _queue_profiler(), _kernel_event(), _tune_new_kernels(tune_new_kernels)
{
}
bool CLTuner::kernel_event_is_set() const
{
return _kernel_event() != nullptr;
}
void CLTuner::set_cl_kernel_event(cl_event kernel_event)
{
_kernel_event = kernel_event;
}
void CLTuner::set_tune_new_kernels(bool tune_new_kernels)
{
_tune_new_kernels = tune_new_kernels;
}
bool CLTuner::tune_new_kernels() const
{
return _tune_new_kernels;
}
void CLTuner::tune_kernel_static(ICLKernel &kernel)
{
ARM_COMPUTE_UNUSED(kernel);
}
// Apply (and, if allowed, discover) the optimal local-workgroup-size for
// @p kernel. Kernels carrying the default config_id are not tunable; known
// kernels reuse the cached LWS; unknown kernels are benchmarked only when
// tuning of new kernels is enabled.
void CLTuner::tune_kernel_dynamic(ICLKernel &kernel)
{
    // Get the configuration ID from the kernel
    const std::string &config_id = kernel.config_id();

    // A default config_id means the kernel does not require tuning.
    if(config_id == arm_compute::default_config_id)
    {
        return;
    }

    // Reuse a previously determined LWS when one is cached.
    const auto cached = _lws_table.find(config_id);
    if(cached != _lws_table.end())
    {
        // Set Local-Workgroup-Size
        kernel.set_lws_hint(cached->second);
        return;
    }

    // Unknown kernel: benchmark it now if allowed.
    if(_tune_new_kernels)
    {
        // Find the optimal LWS for the kernel and remember it.
        const cl::NDRange opt_lws = find_optimal_lws(kernel);
        add_lws_to_table(config_id, opt_lws);
        // Set Local-Workgroup-Size
        kernel.set_lws_hint(opt_lws);
    }
}
// Record the optimal LWS for @p kernel_id. Like emplace(), insert() leaves
// any pre-existing entry for the same key untouched.
void CLTuner::add_lws_to_table(const std::string &kernel_id, cl::NDRange optimal_lws)
{
    _lws_table.insert(std::make_pair(kernel_id, optimal_lws));
}
// Benchmark @p kernel over a grid of local-workgroup-size candidates and
// return the fastest, or cl::NullRange when the default LWS wins.
// Temporarily replaces the global clEnqueueNDRangeKernel pointer with an
// interceptor so only the first slice of a sliced enqueue is timed, then
// restores the real entry point before returning.
cl::NDRange CLTuner::find_optimal_lws(ICLKernel &kernel)
{
    // One-time lazy setup: remember the real enqueue entry point and select
    // a profiling-capable command queue.
    if(real_clEnqueueNDRangeKernel == nullptr)
    {
        real_clEnqueueNDRangeKernel = CLSymbols::get().clEnqueueNDRangeKernel_ptr;

        // Get the default queue
        _queue = CLScheduler::get().queue();

        // Check if we can use the OpenCL timer with the default queue
        cl_command_queue_properties props = _queue.getInfo<CL_QUEUE_PROPERTIES>();

        if((props & CL_QUEUE_PROFILING_ENABLE) == 0)
        {
            // Set the queue for profiling
            _queue_profiler = cl::CommandQueue(CLScheduler::get().context(), props | CL_QUEUE_PROFILING_ENABLE);
        }
        else
        {
            _queue_profiler = _queue;
        }
    }

    // Start intercepting enqueues: capture the profiling event of the first
    // slice and swallow any further slices of the same run.
    auto interceptor = [this](cl_command_queue command_queue, cl_kernel kernel, cl_uint work_dim, const size_t *gwo, const size_t *gws, const size_t *lws, cl_uint num_events_in_wait_list,
                              const cl_event * event_wait_list, cl_event * event)
    {
        ARM_COMPUTE_ERROR_ON_MSG(event != nullptr, "Not supported");
        ARM_COMPUTE_UNUSED(event);
        if(this->kernel_event_is_set())
        {
            // If the event is already set it means the kernel enqueue is sliced: given that we only time the first slice we can save time by skipping the other enqueues.
            return CL_SUCCESS;
        }
        cl_event tmp;
        cl_int retval = this->real_clEnqueueNDRangeKernel(command_queue, kernel, work_dim, gwo, gws, lws, num_events_in_wait_list, event_wait_list, &tmp);

        // Set OpenCL event
        this->set_cl_kernel_event(tmp);

        return retval;
    };

    CLSymbols::get().clEnqueueNDRangeKernel_ptr = interceptor;

    cl_ulong min_exec_time = std::numeric_limits<cl_ulong>::max();

    cl::NDRange opt_lws = cl::NullRange;

    // Candidate search space: up to 16x16x8 in a dimension the window
    // actually iterates over, otherwise just 1.
    const int x_step = std::max(1, kernel.window().x().step());
    const int y_step = std::max(1, kernel.window().y().step());
    const int z_step = std::max(1, kernel.window().z().step());
    // NOTE(review): 'end - start / step' parses as 'end - (start / step)'
    // because '/' binds tighter than '-'; presumably '(end - start) / step'
    // was intended. Harmless when start == 0 — confirm before changing.
    const int x_end  = kernel.window().x().end() - kernel.window().x().start() / x_step > 1 ? 16 : 1;
    const int y_end  = kernel.window().y().end() - kernel.window().y().start() / y_step > 1 ? 16 : 1;
    const int z_end  = kernel.window().z().end() - kernel.window().z().start() / z_step > 1 ? 8 : 1;

    // First run using the default LWS; its time is the baseline to beat.
    {
        cl::NDRange lws_test = cl::NullRange;

        kernel.set_lws_hint(lws_test);

        // Run the kernel
        kernel.run(kernel.window(), _queue_profiler);

        _queue_profiler.finish();

        const cl_ulong start = _kernel_event.getProfilingInfo<CL_PROFILING_COMMAND_START>();
        const cl_ulong end   = _kernel_event.getProfilingInfo<CL_PROFILING_COMMAND_END>();
        const cl_ulong diff  = end - start;
        _kernel_event        = nullptr;

        min_exec_time = diff;
    }

    // Exhaustive sweep over the candidate LWS grid.
    for(int z = 1; z <= z_end; ++z)
    {
        for(int y = 1; y <= y_end; ++y)
        {
            for(int x = 1; x <= x_end; ++x)
            {
                cl::NDRange lws_test = cl::NDRange(x, y, z);

                // Skip workgroups larger than the device allows, and the
                // 1x1x1 case (equivalent to the baseline already timed).
                const bool invalid_lws = (x * y * z > static_cast<int>(kernel.get_max_workgroup_size())) || (x == 1 && y == 1 && z == 1);

                if(invalid_lws)
                {
                    continue;
                }

                //Set the Local-Workgroup-Size
                kernel.set_lws_hint(lws_test);

                // Run the kernel
                kernel.run(kernel.window(), _queue_profiler);

                _queue_profiler.finish();

                const cl_ulong start = _kernel_event.getProfilingInfo<CL_PROFILING_COMMAND_START>();
                const cl_ulong end   = _kernel_event.getProfilingInfo<CL_PROFILING_COMMAND_END>();
                const cl_ulong diff  = end - start;
                _kernel_event        = nullptr;

                // Check the execution time
                if(diff < min_exec_time)
                {
                    min_exec_time = diff;
                    opt_lws       = cl::NDRange(x, y, z);
                }
            }
        }
    }

    // Restore real function
    CLSymbols::get().clEnqueueNDRangeKernel_ptr = real_clEnqueueNDRangeKernel;

    return opt_lws;
}
// Replace the whole config_id -> LWS table with @p lws_table.
// Copy-assignment already discards the previous contents, so the explicit
// clear() the original performed first was redundant and has been removed.
void CLTuner::import_lws_table(const std::unordered_map<std::string, cl::NDRange> &lws_table)
{
    _lws_table = lws_table;
}
// Read-only access to the config_id -> optimal LWS table (e.g. for
// serialisation via save_to_file()).
const std::unordered_map<std::string, cl::NDRange> &CLTuner::lws_table() const
{
    return _lws_table;
}
// Populate the LWS table from a semicolon-separated file previously written
// by save_to_file(). Row format: kernel_id;lws[0];lws[1];lws[2].
// Raises ARM_COMPUTE_ERROR if the file cannot be opened or a row is
// malformed. Rows with an all-zero LWS are stored as cl::NullRange.
void CLTuner::load_from_file(const std::string &filename)
{
    std::ifstream fs;
    // Only badbit throws; EOF and parse failures are checked explicitly.
    fs.exceptions(std::ifstream::badbit);
    fs.open(filename, std::ios::in);
    if(!fs.is_open())
    {
        ARM_COMPUTE_ERROR("Failed to open '%s' (%s [%d])", filename.c_str(), strerror(errno), errno);
    }
    std::string line;
    while(!std::getline(fs, line).fail())
    {
        std::istringstream ss(line);
        std::string token;
        // First field: the kernel configuration id.
        if(std::getline(ss, token, ';').fail())
        {
            ARM_COMPUTE_ERROR("Malformed row '%s' in %s (Should be of the form 'kernel_id;lws[0];lws[1];lws[2]')", ss.str().c_str(), filename.c_str());
        }
        std::string kernel_id = token;
        // Remaining three fields: local workgroup size per dimension.
        cl::NDRange lws(1, 1, 1);
        for(int i = 0; i < 3; i++)
        {
            if(std::getline(ss, token, ';').fail())
            {
                ARM_COMPUTE_ERROR("Malformed row '%s' in %s (Should be of the form 'kernel_id;lws[0];lws[1];lws[2]')", ss.str().c_str(), filename.c_str());
            }
            lws.get()[i] = support::cpp11::stoi(token);
        }
        // If all dimensions are 0: reset to NullRange (i.e nullptr)
        if(lws[0] == 0 && lws[1] == 0 && lws[2] == 0)
        {
            lws = cl::NullRange;
        }
        add_lws_to_table(kernel_id, lws);
    }
    fs.close();
}
// Write the LWS table to @p filename, one row per kernel:
//   kernel_id;lws[0];lws[1];lws[2]
// (the format load_from_file() parses). I/O errors throw via the stream
// exception mask.
// Improvements over the original:
//  - iterate by const reference instead of copying every map entry,
//  - '\n' instead of std::endl to avoid a flush per row,
//  - std::ofstream exception constants for consistency (same ios_base bits
//    as the std::ifstream ones used before, so behaviour is unchanged).
void CLTuner::save_to_file(const std::string &filename) const
{
    std::ofstream fs;
    fs.exceptions(std::ofstream::failbit | std::ofstream::badbit);
    fs.open(filename, std::ios::out);
    for(const auto &kernel_data : _lws_table)
    {
        fs << kernel_data.first << ";" << kernel_data.second[0] << ";" << kernel_data.second[1] << ";" << kernel_data.second[2] << "\n";
    }
    fs.close();
}
| [
"anthony.barbier@arm.com"
] | anthony.barbier@arm.com |
8ff675f22a35ede86670dcf343cce6fb9831a7b8 | 9063052d8e2c294efa3b501d42aef2ac59d84fa0 | /운영체제 과제/찐/찐찐/sjf.cpp | a41ebf9d5881db5dcfa229767e4cf03b88ea9e5d | [] | no_license | yes99/practice2020 | ffe5502d23038eabea834ebc2b18ff724f849c4a | 100ac281f4fe6d0f991213802fbd8524451f1ac2 | refs/heads/master | 2021-07-08T00:54:19.728874 | 2021-06-13T05:52:07 | 2021-06-13T05:52:07 | 245,789,109 | 0 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 3,286 | cpp | #include <cstdio>
#include <algorithm>
#define ARRSIZE 5
// A schedulable process and its static attributes.
typedef struct process{
    int pid;      // process identifier
    int arr_time; // arrival time
    int bur_time; // CPU burst length
    int pri;      // priority (not used by the current scheduler)
    int rank;     // position after sorting by arrival time
}Process;

// Per-process scheduling results (currently unused).
typedef struct process_result{
    int processing_time;
    int wait_time;
    int compl_time;
    int AWaiting_time;
    int ATa_time;
}result;

/*
Process makeProcess(int pid, int arrive, int burst, int pri)
{
    Process p;
    p.pid = pid;
    p.arr_time = arrive;
    p.bur_time = burst;
    p.pri = pri;
    return p;
}*/

// Comparators for std::sort. They now take their operands by const
// reference: the by-value originals copied two whole Process structs on
// every comparison. std::sort call sites are unaffected.
bool compareArrivetime(const Process &left, const Process &right)
{
    return left.arr_time < right.arr_time;
}
bool compareBursttime(const Process &left, const Process &right)
{
    return left.bur_time < right.bur_time;
}
bool comparePid(const Process &left, const Process &right)
{
    return left.pid < right.pid;
}
// Print one process' static attributes on a single line.
void printProcess(Process a)
{
    printf("Process[%d] <arr = %d, bur = %d, pri = %d, rank = %d>\n", a.pid, a.arr_time, a.bur_time, a.pri, a.rank);
}

// Dump every process in the array, one per line.
void printProcessArray(Process* arr, int size)
{
    for(int idx = 0; idx < size; ++idx)
    {
        printProcess(arr[idx]);
    }
}

// Print a process' computed waiting/completion times; wt and ct are indexed
// by the process' rank.
void printFinalProcess(Process a, int* wt, int* ct)
{
    printf("Process[%d] wt = %d, ct = %d\n", a.pid, wt[a.rank], ct[a.rank]);
}

// Dump the computed times for every process in the array.
void printFinalProcessArray(Process* arr, int size, int* wt, int* ct)
{
    for(int idx = 0; idx < size; ++idx)
    {
        printFinalProcess(arr[idx], wt, ct);
    }
}
int main()
{
Process procs[ARRSIZE];
// pid arival_time burst_time priority rank
procs[0] = {0, 0, 8, 0, 0};
procs[1] = {1, 2, 4, 2, 0};
procs[2] = {2, 3, 2, 1, 0};
procs[3] = {3, 5, 1, 3, 0};
procs[4] = {4, 9, 8, 0, 0};
printProcessArray(procs, ARRSIZE);
std::sort(procs, procs+5, compareArrivetime);
printf("##################### Change Arrive\n");
printProcessArray(procs, ARRSIZE);
for(int i = 0; i < ARRSIZE; i++)
{
procs[i].rank = i;
}
printf("##################### Input Rank\n");
printProcessArray(procs, ARRSIZE);
std::sort(procs, procs+5, comparePid);
printf("#####################\n");
printProcessArray(procs, ARRSIZE);
//arrive 타임을 비교해서 나열?
//걔네를 순서대로 나열하자.
//나열을 하는 과정에서 다 더해야함?
//필요한것 : wait_time complete_time, average_wait_time, average_turnaround_time
int wt[ARRSIZE];
int awt;
int tt[ARRSIZE];
int att;
int sum_wt = 0, sum_tt = 0;
int ct[ARRSIZE];
ct[0] = procs[0].bur_time;
for(int i =1; i < ARRSIZE; i++)
{
ct[i] = ct[i-1] + procs[i].bur_time;
}
for(int j = 0; j < ARRSIZE; j++)
{
tt[j] = ct[j] - procs[j].bur_time;
}
for(int k = 0; k < ARRSIZE; k++)
{
wt[k] = ct[k] - procs[k].bur_time - procs[k].arr_time;
}
for(int l = 0; l < ARRSIZE; l++)
{
sum_wt = sum_wt + wt[l];
}
for (int m = 0; m < ARRSIZE ; m++)
{
sum_tt = sum_tt + tt[m];
}
printFinalProcessArray(procs,ARRSIZE, wt, ct);
printf("\n Average Waiting time : %d", sum_wt/ARRSIZE);
printf("\n Average Turnaround time% : %d", sum_tt/ARRSIZE);
/*
ct = ct + //arrivetime
tt = ct - Process.arr_time;//arrivetime
wt = ct - Process.bur_time - Process.arr_time//bursttime - arrivetime
*/
} | [
"yes950324@naver.com"
] | yes950324@naver.com |
924dcab5dbcecc03e5a2478a988ecb08e6b3998f | cb0fd7bd24243b0582f3d4ffb1826f839dd9f174 | /504.七进制数.cpp | 66dee8419d0502df905c8b0a740fbc75762c9eae | [] | no_license | Maserhe/LeetCode | efde6e9e9e0eadc251c9e87d620986658acf4665 | 8bd29088d386df75ce4e3e0bad0a94d48799f435 | refs/heads/master | 2023-07-08T11:09:40.346412 | 2021-08-15T15:59:34 | 2021-08-15T15:59:34 | 279,108,901 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 514 | cpp | /*
* @lc app=leetcode.cn id=504 lang=cpp
*
* [504] 七进制数
*/
// @lc code=start
// LeetCode 504: convert an integer to its base-7 string representation.
// Improvements over the original:
//  - std:: qualification so the code compiles without "using namespace std;"
//  - the magnitude is widened to long long before negation, so INT_MIN no
//    longer triggers signed-overflow UB.
class Solution {
public:
    std::string convertToBase7(int num) {
        if (!num) return "0";
        // Widen first: -INT_MIN does not fit in an int.
        long long value = num;
        const bool negative = value < 0;
        if (negative) value = -value;
        std::string ans;
        // Peel off base-7 digits least-significant first, prepending each.
        while (value) {
            ans = std::to_string(value % 7) + ans;
            value /= 7;
        }
        if (negative) ans = "-" + ans;
        return ans;
    }
};
// @lc code=end
| [
"982289931@qq.com"
] | 982289931@qq.com |
6f8126bec0485dd58393b96d489536fb75680133 | e823f21abc6fa47605bf78f239b03f48e1255dea | /Netvars/Netvars.hh | 3e162012a6e5c2f1103e8627d375b7cd4bf05110 | [
"WTFPL"
] | permissive | Owen0225/HelvetaCS | 09c43a4461556f8ead9678951bbea8ca6ba0707e | f49a8290325c3632cbb21a79d9bec01331ccf11e | refs/heads/main | 2023-07-19T22:59:36.093744 | 2021-08-15T02:32:58 | 2021-08-15T02:50:20 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 5,350 | hh | #pragma once
#include "../SDK/Forward.hh"
#include "../Helpers/Helpers.hh"
#include <memory>
#include <unordered_map>
#include <type_traits>
// Walks the game's ClientClass/RecvTable chain once and caches every
// networked-property byte offset, keyed by hashed class name and then by
// hashed property name.
struct Netvars_t
{
	Netvars_t() = default;
	// Builds the offset table by dumping every RecvTable reachable from
	// pClassHead (see Dump()).
	Netvars_t(SDK::ClientClass *pClassHead);
	inline ~Netvars_t(){};

	// Returns the cached byte offset for hClass/hProperty (hashes of the
	// networked class and property names).
	ptrdiff_t Get(Hash_t hClass, Hash_t hProperty) const;

private:
	// Recursively records pTable's property offsets under szNetworkName,
	// accumulating ptrdiffOffset for nested tables.
	void Dump(const char *szNetworkName, SDK::RecvTable *pTable, ptrdiff_t ptrdiffOffset = 0);

	std::unordered_map<Hash_t, std::unordered_map<Hash_t, ptrdiff_t>> m_umPtrdiffs;
};
/**
* @brief Utility Macros
*
* NETWORKED_CLASS will store a private hash of the class name to use when accessing
* netvars map, as we use Networked names.
*
* @todo: Either wait with this until there's an implementation of some sort of preprocessor macro
 * along the lines of '__CLASS__', should be self-explanatory why,
* or write a sanitized type information class (__FUNCTION__ has C style type specifiers for structs, classes, etc),
* (source_location::current().function_name() returns _func_) etc
*
* NETWORKED_VARIABLE_DEDUCE(_[SI/NN]) will call to the Class Member Hungarian Notation Type Parser
* to deduce NetVar type in shorthand:
*
* Drawbacks:
* - Valve is inconsistent with Hungarian notation, as you can observe in the parser comments there,
 * I've noted that they use many notations interchangeably and differently (i.e. n for nibbles AND for count,
* f for flag/bitmask and for float despite 'fl' existing, etc...)
*
* For concrete examples of the aforementioned refer to Helpers.hh directly
*
* Fix approach:
* - Implemented ..._SI and ..._NN which respectively make integers shorts (for m_iItemDefinitionIndex cases), or make the 'n'
* notation return a nibble (uint8_t)
*
* In any case:
* - Implemented direct type specifier ability for Netvars.
*
* Extra dumb cases where there's nothing we can (immediately) do:
* - Implemented OFFSET method
*
*/
// ========================================================================================================================================
// Declares a class whose members may use the NETWORKED_VARIABLE_* macros:
// stashes the hash of the class name as a private constant used as the
// outer key into the netvars map.
#define NETWORKED_CLASS(x, ...) \
	class x                     \
	{                           \
		constexpr static Hash_t hHash = HASH(#x); \
                                \
	public:                     \
		__VA_ARGS__             \
	};

// Same as NETWORKED_CLASS but derives publicly from 'inherit'.
#define NETWORKED_CLASS_INHERIT(x, inherit, ...) \
	class x : public inherit                     \
	{                                            \
		constexpr static Hash_t hHash = HASH(#x); \
                                                 \
	public:                                      \
		__VA_ARGS__                              \
	};

// Accessor returning a reference to the netvar x; its type is deduced from
// the member's Hungarian-notation prefix (GET_TYPE). The offset lookup is
// cached in a function-local static on first call.
#define NETWORKED_VARIABLE_DEDUCE(x) \
	GET_TYPE(#x) & x()               \
	{                                \
		static ptrdiff_t CONCAT(x, _ptrdiff) = g_pNetvars->Get(hHash, HASH(#x)); \
		return *(GET_TYPE(#x) *)((uintptr_t)this + CONCAT(x, _ptrdiff));         \
	}

// Pointer flavour: returns the address of the netvar rather than a
// reference to it.
#define PNETWORKED_VARIABLE_DEDUCE(x) \
	GET_TYPE(#x)                      \
	x()                               \
	{                                 \
		static ptrdiff_t CONCAT(x, _ptrdiff) = g_pNetvars->Get(hHash, HASH(#x)); \
		return (GET_TYPE(#x))((uintptr_t)this + CONCAT(x, _ptrdiff));            \
	}

// Deduce variant where the 'n' notation resolves to a nibble (uint8_t);
// see GET_TYPE_NN in Helpers.hh.
#define NETWORKED_VARIABLE_DEDUCE_NN(x) \
	GET_TYPE_NN(#x) & x()               \
	{                                   \
		static ptrdiff_t CONCAT(x, _ptrdiff) = g_pNetvars->Get(hHash, HASH(#x)); \
		return *(GET_TYPE_NN(#x) *)((uintptr_t)this + CONCAT(x, _ptrdiff));      \
	}
// Deduce variant where integer notation resolves to short (GET_TYPE_SI),
// e.g. for m_iItemDefinitionIndex.
// BUGFIX: the offset lookup read "Get(HASH(hHash, HASH(#x))" — a stray
// "HASH(" left the parentheses unbalanced (a compile error wherever the
// macro is expanded) and passed the wrong arguments; it now matches the
// other deduce macros: Get(hHash, HASH(#x)).
#define NETWORKED_VARIABLE_DEDUCE_SI(x) \
	GET_TYPE_SI(#x) & x()               \
	{                                   \
		static ptrdiff_t CONCAT(x, _ptrdiff) = g_pNetvars->Get(hHash, HASH(#x)); \
		return *(GET_TYPE_SI(#x) *)((uintptr_t)this + CONCAT(x, _ptrdiff));      \
	}
// Accessor with an explicitly specified type t, for members where
// Hungarian-notation deduction is wrong or ambiguous.
#define NETWORKED_VARIABLE_SPECIFIER(t, x) \
	t &x()                                 \
	{                                      \
		static ptrdiff_t CONCAT(x, _ptrdiff) = g_pNetvars->Get(hHash, HASH(#x)); \
		return *(t *)((uintptr_t)this + CONCAT(x, _ptrdiff));                    \
	}

// Accessor named n of type t at the fixed byte offset x from 'this' —
// for members that are not resolvable through the netvar table at all.
#define OFFSET(t, n, x)   \
	t &n()                \
	{                     \
		static ptrdiff_t CONCAT(n, _ptrdiff) = x;             \
		return *(t *)((uintptr_t)this + CONCAT(n, _ptrdiff)); \
	}
// ========================================================================================================================================
inline std::unique_ptr<Netvars_t> g_pNetvars; | [
"cristei.g772@gmail.com"
] | cristei.g772@gmail.com |
73e1faad36394deed4df62e2e08d915ce2747275 | a06a9ae73af6690fabb1f7ec99298018dd549bb7 | /_Library/_Include/boost/phoenix/function/detail/preprocessed/function_result_of_20.hpp | 1f9d2679937410250c4b07314edcfd3dc0d33bc0 | [] | no_license | longstl/mus12 | f76de65cca55e675392eac162dcc961531980f9f | 9e1be111f505ac23695f7675fb9cefbd6fa876e9 | refs/heads/master | 2021-05-18T08:20:40.821655 | 2020-03-29T17:38:13 | 2020-03-29T17:38:13 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 11,077 | hpp | ////////////////////////////////////////////////////////////////////////////////
// function_result_of_20.hpp
/*==============================================================================
Copyright (c) 2005-2010 Joel de Guzman
Copyright (c) 2010 Thomas Heller
Distributed under the Boost Software License, Version 1.0. (See accompanying
file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
==============================================================================*/
template <typename F, typename A0>
struct function<F, A0>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0
>
{};
template <typename F, typename A0 , typename A1>
struct function<F, A0 , A1>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1
>
{};
template <typename F, typename A0 , typename A1 , typename A2>
struct function<F, A0 , A1 , A2>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3>
struct function<F, A0 , A1 , A2 , A3>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4>
struct function<F, A0 , A1 , A2 , A3 , A4>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18
>
{};
template <typename F, typename A0 , typename A1 , typename A2 , typename A3 , typename A4 , typename A5 , typename A6 , typename A7 , typename A8 , typename A9 , typename A10 , typename A11 , typename A12 , typename A13 , typename A14 , typename A15 , typename A16 , typename A17 , typename A18 , typename A19>
struct function<F, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19>
: proto::result_of::make_expr<
proto::tag::function
, phoenix_domain
, F
, A0 , A1 , A2 , A3 , A4 , A5 , A6 , A7 , A8 , A9 , A10 , A11 , A12 , A13 , A14 , A15 , A16 , A17 , A18 , A19
>
{};
/////////////////////////////////////////////////
// vnDev.Games - Trong.LIVE - DAO VAN TRONG //
////////////////////////////////////////////////////////////////////////////////
| [
"adm.fael.hs@gmail.com"
] | adm.fael.hs@gmail.com |
939073373d4c1b6f0eeace371c23edf02bc1336c | 62e1b9f58e9d4c6d1a1297e3b1aa4f76e850a5c1 | /FrameWork/RedRocketRobotTwoSidedNonLinear.h | 878c47b9f91b3253b948b677d16f1bcf26dffa70 | [] | no_license | luon-dinh/GameUIT | 15d926f075bd3e24c3d39d439b747fd7b68e5a7f | 0e3b68352b7671015d041158766c9eae777002bf | refs/heads/master | 2022-02-22T02:20:48.138841 | 2019-08-16T08:28:24 | 2019-08-16T08:28:24 | 196,821,666 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 691 | h | #pragma once
#include "RedRocketRobotTwoSided.h"
#include "SceneManager.h"
#include "BulletRedRocketNonLinear.h"
class RedRocketRobotTwoSidedNonLinear : public RedRocketRobotTwoSided
{
public:
RedRocketRobotTwoSidedNonLinear(int posX, int posY) : RedRocketRobotTwoSided(posX, posY) {};
~RedRocketRobotTwoSidedNonLinear() {};
protected:
void Fire() override
{
if (robotState == State::DUCKING)
SceneManager::getInstance()->AddObjectToCurrentScene(new BulletRedRocketNonLinear(direction, this->pos.x, this->pos.y, rocketSpeed));
else
SceneManager::getInstance()->AddObjectToCurrentScene(new BulletRedRocketNonLinear(direction, this->pos.x, this->pos.y + 15, rocketSpeed));
}
}; | [
"luubieunghi@gmail.com"
] | luubieunghi@gmail.com |
5e178e90bd6d7bd93cc7f08a21ecc49293b50ce9 | d1c881eb5ad77cbd5dab55d34e8f4fa0a4faaf29 | /src/arngui/ArchiveDialog.cpp | 43b8a9747224ba2343ca01ecf96157bd628df749 | [] | no_license | karlvr/arnold | cc21c3de7f92085ff6686044722ea35cfc684bfa | 52a1ef6f3258b1d355c137e71a3bfabfb4f9b2e7 | refs/heads/main | 2023-06-23T04:30:48.554401 | 2023-06-12T10:08:42 | 2023-06-12T10:08:42 | 343,978,478 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 19,528 | cpp | /*
* Arnold emulator (c) Copyright, Kevin Thacker 1995-2015
*
* This file is part of the Arnold emulator source code distribution.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "ArchiveDialog.h"
#include <wx/xrc/xmlres.h>
#include "arnguiApp.h"
#include <wx/wfstream.h>
// Default-construct the archive list; all sorting state lives in the base
// SortableListCtrl.
ArchiveListCtrl::ArchiveListCtrl() : SortableListCtrl()
{
}

// Row client data (ArchiveListItem) is owned by ArchiveDialog and freed in
// its destructor via ClearItems(), not here.
ArchiveListCtrl::~ArchiveListCtrl()
{
}
// Compare two rows for sorting; the item ids are the ArchiveListItem
// pointers stored as row client data. Returns <0, 0 or >0 (list-control
// comparator convention), negated when sorting descending.
//   Column 0: name (directories first; case-insensitive on MSW/Mac).
//   Column 1: size (files only; any pair involving a directory ties).
//   Column 2: type (directories before files).
// BUGFIX: the size comparison used to truncate the wxFileOffset difference
// to int, which can wrap for files whose sizes differ by more than 2GB;
// the offsets are now compared directly.
int ArchiveListCtrl::PerformSort(wxIntPtr item1id, wxIntPtr item2id)
{
	ArchiveListItem *pClientData1 = (ArchiveListItem *)item1id;
	ArchiveListItem *pClientData2 = (ArchiveListItem *)item2id;

	int result = 0;
	switch (m_nSortColumn)
	{
	default:
	case 0:
	{
		// name: directory comes first
		if (pClientData1->m_bIsDirectory && !pClientData2->m_bIsDirectory)
		{
			result = -1;
		}
		else if (!pClientData1->m_bIsDirectory && pClientData2->m_bIsDirectory)
		{
			result = 1;
		}
		else
		{
#if defined(__WXMSW__) || defined(__WXMAC__)
			// case is not important
			result = pClientData1->m_sDisplayName.CmpNoCase(pClientData2->m_sDisplayName);
#else
			// case is important
			result = pClientData1->m_sDisplayName.Cmp(pClientData2->m_sDisplayName);
#endif
		}
	}
	break;

	case 1:
	{
		// size; only meaningful when both rows are files
		if ((!pClientData1->m_bIsDirectory) && (!pClientData2->m_bIsDirectory))
		{
			// compare without truncating the 64-bit offsets to int
			if (pClientData1->m_nSize < pClientData2->m_nSize)
			{
				result = -1;
			}
			else if (pClientData1->m_nSize > pClientData2->m_nSize)
			{
				result = 1;
			}
			else
			{
				result = 0;
			}
		}
		else
		{
			result = 0;
		}
	}
	break;

	case 2:
	{
		// type: directory comes first
		if (pClientData1->m_bIsDirectory && !pClientData2->m_bIsDirectory)
		{
			result = -1;
		}
		else if (!pClientData1->m_bIsDirectory && pClientData2->m_bIsDirectory)
		{
			result = 1;
		}
		else
		{
			// both files or both directories
			result = 0;
		}
	}
	break;
	}

	if (!m_bSortAscending)
	{
		result = -result;
	}
	return result;
}
IMPLEMENT_DYNAMIC_CLASS(ArchiveListCtrl, SortableListCtrl)
BEGIN_EVENT_TABLE(ArchiveListCtrl, SortableListCtrl)
END_EVENT_TABLE()
// there is a leak in this dialog and I can't find where it is.
// seems to hold onto some strings on the items?
BEGIN_EVENT_TABLE(ArchiveDialog, wxDialog)
EVT_LIST_ITEM_ACTIVATED(XRCID("IDC_LIST_FILES"), ArchiveDialog::OnItemActivated)
EVT_BUTTON(XRCID("wxID_OK"), ArchiveDialog::OnOK)
END_EVENT_TABLE()
// Free the per-row ArchiveListItem client data (which wx does not own)
// before destroying the rows and columns themselves.
ArchiveDialog::~ArchiveDialog()
{
	wxListCtrl *pListCtrl = GetListCtrl();

	// free item data
	ClearItems();

	// free all items and columns
	pListCtrl->ClearAll();
}
// Fetch the file-list control from the XRC-loaded dialog layout.
wxListCtrl *ArchiveDialog::GetListCtrl()
{
	return XRCCTRL(*this, "IDC_LIST_FILES", wxListCtrl);
}
// React to the user choosing a row (double-click or OK):
//  - ".."      : go up one directory, or end the dialog with an empty
//                m_sPickedFilename when already at the archive root
//                (the caller interprets that as "back out of the archive");
//  - directory : descend into it and refill the list;
//  - file      : build the wx virtual-filesystem path
//                "archive[#filter:]#archive_proto:internal/name" into
//                m_sPickedFilename and end the dialog with wxID_OK.
// Note: EndModal() returns void, so "return EndModal(...)" is just an
// early exit.
void ArchiveDialog::HandlePickedItem(const wxListItem &Item)
{
	const ArchiveListItem *pClientData = (const ArchiveListItem *)(Item.GetData());

	bool IsBackDir = false;
	bool IsDir = pClientData->m_bIsDirectory;
	if (pClientData->m_sInternalName.CompareTo(wxT("..")) == 0)
	{
		IsBackDir = true;
	}

	if (IsDir)
	{
		bool bPopulate = false;

		if (IsBackDir)
		{
			// go back up a dir
			if (m_sBasePath.IsEmpty())
			{
				// indicate we want to back out
				m_sPickedFilename.Empty();
				// and return ok status
				return EndModal(wxID_OK);
			}
			else
			{
				// strip the last directory component from the base path
				wxFileName Filename(m_sBasePath);
				Filename.RemoveLastDir();
				m_sBasePath = Filename.GetFullPath();
				bPopulate = true;
			}
		}
		else
		{
			// descend: directory rows store the full internal path
			m_sBasePath = pClientData->m_sInternalName;
			bPopulate = true;
		}
		if (bPopulate)
		{
			FillList();
		}
	}
	else
	{
		// query the factories so we know which protocol prefixes to emit;
		// the input stream itself is not needed here
		wxArchiveClassFactory *pArchiveClassFactory;
		wxArchiveInputStream *pInputStream;
		wxFilterClassFactory *pFilterClassFactory;
		GetArchiveClassFactory(m_sFilename, &pFilterClassFactory, &pArchiveClassFactory, &pInputStream);
		delete pInputStream;

		// generate path
		m_sPickedFilename = m_sFilename;
		if (pFilterClassFactory != NULL)
		{
			// e.g. "#gzip:" for a filtered (compressed) archive
			m_sPickedFilename += wxT("#");
			m_sPickedFilename += pFilterClassFactory->GetProtocol();
			m_sPickedFilename += wxT(":");
		}
		m_sPickedFilename += wxT("#");
		m_sPickedFilename += pArchiveClassFactory->GetProtocol();
		m_sPickedFilename += wxT(":");
		m_sPickedFilename += pClientData->m_sInternalName;

		// class factories appear to be owned by wx, hence not deleted here
		//delete pArchiveClassFactory;
		//delete pFilterClassFactory;

		// file selected, return as if we had clicked ok
		return EndModal(wxID_OK);
	}
}
// Double-click/Enter on a row: handle it exactly like choosing it via OK.
void ArchiveDialog::OnItemActivated(wxListEvent& event)
{
	HandlePickedItem(event.GetItem());
}
// Standard wx data-transfer hook: build the Name/Size/Type columns, fill the
// list with the archive's current directory, then restore the persisted
// window and column geometry.
bool ArchiveDialog::TransferDataToWindow()
{
	wxListCtrl *pListCtrl = GetListCtrl();

	// Freeze while rebuilding columns to avoid flicker.
	pListCtrl->Freeze();
	pListCtrl->ClearAll();

	wxListItem Column;
	Column.SetMask(wxLIST_MASK_TEXT);
	Column.SetText(wxT("Name"));
	pListCtrl->InsertColumn(0, Column);
	Column.SetText(wxT("Size"));
	pListCtrl->InsertColumn(1, Column);
	Column.SetText(wxT("Type"));
	pListCtrl->InsertColumn(2, Column);
	pListCtrl->Thaw();

	FillList();

	// Auto-size the columns first...
	wxGetApp().SetColumnSize(pListCtrl, 0);
	wxGetApp().SetColumnSize(pListCtrl, 1);
	wxGetApp().SetColumnSize(pListCtrl, 2);
	// then restore actual size
	wxGetApp().ReadConfigWindowSize(wxT("windows/archive/"), this);
	wxGetApp().ReadConfigListCtrl(wxT("windows/archive/listctrl/"), pListCtrl);
	wxGetApp().EnsureWindowVisible(this);
	return true;
}
// OK button: persist window/column geometry, then act on the selected row
// (if any). The dialog only actually closes inside HandlePickedItem(),
// i.e. when a file was chosen or ".." backs out of the root.
void ArchiveDialog::OnOK(wxCommandEvent& WXUNUSED(event))
{
	wxListCtrl *pListCtrl = GetListCtrl();

	wxGetApp().WriteConfigWindowSize(wxT("windows/archive/"), this);
	wxGetApp().WriteConfigListCtrl(wxT("windows/archive/listctrl/"), pListCtrl);

	// must be something selected and it must be a file in order
	// for dialog to quit
	long item = pListCtrl->GetNextItem(-1,
		wxLIST_NEXT_ALL,
		wxLIST_STATE_SELECTED);
	if (item == -1)
		return;

	// Fetch the row's client data so HandlePickedItem can act on it.
	wxListItem Item;
	Item.SetMask(wxLIST_MASK_DATA);
	Item.SetColumn(0);
	Item.SetImage(-1); //Insert a blank icon, not find to insert nothing
	Item.SetId(item);
	if (pListCtrl->GetItem(Item))
	{
		HandlePickedItem(Item);
	}
}
// Append one row describing sName to pListCtrl. An ArchiveListItem holding
// the archive-internal path, size and directory flag is attached as row
// client data (freed later by ClearItems()). Directories are displayed
// wrapped in brackets and show no size.
void ArchiveDialog::AddItemToList(wxListCtrl *pListCtrl, const wxString &sName, const wxString &sInternalName, wxFileOffset nSize, bool bIsDirectory)
{
	ArchiveListItem *pClientData = new ArchiveListItem();
	pClientData->m_sInternalName = sInternalName;
	pClientData->m_bIsDirectory = bIsDirectory;
	pClientData->m_nSize = nSize;
	if (bIsDirectory)
	{
		pClientData->m_sDisplayName = wxT("[") + sName + wxT("]");
	}
	else
	{
		pClientData->m_sDisplayName = sName;
	}

	wxListItem Item;

	// set name field
	Item.SetMask(wxLIST_MASK_TEXT | wxLIST_MASK_DATA);
	Item.SetText(pClientData->m_sDisplayName);
	Item.SetColumn(0);
	Item.SetId(pListCtrl->GetItemCount());
	Item.SetImage(-1);
	Item.SetData(pClientData);
	int ItemId = pListCtrl->InsertItem(Item);

	// set size field
	Item.SetMask(wxLIST_MASK_TEXT);
	Item.SetId(ItemId);
	if (bIsDirectory)
	{
		// no size for directory
		Item.SetText(wxEmptyString);
	}
	else
	{
		// size of file
		Item.SetText(wxFileName::GetHumanReadableSize(nSize, wxEmptyString, 2));
	}
	Item.SetColumn(1);
	pListCtrl->SetItem(Item);

	// set type field ("Directory" / "File")
	Item.SetMask(wxLIST_MASK_TEXT);
	Item.SetText(bIsDirectory ? wxT("Directory") : wxT("File"));
	Item.SetColumn(2);
	pListCtrl->SetItem(Item);
}
void ArchiveDialog::ClearItems()
{
wxListCtrl *pListCtrl = GetListCtrl();
for (int i = 0; i < pListCtrl->GetItemCount(); i++)
{
ArchiveListItem *pClientData = (ArchiveListItem *)(pListCtrl->GetItemData(i));
delete pClientData;
}
pListCtrl->DeleteAllItems();
}
void ArchiveDialog::FillListI()
{
wxListCtrl *pListCtrl = GetListCtrl();
ClearItems();
// base
AddItemToList(pListCtrl, wxT(".."), wxT(".."), 0, true);
wxArchiveClassFactory *pArchiveClassFactory;
wxFilterClassFactory *pFilterClassFactory;
wxArchiveInputStream *pArchiveInputStream;
ArchiveDialog::GetArchiveClassFactory(m_sFilename, &pFilterClassFactory, &pArchiveClassFactory, &pArchiveInputStream);
if (pArchiveInputStream == NULL)
return;
// a zip file may not have a directory specified separately, it may be specified
// implicitly through a file being in there or through a sub-directory.
wxStringToStringHashMap Folders;
// this goes through all items and it's a bit slow.
wxArchiveEntry *pEntry = pArchiveInputStream->GetNextEntry();
while (pEntry!=NULL)
{
// get internal name
wxString sInternalName = pEntry->GetName();
bool bInclude = false;
// base path defined?
if (m_sBasePath.Length() != 0)
{
// the name is at least as long as the base path
if (sInternalName.Length() >= m_sBasePath.Length())
{
// see if it begins with the path we are interested in
wxString sCommonSubString = sInternalName.Left(m_sBasePath.Length());
#if wxUSE_UNICODE==1
const wchar_t *ch = m_sBasePath.wc_str();
#else
const char *ch = m_sBasePath.c_str();
#endif
if (sCommonSubString.CompareTo(ch) == 0)
{
bInclude = true;
}
}
}
else
{
bInclude = true;
}
if (bInclude)
{
wxString sName = sInternalName.Right(sInternalName.Length() - m_sBasePath.Length());
wxFileName FileName(sName);
// add the first dir to the list if it's not already there.
if (FileName.GetDirCount() != 0)
{
const wxArrayString &sDirs = FileName.GetDirs();
sName = sDirs[0];
wxStringToStringHashMap::const_iterator iterator = Folders.find(sName);
if (iterator==Folders.end())
{
wxFileName DirName(sInternalName);
wxString sDirName = DirName.GetPathWithSep();
Folders[sName] = sDirName;
}
}
else if (!FileName.IsDir())
{
// a file in this directory
sName = FileName.GetFullName();
// get size of file
wxFileOffset nSize = pEntry->GetSize();
// add to list
AddItemToList(pListCtrl, sName, sInternalName, nSize, false);
}
}
delete pEntry;
pEntry = pArchiveInputStream->GetNextEntry();
}
// add all folders discovered
wxStringToStringHashMap::iterator iterator = Folders.begin();
for (;iterator!=Folders.end(); ++iterator)
{
AddItemToList(pListCtrl, iterator->first, iterator->second, 0, true);
}
delete pArchiveInputStream;
//delete pArchiveClassFactory;
//delete pFilterClassFactory;
}
void ArchiveDialog::FillList()
{
wxListCtrl *pListCtrl = GetListCtrl();
pListCtrl->Freeze();
// fill list
FillListI();
// if we had a base path specified
if (m_sBasePath.Length() != 0)
{
// but we only added ".."
if (pListCtrl->GetItemCount() == 1)
{
// then clear the base path
m_sBasePath.Empty();
// and re-fill
FillListI();
// perhaps the path no longer exists in that zip
}
}
pListCtrl->Thaw();
}
bool ArchiveDialog::DoPicking(wxWindow *pParentWindow, const wxString &sArchiveFileAndPath, const wxString &sTitle, wxString &sPickedFilename)
{
// this will hold a list of archives, so we can browse an archive inside an archive.
wxArrayString sPaths;
// this is the current archive to view
wxString sCurrentArchiveFileAndPath = sArchiveFileAndPath;
bool bCancelPicking = false;
bool bPickingDone = false;
while (!bPickingDone)
{
wxString sArchivePath;
wxString sArchiveInitialPath;
sArchivePath = sCurrentArchiveFileAndPath;
// if an archive is within an archive this breaks
int nPos = sCurrentArchiveFileAndPath.Find(wxT('#'), true);
if (nPos != -1)
{
wxString sPath = sCurrentArchiveFileAndPath.Left(nPos);
// store path of archive on filesystem, we need it for browsing archives
sArchivePath = sPath;
wxString sArchiveInternalPath = sCurrentArchiveFileAndPath.Right(sCurrentArchiveFileAndPath.Length() - (nPos + 1));
nPos = sArchiveInternalPath.Index(':');
if (nPos != -1)
{
// we need to remove "zip:" stub
sArchiveInitialPath = sArchiveInternalPath.Right(sArchiveInternalPath.Length() - (nPos + 1));
wxFileName ArchiveInternalPath(sArchiveInitialPath);
sArchiveInitialPath = ArchiveInternalPath.GetPath();
}
}
ArchiveDialog dialog(pParentWindow, sTitle, sArchivePath, sArchiveInitialPath);
if (dialog.ShowModal() == wxID_OK)
{
// we picked something
if (dialog.m_sPickedFilename.IsEmpty())
{
// user wanted to back out
//
// if there are no paths in our list, we've completely done with archive picking
if (sPaths.GetCount()==0)
{
bPickingDone = true;
}
else
{
// there is a path in our list, go back to previous archive.
size_t nLastIndex = sPaths.GetCount();
// get archive path off the end of the list.
sCurrentArchiveFileAndPath = sPaths[nLastIndex];
// remove from list
sPaths.RemoveAt(nLastIndex);
}
}
else
{
// user picked something
#if 0
if (IsArchive(pArchiveDialog->m_sPickedFilename))
{
// we picked an archive
// store current one in the list so we can go back to it if we back out of the
// picked archive
sPaths.Add(sCurrentArchiveFileAndPath);
// make the picked archive the new archive
sCurrentArchiveFileAndPath = pArchiveDialog->m_sPickedFilename;
}
else
#endif
{
// a file within an archive which we've picked
sPickedFilename = dialog.m_sPickedFilename;
// done picking without cancelling
bPickingDone = true;
}
}
}
else
{
// done picking
bPickingDone = true;
// cancelled it.
bCancelPicking = true;
}
}
return !bCancelPicking;
}
ArchiveDialog::ArchiveDialog(wxWindow *pParent, const wxString &sTitle, const wxString &sFilename, const wxString &sArchiveInitialPath) : wxDialog()
{
m_sBasePath = sArchiveInitialPath;
if (m_sBasePath.Length() != 0)
{
if (m_sBasePath[m_sBasePath.Length() - 1] != wxT('\\'))
m_sBasePath += wxT("\\");
}
m_sFilename = sFilename;
// load the resource
wxXmlResource::Get()->LoadDialog(this, pParent, wxT("DLG_DIALOG_ARCHIVE"));
if (sTitle.Length() != 0)
{
wxString sActualTitle = sTitle + wxT(" - Browse Archive");
SetTitle(sActualTitle);
}
}
void ArchiveDialog::GetFilterClassFactory(const wxString &sFilename, wxFilterClassFactory **ppFilterClassFactory, wxFilterInputStream **ppInputStream)
{
*ppFilterClassFactory = NULL;
*ppInputStream = NULL;
// make it lower otherwise wxWidgets attempts to find "ZIP" on linux and fails.
wxString sCurrentFilename = sFilename;
wxString sCurrentFilenameLower = sFilename.Lower();
wxInputStream *pInputStream = new wxFFileInputStream(sCurrentFilename);
if (pInputStream != NULL)
{
const wxFilterClassFactory *pFilterClassFactory = wxFilterClassFactory::Find(sCurrentFilenameLower, wxSTREAM_FILEEXT);
if (pFilterClassFactory)
{
wxFilterInputStream *pNewInputStream = pFilterClassFactory->NewStream(pInputStream);
if (pNewInputStream != NULL)
{
*ppInputStream = pNewInputStream;
*ppFilterClassFactory = const_cast<wxFilterClassFactory *>(pFilterClassFactory);
return;
}
}
delete pInputStream;
}
}
void ArchiveDialog::GetArchiveClassFactory(const wxString &sFilename, wxFilterClassFactory **ppFilterClassFactory, wxArchiveClassFactory **ppArchiveClassFactory, wxArchiveInputStream **ppInputStream)
{
*ppFilterClassFactory = NULL;
*ppArchiveClassFactory = NULL;
*ppInputStream = NULL;
const wxFilterClassFactory *pFilterClassFactory = NULL;
// make it lower otherwise wxWidgets attempts to find "ZIP" on linux and fails.
wxString sCurrentFilename = sFilename;
wxString sCurrentFilenameLower = sFilename.Lower();
// zip has archive only, no filter
// tar.gz has archive and filter.
wxInputStream *pInputStream = new wxFFileInputStream(sCurrentFilename);
if (pInputStream != NULL)
{
pFilterClassFactory = wxFilterClassFactory::Find(sCurrentFilenameLower, wxSTREAM_FILEEXT);
if (pFilterClassFactory)
{
wxFilterInputStream *pNewInputStream = pFilterClassFactory->NewStream(pInputStream);
if (pNewInputStream != NULL)
{
pInputStream = pNewInputStream;
sCurrentFilenameLower = pFilterClassFactory->PopExtension(sCurrentFilenameLower);
}
else
{
pFilterClassFactory = NULL;
}
}
}
const wxArchiveClassFactory *pArchiveClassFactory = wxArchiveClassFactory::Find(sCurrentFilenameLower, wxSTREAM_FILEEXT);
if (pArchiveClassFactory!=NULL)
{
// filter with archive, e.g. .tar.gz
wxArchiveInputStream *pNewInputStream = pArchiveClassFactory->NewStream(pInputStream);
if (pNewInputStream != NULL)
{
*ppInputStream = pNewInputStream;
*ppFilterClassFactory = const_cast<wxFilterClassFactory *>(pFilterClassFactory);
*ppArchiveClassFactory = const_cast<wxArchiveClassFactory *>(pArchiveClassFactory);
return;
}
}
#if 0
if (pFilterClassFactory != NULL)
{
// filter without archive
*ppInputStream = pInputStream;
*ppFilterClassFactory = const_cast<wxFilterClassFactory *>(pFilterClassFactory);
*ppArchiveClassFactory = NULL;
return;
}
#endif
if (pInputStream != NULL)
{
// original stream, or one from the filter
delete pInputStream;
}
}
bool ArchiveDialog::IsArchive(const wxString &sFilename)
{
bool bIsArchive = false;
wxFilterClassFactory *pFilterClassFactory;
wxArchiveClassFactory *pArchiveClassFactory;
wxArchiveInputStream *pInputStream;
ArchiveDialog::GetArchiveClassFactory(sFilename, &pFilterClassFactory, &pArchiveClassFactory, &pInputStream);
if (pArchiveClassFactory != NULL)
{
bIsArchive = true;
}
if (pInputStream != NULL)
{
delete pInputStream;
}
return bIsArchive;
}
bool ArchiveDialog::GetFilterFilename(const wxString &sFilename, wxString &sFilterFilename)
{
wxFilterClassFactory *pFilterClassFactory = NULL;
wxFilterInputStream *pInputStream = NULL;
wxString sCurrentFilename = sFilename;
ArchiveDialog::GetFilterClassFactory(sFilename, &pFilterClassFactory, &pInputStream);
if (pFilterClassFactory != NULL)
{
sFilterFilename = sFilename;
sFilterFilename += wxT("#");
sFilterFilename += pFilterClassFactory->GetProtocol();
// sFilterFilename += wxT(":");
// sCurrentFilename= pFilterClassFactory->PopExtension(sCurrentFilename);
// sFilterFilename += sCurrentFilename;
delete pInputStream;
return true;
}
return false;
}
bool ArchiveDialog::IsFilter(const wxString &sFilename)
{
bool bIsFilter = false;
wxFilterClassFactory *pFilterClassFactory;
wxFilterInputStream *pInputStream;
ArchiveDialog::GetFilterClassFactory(sFilename, &pFilterClassFactory, &pInputStream);
if (pFilterClassFactory != NULL)
{
bIsFilter = true;
}
if (pInputStream != NULL)
{
delete pInputStream;
}
return bIsFilter;
}
| [
"karl@xk72.com"
] | karl@xk72.com |
9ee0b26a3271ec924592ae0a1c553850700b4733 | 411bb518ceabf15bd299b71c18ed026b005de7cf | /project3/snapshot.hpp | e59ec84bc51559ab82e6df34052154cf5b77e9f7 | [] | no_license | clucle/parallel_programming | 035f51eabbaeeb7c0f993e884f8042c2cb14b988 | ae9798f8e977ca80c09568e3c67088c7744b988a | refs/heads/master | 2020-11-25T09:34:36.212817 | 2019-12-02T11:24:53 | 2019-12-02T11:24:53 | 228,599,100 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 344 | hpp | #ifndef C_SNAPSHOT
#define C_SNAPSHOT
#include <vector>
#include "register.hpp"
class WaitFreeSnapshot
{
public:
WaitFreeSnapshot(int N);
~WaitFreeSnapshot();
void update(int tid, int value);
std::shared_ptr<int> scan();
int get_cnt();
private:
SnapValue **collect();
Register **_register;
int _size;
};
#endif
| [
"wjdenwls123@gmail.com"
] | wjdenwls123@gmail.com |
fe5cf350d42ddeb908013f53770e50e2856c46cb | a4d7d54cb930a3ecf7ead1f3eaaf10f11c4dfc53 | /libdbn/Clique.cpp | 4912536831707d170bc5b81f3ac74a8f2cc5d880 | [] | no_license | colortail/libdbn | 914eac3897a65af781fdf1cb09839f2fa78b911d | 1c80da2cd61cc9c2c3a76d6efb458023fd07fa30 | refs/heads/master | 2016-09-15T07:11:02.158832 | 2016-05-04T22:34:11 | 2016-05-04T22:34:11 | 38,933,384 | 2 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,855 | cpp | #include "Clique.h"
bool operator<(const UndirectEdge & ue1, const UndirectEdge & ue2) {
if (ue1.i != ue2.i)
return ue1.i < ue2.i;
else
return ue1.j < ue2.j;
}
bool Clique::isEqual(vector<string>& elems) {
if (vars.size() == 0 || vars.size() != elems.size())
return false;
bool isContain = false;
for (set<RandVar>::iterator it = vars.begin();
it != vars.end();
it++) {
isContain = false;
for (uint32_t j = 0; j < elems.size(); j++) {
if (it->name == elems[j]) {
isContain = true;
break;
}
}
if (isContain)
continue;
else
return false;
}
return true;
}
bool Clique::isEqual(set<int>& elems) {
if (vars.size() == 0 || vars.size() != elems.size())
return false;
bool isContain = false;
for (set<RandVar>::iterator it = vars.begin();
it != vars.end();
it++) {
isContain = false;
for (set<int>::iterator setIt = elems.begin(); setIt != elems.end(); setIt++) {
if (it->node == *setIt) {
isContain = true;
break;
}
}
if (isContain)
continue;
else
return false;
}
return true;
}
bool Clique::isContain(set<int>& varset) {
if (this->vars.size() == 0)
return false;
for (set<int>::iterator it = varset.begin();
it != varset.end();
it++) {
RandVar rdVar(*it, string(""));
if (this->vars.find(rdVar) == vars.end()) {
return false;
}
}
return true;
}
bool Clique::isContain(vector<string>& varset) {
if (this->vars.size() == 0)
return false;
bool singleContain = false;
for (vector<string>::iterator it = varset.begin();
it != varset.end();
it++) {
singleContain = false;
for (set<RandVar>::iterator rdvIt = this->vars.begin();
rdvIt != this->vars.end();
rdvIt++) {
if (rdvIt->name == *it) {
singleContain = true;
break;
}
}
if (!singleContain)
return false;
}
return true;
}
void Clique::insert(const RandVar & var) {
vars.insert(var);
}
void Clique::insert(const Factor & factor) {
pots.insert(factor);
}
set<Factor>& Clique::getPots() {
return this->pots;
}
set<string> Clique::containElements(const vector<string> & varset) {
set<string> elems;
for (uint32_t i = 0; i < varset.size(); i++) {
for (set<RandVar>::iterator jIt = this->vars.begin();
jIt != this->vars.end();
jIt++) {
if (jIt->name == varset[i])
elems.insert(varset[i]);
}
}
return elems;
}
Factor Clique::getCliqueInitPotential() {
Factor factor;
for (set<Factor>::iterator it = this->pots.begin();
it != this->pots.end();
it++) {
factor = factor.multiply(*it);
}
return factor;
}
set<RandVar> Clique::joinElement(const Clique & c1, const Clique & c2) {
set<RandVar> setvar1 = c1.vars;
set<RandVar> setvar2 = c2.vars;
return setIntersection(setvar1, setvar2);
}
void Clique::removeTabular(const Factor & factor) {
this->pots.erase(factor);
}
void Clique::clearTabular() {
this->pots.clear();
} | [
"tangoghvan@126.com"
] | tangoghvan@126.com |
55ec781632e916a37a9b172580544a007b089eaf | bd48897ed08ecfea35d8e00312dd4f9d239e95c4 | /contest/solved/ECNA-2009/solution/G.cpp | c5fd51df075efa4ef462e294f90be4cca78cff6f | [] | no_license | blmarket/lib4bpp | ab83dbb95cc06e7b55ea2ca70012e341be580af1 | 2c676543de086458b93b1320b7b2ad7f556a24f7 | refs/heads/master | 2021-01-22T01:28:03.718350 | 2010-11-30T06:45:42 | 2010-11-30T06:45:42 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,900 | cpp | #include <iostream>
using namespace std;
/*
IDEA: associate startup times and wait times at lights with paths leading
away from vertices. Must be careful to distinguish this added on time with
true arrival time at vertices.
*/
const int MAXVERT = 100;
const int STARTUPTIME = 5;
const long INF = 10000000;
struct edge {
int time;
int end;
};
struct light
{
int g, y, r;
int cycleTime; // convenience
int goTime; // convenience
int start; // may be used later
edge adj[MAXVERT];
int nadj;
long dist; // arrival time plus wait time for light + startup time when leaving light
long arrival; // actual time of arrival at the light
bool visited;
} lights[MAXVERT];
int timeToGo(int light, int t)
{
int go = t%lights[light].cycleTime;
if (go < lights[light].goTime)
return 0;
else
return lights[light].cycleTime - go + STARTUPTIME;
}
int calcDist(edge e, int t)
{
int dist = t+e.time;
return dist + timeToGo(e.end, dist);
}
int getNextVertex(int n)
{
int ans = -1;
int closest = INF;
for(int i=0; i<n; i++) {
if (!lights[i].visited && lights[i].dist < closest) {
ans = i;
closest = lights[i].dist;
}
}
return ans;
}
int solve(int n, int endLight)
{
while (true) {
int index = getNextVertex(n);
if (index == endLight)
return lights[index].arrival;
if (index == -1)
return INF;
lights[index].visited = true;
for(int i=0; i<lights[index].nadj; i++) {
edge e = lights[index].adj[i];
if (lights[e.end].visited)
continue;
int edgeDist = calcDist(e, lights[index].dist);
if (lights[e.end].dist > edgeDist ||
(lights[e.end].dist == edgeDist && lights[e.end].arrival > lights[index].dist+e.time) ||
(e.end == endLight && lights[e.end].arrival > lights[index].dist+e.time)) {
lights[e.end].dist = edgeDist;
if (lights[e.end].arrival > lights[index].dist + e.time)
lights[e.end].arrival = lights[index].dist + e.time;
}
}
}
return 0;
}
void outputAns(int ans)
{
int mins = ans/60;
int secs = ans%60;
cout << mins << ":";
if (secs < 10)
cout << '0';
cout << secs << endl;
}
int main()
{
int n, m, s, e;
int i;
cin >> n >> m >> s >> e;
while (n != 0) {
for(i=0; i<n; i++) {
cin >> lights[i].g >> lights[i].y >> lights[i].r;
lights[i].cycleTime = lights[i].g+lights[i].y+lights[i].r;
lights[i].goTime = lights[i].g+lights[i].y;
lights[i].dist = INF;
lights[i].arrival = INF;
lights[i].visited = false;
lights[i].nadj = 0;
}
for(i=0; i<m; i++) {
int l1, l2, time;
cin >> l1 >> l2 >> time;
lights[l1].adj[lights[l1].nadj].end = l2;
lights[l1].adj[lights[l1].nadj].time = time;
lights[l1].nadj++;
lights[l2].adj[lights[l2].nadj].end = l1;
lights[l2].adj[lights[l2].nadj].time = time;
lights[l2].nadj++;
}
lights[s].dist = STARTUPTIME;
lights[s].arrival = 0;
int ans = solve(n, e);
outputAns(ans);
cin >> n >> m >> s >> e;
}
}
| [
"blmarket@dbb752b6-32d3-11de-9d05-31133e6853b1"
] | blmarket@dbb752b6-32d3-11de-9d05-31133e6853b1 |
c6637f5b052c5aa919f72142393ce519fb33bf4d | 246789f077d6166acd0880ff28d0cbcc756f1c2e | /src/governance-vote.h | 1285e7b4ae91b7ddc8515da528644653cb121ae1 | [
"MIT"
] | permissive | Jerthade/SteelHorseCoin | ea8b86abd625eaea63aea994b322f28bc2d27e5e | 0fe0d0d742265fea862345ede0498ddba8a10e12 | refs/heads/master | 2020-04-29T05:45:18.091259 | 2019-03-18T03:11:24 | 2019-03-18T03:11:24 | 174,908,499 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 4,642 | h | // Copyright (c) 2014-2017 The Dash Core developers
// Copyright (c) 2019 The SteelHorseCoin Core developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef GOVERNANCE_VOTE_H
#define GOVERNANCE_VOTE_H
#include "key.h"
#include "primitives/transaction.h"
#include <boost/lexical_cast.hpp>
class CGovernanceVote;
class CConnman;
// INTENTION OF MASTERNODES REGARDING ITEM
enum vote_outcome_enum_t {
VOTE_OUTCOME_NONE = 0,
VOTE_OUTCOME_YES = 1,
VOTE_OUTCOME_NO = 2,
VOTE_OUTCOME_ABSTAIN = 3
};
// SIGNAL VARIOUS THINGS TO HAPPEN:
enum vote_signal_enum_t {
VOTE_SIGNAL_NONE = 0,
VOTE_SIGNAL_FUNDING = 1, // -- fund this object for it's stated amount
VOTE_SIGNAL_VALID = 2, // -- this object checks out in sentinel engine
VOTE_SIGNAL_DELETE = 3, // -- this object should be deleted from memory entirely
VOTE_SIGNAL_ENDORSED = 4, // -- officially endorsed by the network somehow (delegation)
};
static const int MAX_SUPPORTED_VOTE_SIGNAL = VOTE_SIGNAL_ENDORSED;
/**
* Governance Voting
*
* Static class for accessing governance data
*/
class CGovernanceVoting
{
public:
static vote_outcome_enum_t ConvertVoteOutcome(const std::string& strVoteOutcome);
static vote_signal_enum_t ConvertVoteSignal(const std::string& strVoteSignal);
static std::string ConvertOutcomeToString(vote_outcome_enum_t nOutcome);
static std::string ConvertSignalToString(vote_signal_enum_t nSignal);
};
//
// CGovernanceVote - Allow a masternode node to vote and broadcast throughout the network
//
class CGovernanceVote
{
friend bool operator==(const CGovernanceVote& vote1, const CGovernanceVote& vote2);
friend bool operator<(const CGovernanceVote& vote1, const CGovernanceVote& vote2);
private:
bool fValid; //if the vote is currently valid / counted
bool fSynced; //if we've sent this to our peers
int nVoteSignal; // see VOTE_ACTIONS above
COutPoint masternodeOutpoint;
uint256 nParentHash;
int nVoteOutcome; // see VOTE_OUTCOMES above
int64_t nTime;
std::vector<unsigned char> vchSig;
/** Memory only. */
const uint256 hash;
void UpdateHash() const;
public:
CGovernanceVote();
CGovernanceVote(const COutPoint& outpointMasternodeIn, const uint256& nParentHashIn, vote_signal_enum_t eVoteSignalIn, vote_outcome_enum_t eVoteOutcomeIn);
bool IsValid() const { return fValid; }
bool IsSynced() const { return fSynced; }
int64_t GetTimestamp() const { return nTime; }
vote_signal_enum_t GetSignal() const { return vote_signal_enum_t(nVoteSignal); }
vote_outcome_enum_t GetOutcome() const { return vote_outcome_enum_t(nVoteOutcome); }
const uint256& GetParentHash() const { return nParentHash; }
void SetTime(int64_t nTimeIn) { nTime = nTimeIn; UpdateHash(); }
void SetSignature(const std::vector<unsigned char>& vchSigIn) { vchSig = vchSigIn; }
bool Sign(const CKey& keyMasternode, const CPubKey& pubKeyMasternode);
bool CheckSignature(const CPubKey& pubKeyMasternode) const;
bool IsValid(bool fSignatureCheck) const;
void Relay(CConnman& connman) const;
std::string GetVoteString() const {
return CGovernanceVoting::ConvertOutcomeToString(GetOutcome());
}
const COutPoint& GetMasternodeOutpoint() const { return masternodeOutpoint; }
/**
* GetHash()
*
* GET UNIQUE HASH WITH DETERMINISTIC VALUE OF THIS SPECIFIC VOTE
*/
uint256 GetHash() const;
uint256 GetSignatureHash() const;
std::string ToString() const;
ADD_SERIALIZE_METHODS;
template <typename Stream, typename Operation>
inline void SerializationOp(Stream& s, Operation ser_action) {
int nVersion = s.GetVersion();
if (nVersion == 70208 && (s.GetType() & SER_NETWORK)) {
// converting from/to old format
CTxIn txin{};
if (ser_action.ForRead()) {
READWRITE(txin);
masternodeOutpoint = txin.prevout;
} else {
txin = CTxIn(masternodeOutpoint);
READWRITE(txin);
}
} else {
// using new format directly
READWRITE(masternodeOutpoint);
}
READWRITE(nParentHash);
READWRITE(nVoteOutcome);
READWRITE(nVoteSignal);
READWRITE(nTime);
if (!(s.GetType() & SER_GETHASH)) {
READWRITE(vchSig);
}
if (ser_action.ForRead())
UpdateHash();
}
};
#endif
| [
"jerthade@gmail.com"
] | jerthade@gmail.com |
cab78f2be972a536b397795aa9e74ae008460531 | bf18c2433f20db4991c32f0db7d81c437bb2f445 | /tests_meego/FinalPackageTest.h | 4fdf7383847c4fb7ad7a5059b937fb8599aacfe7 | [] | no_license | philippedeswert/buteo-syncml | 0c75be0e4fc027146d2ce620c53d76f887055198 | 20cb210d84057860699f0090cba141416a536908 | refs/heads/master | 2021-01-15T13:51:51.335711 | 2014-09-16T07:18:44 | 2014-09-16T07:18:44 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,847 | h | /*
* This file is part of buteo-syncml package
*
* Copyright (C) 2010 Nokia Corporation. All rights reserved.
*
* Contact: Sateesh Kavuri <sateesh.kavuri@nokia.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Nokia Corporation nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef FINALPACKAGETEST_H
#define FINALPACKAGETEST_H
#include <QTest>
class FinalPackageTest : public QObject
{
Q_OBJECT
private slots:
void testPackage();
};
#endif // FINALPACKAGETEST_H
| [
"sergey.gerasimenko@nokia.com"
] | sergey.gerasimenko@nokia.com |
a2943413355946c0f1f0aa3865e0c2a6818f2875 | aa4cfb38ac4cea8c981d0cf9a8fae1716528989a | /18/18.5/Person.cpp | cc929cf9b1a3aea1e6f52f9fdce51c145b38ac31 | [] | no_license | AlexandrViktorovich/PNRPU_Labs | 79ddad126f5a430a769822d527f5e118ea33b63a | fa15bfc26c936c42973fdf67cfe60be32005bf9e | refs/heads/master | 2023-05-10T00:36:39.134588 | 2021-05-31T09:16:17 | 2021-05-31T09:16:17 | 319,021,536 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 806 | cpp | #include "Person.h"
Person::Person(void)
{
name = "";
age = 0;
}
Person::~Person(void)
{
}
Person::Person(string M, int C)
{
name = M;
age = C;
}
Person::Person(const Person& person)
{
name = person.name;
age = person.age;
}
void Person::set_name(string M)
{
name = M;
}
void Person::set_age(int C)
{
age = C;
}
Person& Person::operator=(const Person& c)
{
if (&c == this)return *this;
name = c.name;
age = c.age;
return *this;
}
istream& operator>>(istream& in, Person& c)
{
cout << "\nname: "; in >> c.name;
cout << "\nage: "; in >> c.age;
return in;
}
ostream& operator<<(ostream& out, const Person& c)
{
out << "\nNAME: " << c.name;
out << "\nAGE: " << c.age;
cout << "\n";
return out;
}
void Person::Show()
{
cout << "\nNAME: " << name;
cout << "\nAGE: " << age;
cout << "\n";
} | [
"kent1234810@gmail.com"
] | kent1234810@gmail.com |
aa09d0db9da0e254d78a43dad61be573d1484fe0 | 7e7f76ba6bd0450f1cc082b7b7e2bea1a136c21e | /minesweeper (1).cpp | 5055b5949e2fe4181be98b198a599f35c1881e1d | [] | no_license | sneha1608/minesweeper | 11296964384b912cd3008dbbd5e93850adc29f1c | c91645fddbed6b1114306e0ea3e276845f053615 | refs/heads/master | 2020-11-26T22:35:43.401421 | 2019-12-20T08:09:57 | 2019-12-20T08:09:57 | 229,219,749 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 15,509 | cpp | #include<glut.h>
#include<iostream>
#include<stdlib.h>
#include<iomanip>
using namespace std;
// Shared constant pool. Several logically distinct groups live in one
// anonymous enum, so values overlap on purpose:
//   cell status : UNCOVER=0, COVER=1
//   cell content: MINE=-1, BLANK=0, ADJACENT=1 (1-8 = adjacent mine count)
//   game result : LOSE=-1, UNKNOWN=0, WIN=1, IN_PROGRESS=2, GAME_OVER=3
enum {UNCOVER,COVER,MINE=-1,BLANK,ADJACENT,LOSE=-1,UNKNOWN,WIN,IN_PROGRESS,GAME_OVER};
// Board is dim x dim cells containing 'mines' mines; the board is drawn
// into a 0..max_x / 0..max_y logical space, xint/yint per cell.
const int dim=10,mines=10;
const int max_x=100,max_y=100,xint=max_x/dim,yint=max_y/dim;
int flag=0; // which screen is active: 0 = front/start screen, 1 = game board
// One square of the playing field.
//   content : MINE (-1), BLANK (0), or 1-8 = number of adjacent mines
//   status  : COVER (still hidden) or UNCOVER (revealed)
class cell
{
public :
    int content ;
    int status;
    cell() : content(BLANK), status(COVER) {}
};

// The full dim x dim playing field, shared by every routine below.
cell board[dim][dim];
// Tracks overall game progress: counts of covered/uncovered cells,
// where the mines were placed, and the cached end-of-game verdict.
class GAMEPLAY
{
public:
    int flagged,uncovered,covered,status,result;
    int mine_pos[mines][2],mine_count; // (row,col) of every placed mine

    GAMEPLAY()
    {
        flagged=0;           // was left uninitialized in the original
        uncovered=0;
        covered=dim*dim;     // every cell starts covered
        result=UNKNOWN;
        mine_count=0;
        for(int i=0;i<mines;i++)
            for(int j=0;j<2;j++)
                mine_pos[i][j]=-1; // -1 marks an unused slot
    }

    // Record the board position of a newly placed mine.
    void add_mine(int i,int j)
    {
        mine_pos[mine_count][0]=i;
        mine_pos[mine_count][1]=j;
        mine_count++;
    }

    // Return WIN, LOSE or UNKNOWN. The first decisive answer is cached
    // in 'result' so every later call returns the same verdict.
    int check()
    {
        if(result!=UNKNOWN)
            return result;
        // Win when the only cells still covered are the mines themselves.
        // (Was hard-coded to 10, which only worked because mines==10.)
        if(covered==mines)
        {
            status=GAME_OVER;
            result=WIN;
            return WIN;
        }
        // Lose as soon as any mine cell has been uncovered.
        for(int i=0;i<mines;i++)
        {
            if(UNCOVER==board[mine_pos[i][0]][mine_pos[i][1]].status)
            {
                status=GAME_OVER;
                result=LOSE;
                return LOSE;
            }
        }
        return UNKNOWN;
    }
};

// Single global instance used by all game logic.
GAMEPLAY game_stats;
// ---- forward declarations ----
// text rendering helpers (front() and drawstring() are defined elsewhere)
void drawstring(float x,float y, float z ,char *string);
void front(void);
// board setup
void initiate_board();
void draw_board();
void init_mines(int num,int dim);
void init_mines_adjacent(int num, int dim);
void calc_adjacent_mines(int i,int j,int dim);
// game logic: cell reveal / flood-fill / click handling
void uncover_cell(int i, int j);
void uncover_area(int i, int j);
void uncover_area_check_cell(int k, int l);
void left_click(int i,int j);
void game_over(int result);
// drawing primitives and input callbacks
void draw_square(int x,int y,int color);
void user_input(int button,int state,int x,int y);
void show_content(int i,int j);
void myKeyboardFunc( unsigned char key, int x, int y );
void gl_init();
void display();
void mydisplay(void);
void printMessage(char *msg);
void printString(char *s);
void makeRasterFont(void);
/**************************main************************/
// Program entry: create a single-buffered RGB GLUT window, register the
// display/keyboard/mouse callbacks, initialize GL state and the board,
// then hand control to the GLUT event loop.
int main(int argc, char *argv[])
{
    glutInit(&argc,argv);
    glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB);
    glutInitWindowSize(1200,650);   // 10x10 grid -> 120x65 px per cell (see user_input)
    glutInitWindowPosition(100,50);
    glutCreateWindow("Minesweeper");
    glutDisplayFunc(mydisplay);
    glutKeyboardFunc(myKeyboardFunc);
    glutMouseFunc(user_input);
    gl_init();
    initiate_board();
    glutMainLoop();                 // never returns
    return 0;                       // was 'return 1', a failure exit code
}
// Top-level GLUT display callback: 'flag' selects which screen to draw
// (0 = front/start screen, 1 = the game board).
// NOTE(review): kept as two independent ifs rather than if/else in case
// front() flips 'flag' during the call — confirm before restructuring.
void mydisplay(void)
{
glClear(GL_COLOR_BUFFER_BIT);
if(flag==0)
front();
if(flag==1)
display();
}
/*********init_mines********/
// Scatter 'num' mines at random distinct positions on a dim x dim board
// and record each position in the global game statistics.
void init_mines(int num,int dim)
{
    int placed=0;
    while(placed<num)
    {
        int row=rand()%dim;
        int col=rand()%dim;
        if(board[row][col].content==MINE)
            continue; // already a mine here - draw another position
        board[row][col].content=MINE;
        game_stats.add_mine(row,col);
        placed++;
    }
}
/********init_mine_adjacent*********/
// For every non-mine cell, compute how many of its neighbours contain a
// mine. 'num' is unused; kept only so the declared interface is unchanged.
void init_mines_adjacent(int num, int dim)
{
    (void)num; // unused parameter
    // The original kept an uninitialized 'count' that was incremented
    // but never read - removed.
    for(int i=0;i<dim;i++)
        for(int j=0;j<dim;j++)
        {
            if(board[i][j].content!=MINE)
                calc_adjacent_mines(i,j,dim);
        }
}
/********calc_adjacent_mines*********/
// Count the mines in the (up to 8) cells surrounding (i,j) and
// accumulate the total into board[i][j].content.
void calc_adjacent_mines(int i,int j,int dim)
{
    for(int di=-1;di<=1;di++)
    {
        for(int dj=-1;dj<=1;dj++)
        {
            if(di==0&&dj==0)
                continue; // skip the cell itself
            int ni=i+di;
            int nj=j+dj;
            if(ni<0||ni>=dim||nj<0||nj>=dim)
                continue; // neighbour is off the board
            if(board[ni][nj].content==MINE)
                board[i][j].content++;
        }
    }
}
/**********initiate_board***********/
// Build a fresh board: place the mines, then compute every cell's
// adjacent-mine count. Order matters - the counts depend on the mines
// already being in place.
void initiate_board()
{
init_mines(mines,dim);
init_mines_adjacent(mines,dim);
}
/***************uncover_cell*******************/
// Reveal the content of cell (i,j) and, for a blank cell, flood-fill
// outward into its neighbourhood.
void uncover_cell(int i , int j)
{
    // Every possible content value (MINE, BLANK, 1-8) was handled by an
    // identical show_content() call in the original 10-way switch, so it
    // collapses to a single call; the 'default' branch was unreachable.
    show_content(i,j);
    if(board[i][j].content==BLANK)
        uncover_area(i,j);
}
/**********uncover_area_check_cell******************/
// Flood-fill step: reveal cell (k,l) if it is still covered and not a
// mine, update the global counters, redraw it, and keep expanding
// recursively when the cell turns out to be blank.
void uncover_area_check_cell(int k, int l)
{
    if(board[k][l].status!=COVER)
        return; // already revealed
    if(board[k][l].content==MINE)
        return; // never auto-reveal a mine

    board[k][l].status=UNCOVER;
    game_stats.covered--;
    game_stats.uncovered++;
    draw_square(k,l,UNCOVER);
    show_content(k,l);

    if(board[k][l].content==BLANK)
        uncover_area(k,l);
}
/**********uncover_area******************/
// Expand the flood-fill from blank cell (i,j) to its in-bounds
// orthogonal neighbours (up, down, left, right).
// NOTE(review): diagonal neighbours are not visited here even though
// calc_adjacent_mines() counts all 8 - confirm this is intentional.
void uncover_area(int i, int j)
{
    // (removed the unused locals 'k' and 'l' from the original)
    if(i-1>=0)
        uncover_area_check_cell(i-1,j);
    if(i+1<dim)
        uncover_area_check_cell(i+1,j);
    if(j-1>=0)
        uncover_area_check_cell(i,j-1);
    if(j+1<dim)
        uncover_area_check_cell(i,j+1);
}
/************left_click*****************/
// Handle a left click on board cell (i,j): reveal the cell (and any
// flood-filled area) and show the end banner if the game is decided.
void left_click(int i,int j)
{
    if(board[i][j].status!=COVER)
        return; // clicking an already-revealed cell does nothing

    // Ignore further moves once the game has already been decided.
    // check() caches its verdict, so repeated calls are cheap and stable.
    int verdict = game_stats.check();
    if(verdict!=UNKNOWN)
    {
        game_over(verdict);
        return;
    }

    board[i][j].status=UNCOVER;
    game_stats.covered--;
    game_stats.uncovered++;
    draw_square(i,j,UNCOVER);
    uncover_cell(i,j);

    // This click may have just won or lost the game.
    verdict = game_stats.check();
    if(verdict!=UNKNOWN)
        game_over(verdict);
}
/*************game_over*******************/
// If the game has been decided, stop accepting board clicks and show
// the win/lose banner; a still-running game (UNKNOWN) is a no-op.
void game_over(int result)
{
    if(result==UNKNOWN)
        return;
    glutMouseFunc(NULL); // detach the mouse handler - no more moves
    if(result==WIN)
        printMessage("YOU WIN");
    else
        printMessage("YOU LOSE");
}
/**************************************************
Graphic routines
*********************************************/
/**********user_input***********/
// Mouse callback: map window pixel coordinates to board cell
// coordinates and forward left-button presses to the game logic.
// NOTE(review): the divisors assume the fixed 1200x650 window created in
// main() (120x65 px per cell, y flipped because GLUT's origin is at the
// top) and that the window is never resized - confirm.
void user_input(int button,int state,int x,int y)
{
int square_x=x/120;
int square_y=(650-y)/65;
if(button==GLUT_LEFT_BUTTON&&state==GLUT_DOWN&&flag==1)
{
left_click(square_x,square_y);
}
}
/***********************draw_board**********************/
// Redraw the whole board: grey grid lines first, then every cell's
// square, then the contents of the uncovered cells.
void draw_board()
{
    int x_coord=0,y_coord=0;
    glClear(GL_COLOR_BUFFER_BIT);
    glColor3f(0.5,0.5,0.5);

    // Grid lines.
    glBegin(GL_LINES);
    for(x_coord=0;x_coord<=max_x;x_coord+=xint)
    {
        glVertex3i(x_coord,0,0);
        glVertex3i(x_coord,max_y,0);
    }
    for(y_coord=0;y_coord<=max_y;y_coord+=yint)
    {
        glVertex3i(0,y_coord,0);
        glVertex3i(max_x,y_coord,0);
    }
    // Close the line batch BEFORE drawing the cells: draw_square() and
    // show_content() issue their own glBegin()/raster calls, which are
    // invalid inside an open glBegin(GL_LINES) block. The original code
    // nested them, generating GL_INVALID_OPERATION errors.
    glEnd();

    // Cell squares and, for uncovered cells, their contents.
    for(int i=0;i<dim;i++)
    {
        for(int j=0;j<dim;j++)
        {
            draw_square(i,j,board[i][j].status);
            if(board[i][j].status==UNCOVER)
                show_content(i,j);
        }
    }
    glFlush();
}
/**********draw_square************/
/* Fill cell (i,j) with the colour that matches its state: red while
   covered, green once uncovered.  A one-pixel margin keeps the grid
   lines visible around the square. */
void draw_square(int i,int j,int color)
{
    const int left=i*xint+1;
    const int bottom=j*yint+1;
    const int right=i*xint+xint-1;
    const int top=j*yint+yint-1;
    if(color==COVER)
        glColor3f(0.89,0.0,0.0);       /* covered: red */
    else if(color==UNCOVER)
        glColor3f(0,1,0);              /* uncovered: green */
    glBegin(GL_POLYGON);
    glVertex3i(left,bottom,0);
    glVertex3i(left,top,0);
    glVertex3i(right,top,0);
    glVertex3i(right,bottom,0);
    glEnd();
    glFlush();
}
/************************print_message********************/
/* Draw msg in red, positioned four cells in from the board origin
   (roughly the centre of the playing field). */
void printMessage(char *msg)
{
    glColor3f(1.0,0.0,0.0);            /* verdict text is always red */
    glRasterPos2i(4*xint,4*yint);
    printString(msg);
    glFlush();
}
/*************************show_content****************/
/* Draw the content of cell (i,j) inside its square: the neighbour-mine
   count as a digit, 'M' for a mine, or a space for a blank cell.
   BUG FIX: printString() measures its argument with strlen(), but the
   original passed the address of a single char with no terminator -
   undefined behaviour (reads past the local).  A proper two-byte,
   NUL-terminated buffer is used instead, and the magic numbers 77/32/48
   are replaced by the character literals they encoded. */
void show_content(int i,int j)
{
    char label[2];
    if(board[i][j].content==MINE)
        label[0]='M';
    else if(board[i][j].content==BLANK)
        label[0]=' ';
    else
        label[0]=(char)(board[i][j].content+'0');  /* digit 1..8 */
    label[1]='\0';                     /* strlen() needs a terminator */
    glColor3f(0.0,0.0,0.0);
    glRasterPos2i(i*xint+2,j*yint+2);
    printString(label);
    glFlush();
}
/**********gl-init*************/
/* One-time OpenGL setup: black background, a 0..100 orthographic view
   and the bitmap font display lists.
   BUG FIX: the original called glLoadIdentity() while the default
   modelview matrix was current and then applied glOrtho to the
   projection stack without resetting it; calling gl_init() more than
   once would have compounded the projection.  Identity is now loaded
   onto each stack explicitly. */
void gl_init()
{
    glClearColor(0.0,0.0,0.0,0);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glOrtho(0.0,100.0,0.0,100.0,-100.0,100.0);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glClear(GL_COLOR_BUFFER_BIT);
    glFlush();
    makeRasterFont();                  /* build the display-list font */
}
/**********************display *************************/
/* GLUT display callback: repaint the board and re-check for a verdict
   so a finished game keeps showing its WIN/LOSE message after redraws.
   FIX: removed the unused `static int i` counter. */
void display()
{
    draw_board();
    game_over(game_stats.check());
}
/*************************Font_display_list*************************/
/* 8x13 bitmap glyphs for the display-list raster font built by
   makeRasterFont().  Each glyph is 13 bytes, one byte per pixel row,
   stored bottom row first as glBitmap() expects. */
/* Blank glyph for the space character (all rows empty). */
GLubyte space[] =
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
/* Upper-case glyphs 'A'..'Z', in order. */
GLubyte letters[][13] = {
{0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xff, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18}, /* 'A' */
{0x00, 0x00, 0xfe, 0xc7, 0xc3, 0xc3, 0xc7, 0xfe, 0xc7, 0xc3, 0xc3, 0xc7, 0xfe},
{0x00, 0x00, 0x7e, 0xe7, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xe7, 0x7e},
{0x00, 0x00, 0xfc, 0xce, 0xc7, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc7, 0xce, 0xfc},
{0x00, 0x00, 0xff, 0xc0, 0xc0, 0xc0, 0xc0, 0xfc, 0xc0, 0xc0, 0xc0, 0xc0, 0xff},
{0x00, 0x00, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xfc, 0xc0, 0xc0, 0xc0, 0xff},
{0x00, 0x00, 0x7e, 0xe7, 0xc3, 0xc3, 0xcf, 0xc0, 0xc0, 0xc0, 0xc0, 0xe7, 0x7e},
{0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xff, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3},
{0x00, 0x00, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x7e},
{0x00, 0x00, 0x7c, 0xee, 0xc6, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06},
{0x00, 0x00, 0xc3, 0xc6, 0xcc, 0xd8, 0xf0, 0xe0, 0xf0, 0xd8, 0xcc, 0xc6, 0xc3},
{0x00, 0x00, 0xff, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0},
{0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xff, 0xff, 0xe7, 0xc3},
{0x00, 0x00, 0xc7, 0xc7, 0xcf, 0xcf, 0xdf, 0xdb, 0xfb, 0xf3, 0xf3, 0xe3, 0xe3},
{0x00, 0x00, 0x7e, 0xe7, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xe7, 0x7e},
{0x00, 0x00, 0xc0, 0xc0, 0xc0, 0xc0, 0xc0, 0xfe, 0xc7, 0xc3, 0xc3, 0xc7, 0xfe},
{0x00, 0x00, 0x3f, 0x6e, 0xdf, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c},
{0x00, 0x00, 0xc3, 0xc6, 0xcc, 0xd8, 0xf0, 0xfe, 0xc7, 0xc3, 0xc3, 0xc7, 0xfe},
{0x00, 0x00, 0x7e, 0xe7, 0x03, 0x03, 0x07, 0x7e, 0xe0, 0xc0, 0xc0, 0xe7, 0x7e},
{0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff},
{0x00, 0x00, 0x7e, 0xe7, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3},
{0x00, 0x00, 0x18, 0x3c, 0x3c, 0x66, 0x66, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3},
{0x00, 0x00, 0xc3, 0xe7, 0xff, 0xff, 0xdb, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3},
{0x00, 0x00, 0xc3, 0x66, 0x66, 0x3c, 0x3c, 0x18, 0x3c, 0x3c, 0x66, 0x66, 0xc3},
{0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x66, 0x66, 0xc3},
{0x00, 0x00, 0xff, 0xc0, 0xc0, 0x60, 0x30, 0x7e, 0x0c, 0x06, 0x03, 0x03, 0xff}  /* 'Z' */
};
/* Digit glyphs '0'..'9', in order (seven-segment style). */
GLubyte digits[][13] = {
{0xff, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81, 0xff}, /* '0' */
{0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80},
{0xff, 0x80, 0x80, 0x80, 0x80, 0x80, 0xff, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff},
{0xff, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff},
{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x81, 0x81, 0x81, 0x81, 0x81, 0x81},
{0xff, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x80, 0x80, 0x80, 0x80, 0x80, 0xff},
{0xff, 0x81, 0x81, 0x81, 0x81, 0x81, 0xff, 0x80, 0x80, 0x80, 0x80, 0x80, 0xff},
{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff},
{0xff, 0x81, 0x81, 0x81, 0x81, 0x81, 0xff, 0x81, 0x81, 0x81, 0x81, 0x81, 0xff},
{0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0xff, 0x81, 0x81, 0x81, 0x81, 0x81, 0xff}  /* '9' */
};
/* Base of the 128 display lists allocated by makeRasterFont(); list
   fontOffset + c renders character c. */
GLuint fontOffset;
void makeRasterFont(void)
{
GLuint i, j;
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
fontOffset = glGenLists (128);
for (i = 0,j = 'A'; i < 26; i++,j++) {
glNewList(fontOffset + j, GL_COMPILE);
glBitmap(8, 13, 0.0, 2.0, 10.0, 0.0, letters[i]);
glEndList();
}
for (i = 0,j = '0'; i < 10; i++,j++) {
glNewList(fontOffset + j, GL_COMPILE);
glBitmap(8, 13, 0.0, 2.0, 10.0, 0.0, digits[i]);
glEndList();
}
glNewList(fontOffset + ' ', GL_COMPILE);
glBitmap(8, 13, 0.0, 2.0, 10.0, 0.0, space);
glEndList();
}
void printString(char *s)
{
glPushAttrib (GL_LIST_BIT);
glListBase(fontOffset);
glCallLists(strlen(s), GL_UNSIGNED_BYTE, (GLubyte *) s);
glPopAttrib ();
}
/* Draw the title/start screen shown before the game begins.  Every line
   is described by a colour, a position and its text; the table is drawn
   in order, top to bottom. */
void front(void)
{
   struct TitleLine { float r, g, b, x, y; const char *text; };
   static const TitleLine table[] = {
      {0.0f, 0.0f, 1.0f, 20.0f, 90.0f, "COLLEGE OF ENGINEERING"},
      {0.7f, 0.0f, 1.0f, 21.0f, 82.0f, "DEPARTMENT OF COMPUTER SCIENCE AND ENGINEERING"},
      {1.0f, 0.5f, 0.0f, 38.0f, 70.0f, "A MINI PROJECT ON"},
      {1.0f, 0.0f, 0.0f, 40.0f, 60.0f, "MINESWEEPER"},
      {1.0f, 0.5f, 0.0f, 20.0f, 50.0f, "By:"},
      {0.5f, 0.0f, 0.5f, 10.0f, 40.0f, "Some Name"},
      {0.5f, 0.0f, 0.5f, 10.0f, 34.0f, "Some Name"},
      {1.0f, 0.5f, 0.0f, 68.0f, 50.0f, "Guides:"},
      {0.5f, 0.2f, 0.2f, 63.0f, 40.0f, "Mr. Some Name"},
      {0.5f, 0.2f, 0.2f, 63.0f, 34.0f, "Mr.Some Name"},
      {1.0f, 0.1f, 1.0f, 32.0f, 10.0f, " Press Enter to start MINESWEEPER"},
   };
   glClear(GL_COLOR_BUFFER_BIT);
   for (unsigned n = 0; n < sizeof(table)/sizeof(table[0]); n++) {
      const TitleLine &l = table[n];
      glColor3f(l.r, l.g, l.b);
      drawstring(l.x, l.y, 0.0, const_cast<char *>(l.text));
   }
   glFlush();
   glutSwapBuffers();
}
void drawstring(float x,float y, float z ,char *string)
{
char *c;
glRasterPos3i(x,y,z);
for(c=string;*c!='\0';c++)
{
glutBitmapCharacter(GLUT_BITMAP_TIMES_ROMAN_24,*c);
}
}
/* GLUT keyboard callback: Enter (13) dismisses the start screen by
   setting flag, Escape (27) quits; the display is refreshed afterwards
   regardless of the key pressed. */
void myKeyboardFunc( unsigned char key, int x, int y )
{
    if(key==13)                /* Enter: leave the title screen */
    {
        if(flag==0)
            flag=1;
    }
    else if(key==27)           /* Escape: quit the program */
    {
        exit(0);
    }
    mydisplay();
}
| [
"noreply@github.com"
] | noreply@github.com |
62206c8257077e8ae6a04c51d1dd43f334b4fad8 | db014ccc3f431d02aabaf7e2a1324e8a38527553 | /insertion_sort.cpp | 935f5c139ed6e6cc56d4f8bad3e8fe4afd267fc5 | [] | no_license | KAWSER04/Convex_Hull | 705b1e5978a7c3cf5b6c542098e19e1d899a3c78 | 1e79b11bd8d3181b7fb469a1a5558d1d04cbb108 | refs/heads/master | 2020-04-10T07:15:31.133037 | 2018-12-07T21:19:32 | 2018-12-07T21:19:32 | 160,876,361 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 200 | cpp | #include<bits/stdc++.h>
using namespace std;
/* Sort a fixed demo array with insertion sort and print the result.
   BUG FIX: the original did not compile - it used the invalid
   expression `arr.length` (C++ arrays have no .length member), read an
   undeclared variable `key`, ran the loop to j<=length (one past the
   end) and never actually shifted elements.  This is a complete,
   correct insertion sort. */
int main(){
    int arr[]={20,2,5,7,32};
    int length = sizeof(arr)/sizeof(int);
    for(int j=1;j<length;j++){
        int key=arr[j];                /* element being inserted */
        int i=j-1;
        while(i>=0 && arr[i]>key){     /* shift larger elements right */
            arr[i+1]=arr[i];
            i--;
        }
        arr[i+1]=key;
    }
    for(int j=0;j<length;j++)
        std::cout<<arr[j]<<(j+1<length?' ':'\n');
    return 0;
}
| [
"kawser.cse.ru@gmail.com"
] | kawser.cse.ru@gmail.com |
4503d3cfa6bed9a76e91c165a03770ff04d9c9cf | c487c6038913cb68597799365b7e2243a9ff4df4 | /ForwardTranslator.cpp | 6409ec9d34cdede3d51ad0951ab7d6e42852ea22 | [] | no_license | jmarrec/OpenStudio-test-mac-warning | ada2b8b591de5a92ffc351c1c37f106ede0ad45e | 9df66a226c339bd24b61429235f6f981084c8605 | refs/heads/master | 2020-07-07T00:05:55.164623 | 2019-08-19T15:35:59 | 2019-08-19T15:35:59 | 203,180,822 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 312 | cpp | #include "ForwardTranslator.hpp"
#include "Material.hpp"
#include "Material_Impl.hpp"
#include "IdfObject.hpp"
#include "IdfObject_Impl.hpp"
namespace openstudio {
namespace gbxml {
// Default constructor: the translator carries no visible state, so there
// is nothing to initialize here.
ForwardTranslator::ForwardTranslator()
{
}
// Trivial destructor kept out-of-line; nothing to release.
ForwardTranslator::~ForwardTranslator()
{
}
} // gbxml
} // openstudio
| [
"julien.marrec@gmail.com"
] | julien.marrec@gmail.com |
7c1bfc7cb394634113b8199d034463f9a2713ca9 | e2eeb3d362c55e678c559e3a85500b26b0a38e77 | /src/SocketNetwork.cpp | 9ba5d1b02aac4efaeea88cc029c35245ad0589f5 | [] | no_license | LJH960101/JHNet | b016f2dc76f291b7b5d929ff71e218a17c7e53cd | 99159c849576457935fb0234a4d5eed8ea78657d | refs/heads/master | 2023-05-07T08:28:10.898085 | 2021-05-31T08:33:21 | 2021-05-31T08:33:21 | 325,768,993 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,863 | cpp | #include "SocketNetwork.h"
#include "Log.h"
#include <thread>
#include <boost/bind.hpp>
#include <boost/asio.hpp>
#include <boost/optional.hpp>
#include <iostream>
IMPLEMENT_SINGLETON(CSocketNetwork);
using boost::asio::ip::tcp;
using namespace std;
// One accepted TCP connection driven by asynchronous reads/writes.
// Lifetime: instances are created with `new` by CIOCPAcceptor and destroy
// themselves (`delete this`) when an async operation reports an error, so
// no external owner ever deletes a live session.
class CIOCPSession
{
public:
    CIOCPSession(boost::asio::io_context& ioContext_)
        : m_socket(ioContext_)
    {
    }
    // Exposes the raw socket so the acceptor can async_accept into it.
    tcp::socket& GetSocket()
    {
        return m_socket;
    }
    // Queue the next asynchronous read into m_Receivebuf; _OnRead re-arms
    // itself, so one call keeps the connection reading forever.
    void ProcRead()
    {
        m_socket.async_read_some(boost::asio::buffer(m_Receivebuf, MAX_BUF_LENGTH),
            boost::bind(&CIOCPSession::_OnRead, this,
                boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred));
    }
    // Queue an asynchronous send of the first size_ bytes of m_Sendbuf.
    // NOTE(review): async_write_some may transfer fewer than size_ bytes;
    // a full-buffer guarantee would need boost::asio::async_write - confirm
    // callers tolerate partial writes.  Also, nothing in this class fills
    // m_Sendbuf; the caller must populate it before calling ProcWrite.
    void ProcWrite(uint32_t size_)
    {
        m_socket.async_write_some(boost::asio::buffer(m_Sendbuf, size_),
            boost::bind(&CIOCPSession::_OnWrite, this,
                boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred));
    }
private:
    // Read completion: log the data and re-arm the read; on error the
    // session deletes itself (see lifetime note on the class).
    void _OnRead(const boost::system::error_code error_, size_t length_)
    {
        if (!error_)
        {
            cout << "Receive Complete : " << m_Receivebuf << endl;
            ProcRead();
        }
        else
        {
            delete this;
        }
    }
    // Write completion: log success; on error the session deletes itself.
    void _OnWrite(const boost::system::error_code error_, size_t length_)
    {
        if (!error_)
        {
            cout << "Send Complete : " << m_Sendbuf << endl;
        }
        else
        {
            delete this;
        }
    }
    enum { MAX_BUF_LENGTH = 1024 };   // fixed size of both I/O buffers
    tcp::socket m_socket;
    char m_Receivebuf[MAX_BUF_LENGTH];
    char m_Sendbuf[MAX_BUF_LENGTH];
};
// Listens on the given port and spawns a CIOCPSession per incoming
// connection.  Accepting re-arms itself, so construction starts an
// endless accept loop on the io_context.
class CIOCPAcceptor
{
public:
    CIOCPAcceptor(boost::asio::io_context& ioContext_, short port_)
        : m_ioContext(ioContext_), m_acceptor(ioContext_, tcp::endpoint(tcp::v4(), port_))
    {
        _ProcAccept();
    }
private:
    // Allocate the session up front and accept directly into its socket.
    // (Variable name "newSeesion" is a pre-existing typo, kept as-is.)
    void _ProcAccept()
    {
        CIOCPSession* newSeesion = new CIOCPSession(m_ioContext);
        m_acceptor.async_accept(newSeesion->GetSocket(),
            boost::bind(&CIOCPAcceptor::_OnAccept, this, newSeesion, boost::asio::placeholders::error));
    }
    // Accept completion: start reading on success, otherwise free the
    // pre-allocated session; either way queue the next accept.  Successful
    // sessions manage their own lifetime (they delete themselves on error).
    void _OnAccept(CIOCPSession* newSession_, const boost::system::error_code& error_)
    {
        if (!error_)
        {
            newSession_->ProcRead();
        }
        else
        {
            delete newSession_;
        }
        _ProcAccept();
    }
    boost::asio::io_context &m_ioContext;   // non-owning; outlives this object
    tcp::acceptor m_acceptor;
};
/* Thread body: run the Boost.Asio io_context with an acceptor bound to
   port_.  Returns when the context stops or an exception escapes run().
   BUG FIX: the original catch block deleted m_iocpThread from inside the
   very thread it represents; deleting a joinable std::thread invokes
   std::terminate(), so any exception killed the whole process.  The
   thread object is now left for its owner to join and reset. */
void CSocketNetwork::_SocketThread(u_int16_t port_){
    try
    {
        CIOCPAcceptor iocpAcceptor(m_ioContext, port_);
        m_ioContext.run();
    }
    catch (exception& e)
    {
        Log(string("_SocketThread Exception : ") + e.what());
        // NOTE(review): m_iocpThread is intentionally left set - a thread
        // may not delete its own std::thread object.  The owner must
        // join() and clear it before Start() can be called again.
    }
}
/* Launch the acceptor thread on port_.  Returns true when the thread was
   started, false (with a log message) when one is already running. */
bool CSocketNetwork::Start(u_int16_t port_){
    if(m_iocpThread == nullptr){
        m_iocpThread = new std::thread(&CSocketNetwork::_SocketThread, this, port_);
        return true;
    }
    Log("CSocketNetwork: m_iocpThread already started.");
    return false;
}
"ljh960101@gmail.com"
] | ljh960101@gmail.com |
7f9103612e155aa6bdb44324a098e696dd265015 | 96f3768ac967b5ad6f4cfc54fe0a66c2f6e8bdab | /pascal-triangle.cpp | 0d29c1b3b991238aa8a90b9c21b34b9c2d5b7503 | [] | no_license | prashanth-io/c-plus-plus | ee3ea90ae7749d27548227b1fa53047ef4ef03ee | 33f38156d49101b163d1d9f40a147d5a7574231e | refs/heads/main | 2023-07-09T11:43:56.886902 | 2021-08-11T05:06:19 | 2021-08-11T05:06:19 | 386,031,482 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,186 | cpp |
#include<iostream>
using namespace std;
//void pascaltriangle(int n)
//{
// for(int i=1;i<=n;i++)
// {
// int coef=1;
// for(int j=1;j<=i;j++)
// {
// cout<<x;
// x++;
// }
// cout<<endl;
// }
//}
// Print a left-aligned half pyramid of '*' that shrinks from n stars on
// the first row down to a single star on the last, one row per line.
void halfpyramidpattern2(int n)
{
    for(int row=n;row>=1;row--)
    {
        for(int star=0;star<row;star++)
            cout<<"*";
        cout<<endl;
    }
}
// Print n rows of Pascal's triangle, centred with leading spaces.
// Each row's binomial coefficients are produced incrementally from the
// previous one: C(row-1, col) = C(row-1, col-1) * (row-col) / col.
void pascaltriangle(int n)
{
    for(int row=1;row<=n;row++)
    {
        int value=1;                   /* C(row-1, 0) */
        for(int pad=n-row;pad>0;pad--)
            cout<<" ";
        for(int col=1;col<=row;col++)
        {
            cout<<value<<" ";
            value=value*(row-col)/col;
        }
        cout<<endl;
    }
}
// Print a right-shifted half pyramid: the row with i stars (i runs from
// n down to 1) is indented by n-i spaces, stars separated by spaces.
void halfpyramidpattern4(int n)
{
    for(int row=n;row>=1;row--)
    {
        for(int pad=0;pad<n-row;pad++)
            cout<<" ";
        for(int star=0;star<row;star++)
            cout<<"*"<<" ";
        cout<<endl;
    }
}
// Print a half pyramid of consecutive integers: row i holds the next i
// values of a running counter that starts at 1.
void halfpyramidpattern5(int n)
{
    int counter=1;
    for(int row=1;row<=n;row++)
    {
        for(int col=0;col<row;col++)
            cout<<counter++;
        cout<<endl;
    }
}
// Entry point: ask the user for a row count and print Pascal's triangle.
int main()
{
    int levels;
    cout<<"enter number of levels of the pattern :"<<endl;
    cin>>levels;
    // halfpyramidpattern1(levels);
    cout<<endl;
    cout<<endl;
    pascaltriangle(levels);
    return 0;
}
| [
"noreply@github.com"
] | noreply@github.com |
61311e95606588dd2b2c65c9c76c894127271cfd | 50a8a0c451c72ddea80e0b2546f9c3d802a9f980 | /aten/src/ATen/cuda/LegacyTHFunctionsCUDA.cpp | 6b6974cda1e9f0c69804070911304e1d21f2820d | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | JamesTrick/pytorch | fcd0fe3de4c7bb73928620dd8309ed230aa5c4ce | b84d9b48d84807ca2d77d7180bdcf1f68097c61e | refs/heads/master | 2021-06-24T16:08:10.986880 | 2020-11-30T20:00:47 | 2020-11-30T20:02:40 | 168,422,380 | 1 | 0 | NOASSERTION | 2019-01-30T22:02:24 | 2019-01-30T22:02:23 | null | UTF-8 | C++ | false | false | 280,549 | cpp | #include <ATen/LegacyTHFunctionsCUDA.h>
#include <ATen/ATen.h>
#include <ATen/Utils.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/CUDAGeneratorImpl.h>
#include <ATen/ExpandUtils.h>
#include <THC/THC.h>
#include <THC/THCTensor.hpp>
#include <THCUNN/THCUNN.h>
#undef THNN_
#undef THCIndexTensor_
#include <ATen/DeviceGuard.h>
#include <ATen/cuda/ATenCUDAGeneral.h>
#include <ATen/cuda/CUDADevice.h>
#include <ATen/cuda/CUDAContext.h>
namespace at {
namespace native {
namespace legacy {
namespace cuda {
namespace {
// Scalar type of a single tensor argument (used to pick the THC kernel).
ScalarType infer_scalar_type(const Tensor & t) {
  return t.scalar_type();
}
// Scalar type of a tensor-list argument; dispatch keys off the first
// element, so the list must be non-empty.
ScalarType infer_scalar_type(const TensorList & tl) {
  TORCH_CHECK(tl.size() > 0, "expected a non-empty list of Tensors");
  return tl[0].scalar_type();
}
// TensorOptions for allocating CUDA results with the given scalar type.
TensorOptions options(ScalarType s) {
  return TensorOptions().dtype(s)
                        .device(DeviceType::CUDA)
                        .layout(kStrided);
}
// Device allocator used when wrapping raw THC storage.
Allocator* allocator() {
  return at::cuda::getCUDADeviceAllocator();
}
}
// Legacy TH dispatch for in-place masked fill with a uint8 mask: writes
// `value` into `self` through the per-dtype THCuda*Tensor_maskedFill
// kernel.  `mask` must be a dense CUDA Byte tensor; `self`'s scalar type
// selects the kernel and the Scalar conversion.  Returns `self`.
// NOTE: machine-generated dispatch - every case follows the same shape.
Tensor & _th_masked_fill_(Tensor & self, const Tensor & mask, Scalar value) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);

    switch (dispatch_scalar_type) {
        case ScalarType::Bool: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_", false, DeviceType::CUDA, ScalarType::Byte);
            auto value_ = value.toBool();
            THCudaBoolTensor_maskedFill(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Byte: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_", false, DeviceType::CUDA, ScalarType::Byte);
            auto value_ = value.toByte();
            THCudaByteTensor_maskedFill(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Char: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_", false, DeviceType::CUDA, ScalarType::Byte);
            auto value_ = value.toChar();
            THCudaCharTensor_maskedFill(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_", false, DeviceType::CUDA, ScalarType::Byte);
            auto value_ = value.toDouble();
            THCudaDoubleTensor_maskedFill(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Float: {
            // Float is the historical default: its THC type carries no suffix.
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_", false, DeviceType::CUDA, ScalarType::Byte);
            auto value_ = value.toFloat();
            THCudaTensor_maskedFill(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Int: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_", false, DeviceType::CUDA, ScalarType::Byte);
            auto value_ = value.toInt();
            THCudaIntTensor_maskedFill(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Long: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_", false, DeviceType::CUDA, ScalarType::Byte);
            auto value_ = value.toLong();
            THCudaLongTensor_maskedFill(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Short: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_", false, DeviceType::CUDA, ScalarType::Byte);
            auto value_ = value.toShort();
            THCudaShortTensor_maskedFill(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_", false, DeviceType::CUDA, ScalarType::Byte);
            auto value_ = value.toHalf();
            THCudaHalfTensor_maskedFill(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::BFloat16: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_", false, DeviceType::CUDA, ScalarType::Byte);
            auto value_ = value.toBFloat16();
            THCudaBFloat16Tensor_maskedFill(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        default:
            AT_ERROR("_th_masked_fill_ not supported on CUDAType for ", dispatch_scalar_type);
    }
    return self;
}
// Variant of _th_masked_fill_ for a Bool mask: identical dispatch shape,
// but `mask` must be a dense CUDA Bool tensor and the *_maskedFillBool
// kernels are used.  Returns `self`.
// NOTE: machine-generated dispatch - every case follows the same shape.
Tensor & _th_masked_fill_bool_(Tensor & self, const Tensor & mask, Scalar value) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);

    switch (dispatch_scalar_type) {
        case ScalarType::Bool: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto value_ = value.toBool();
            THCudaBoolTensor_maskedFillBool(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Byte: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto value_ = value.toByte();
            THCudaByteTensor_maskedFillBool(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Char: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto value_ = value.toChar();
            THCudaCharTensor_maskedFillBool(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto value_ = value.toDouble();
            THCudaDoubleTensor_maskedFillBool(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Float: {
            // Float is the historical default: its THC type carries no suffix.
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto value_ = value.toFloat();
            THCudaTensor_maskedFillBool(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Int: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto value_ = value.toInt();
            THCudaIntTensor_maskedFillBool(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Long: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto value_ = value.toLong();
            THCudaLongTensor_maskedFillBool(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Short: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto value_ = value.toShort();
            THCudaShortTensor_maskedFillBool(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto value_ = value.toHalf();
            THCudaHalfTensor_maskedFillBool(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        case ScalarType::BFloat16: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_fill_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_fill_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto value_ = value.toBFloat16();
            THCudaBFloat16Tensor_maskedFillBool(globalContext().getTHCState(), self_, mask_, value_);
            break;
        }
        default:
            AT_ERROR("_th_masked_fill_bool_ not supported on CUDAType for ", dispatch_scalar_type);
    }
    return self;
}
// Legacy TH dispatch for in-place masked scatter with a uint8 mask:
// copies elements of `source` into `self` through the per-dtype
// THCuda*Tensor_maskedCopy kernel.  `mask` must be a dense CUDA Byte
// tensor; `source` must match `self`'s scalar type.  Returns `self`.
// NOTE: machine-generated dispatch - every case follows the same shape.
Tensor & _th_masked_scatter_(Tensor & self, const Tensor & mask, const Tensor & source) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);

    switch (dispatch_scalar_type) {
        case ScalarType::Bool: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CUDA, ScalarType::Byte);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaBoolTensor_maskedCopy(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Byte: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CUDA, ScalarType::Byte);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaByteTensor_maskedCopy(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Char: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CUDA, ScalarType::Byte);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaCharTensor_maskedCopy(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CUDA, ScalarType::Byte);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaDoubleTensor_maskedCopy(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Float: {
            // Float is the historical default: its THC type carries no suffix.
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CUDA, ScalarType::Byte);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaTensor_maskedCopy(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Int: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CUDA, ScalarType::Byte);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaIntTensor_maskedCopy(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Long: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CUDA, ScalarType::Byte);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaLongTensor_maskedCopy(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Short: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CUDA, ScalarType::Byte);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaShortTensor_maskedCopy(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CUDA, ScalarType::Byte);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaHalfTensor_maskedCopy(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::BFloat16: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_", false, DeviceType::CUDA, ScalarType::Byte);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaBFloat16Tensor_maskedCopy(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        default:
            AT_ERROR("_th_masked_scatter_ not supported on CUDAType for ", dispatch_scalar_type);
    }
    return self;
}
// Variant of _th_masked_scatter_ for a Bool mask: identical dispatch
// shape, but `mask` must be a dense CUDA Bool tensor and the
// *_maskedCopyBool kernels are used.  Returns `self`.
// NOTE: machine-generated dispatch - every case follows the same shape.
Tensor & _th_masked_scatter_bool_(Tensor & self, const Tensor & mask, const Tensor & source) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);

    switch (dispatch_scalar_type) {
        case ScalarType::Bool: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaBoolTensor_maskedCopyBool(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Byte: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaByteTensor_maskedCopyBool(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Char: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaCharTensor_maskedCopyBool(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaDoubleTensor_maskedCopyBool(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Float: {
            // Float is the historical default: its THC type carries no suffix.
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaTensor_maskedCopyBool(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Int: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaIntTensor_maskedCopyBool(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Long: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaLongTensor_maskedCopyBool(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Short: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaShortTensor_maskedCopyBool(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaHalfTensor_maskedCopyBool(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        case ScalarType::BFloat16: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto mask_ = checked_dense_tensor_unwrap(mask, "mask", 2, "_th_masked_scatter_bool_", false, DeviceType::CUDA, ScalarType::Bool);
            auto source_ = checked_dense_tensor_unwrap(source, "source", 3, "_th_masked_scatter_bool_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaBFloat16Tensor_maskedCopyBool(globalContext().getTHCState(), self_, mask_, source_);
            break;
        }
        default:
            AT_ERROR("_th_masked_scatter_bool_ not supported on CUDAType for ", dispatch_scalar_type);
    }
    return self;
}
// Legacy TH dispatch for in-place index_copy on CUDA tensors.
// Infers the scalar type from `self`, unwraps each argument to its dense
// TH representation, and forwards to the type-matched
// THCuda*Tensor_indexCopy kernel. `index` is checked as a Long tensor;
// `source` is checked against self's scalar type. Returns `self`.
Tensor & _th_index_copy_(Tensor & self, int64_t dim, const Tensor & index, const Tensor & source) {
    // DeviceGuard omitted
    auto stype = infer_scalar_type(self);
    switch (stype) {
        case ScalarType::Bool: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_copy_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_copy_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 4, "_th_index_copy_", false, DeviceType::CUDA, stype);
            THCudaBoolTensor_indexCopy(globalContext().getTHCState(), self_p, dim, index_p, source_p);
            break;
        }
        case ScalarType::Byte: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_copy_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_copy_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 4, "_th_index_copy_", false, DeviceType::CUDA, stype);
            THCudaByteTensor_indexCopy(globalContext().getTHCState(), self_p, dim, index_p, source_p);
            break;
        }
        case ScalarType::Char: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_copy_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_copy_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 4, "_th_index_copy_", false, DeviceType::CUDA, stype);
            THCudaCharTensor_indexCopy(globalContext().getTHCState(), self_p, dim, index_p, source_p);
            break;
        }
        case ScalarType::Double: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_copy_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_copy_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 4, "_th_index_copy_", false, DeviceType::CUDA, stype);
            THCudaDoubleTensor_indexCopy(globalContext().getTHCState(), self_p, dim, index_p, source_p);
            break;
        }
        case ScalarType::Float: {
            // Float maps to the unsuffixed THCudaTensor_* family.
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_copy_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_copy_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 4, "_th_index_copy_", false, DeviceType::CUDA, stype);
            THCudaTensor_indexCopy(globalContext().getTHCState(), self_p, dim, index_p, source_p);
            break;
        }
        case ScalarType::Int: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_copy_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_copy_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 4, "_th_index_copy_", false, DeviceType::CUDA, stype);
            THCudaIntTensor_indexCopy(globalContext().getTHCState(), self_p, dim, index_p, source_p);
            break;
        }
        case ScalarType::Long: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_copy_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_copy_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 4, "_th_index_copy_", false, DeviceType::CUDA, stype);
            THCudaLongTensor_indexCopy(globalContext().getTHCState(), self_p, dim, index_p, source_p);
            break;
        }
        case ScalarType::Short: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_copy_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_copy_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 4, "_th_index_copy_", false, DeviceType::CUDA, stype);
            THCudaShortTensor_indexCopy(globalContext().getTHCState(), self_p, dim, index_p, source_p);
            break;
        }
        case ScalarType::Half: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_copy_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_copy_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 4, "_th_index_copy_", false, DeviceType::CUDA, stype);
            THCudaHalfTensor_indexCopy(globalContext().getTHCState(), self_p, dim, index_p, source_p);
            break;
        }
        default:
            AT_ERROR("_th_index_copy_ not supported on CUDAType for ", stype);
    }
    return self;
}
// Legacy TH dispatch for in-place put_ on CUDA tensors: scatters `source`
// into `self` at the flat positions in `index`, optionally accumulating.
// Infers self's scalar type, unwraps all arguments, and forwards to the
// matching THCuda*Tensor_put kernel. `index` is checked as a Long tensor;
// `source` must match self's scalar type. Returns `self`.
Tensor & _th_put_(Tensor & self, const Tensor & index, const Tensor & source, bool accumulate) {
    // DeviceGuard omitted
    auto stype = infer_scalar_type(self);
    switch (stype) {
        case ScalarType::Bool: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_put_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 2, "_th_put_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 3, "_th_put_", false, DeviceType::CUDA, stype);
            THCudaBoolTensor_put(globalContext().getTHCState(), self_p, index_p, source_p, accumulate);
            break;
        }
        case ScalarType::Byte: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_put_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 2, "_th_put_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 3, "_th_put_", false, DeviceType::CUDA, stype);
            THCudaByteTensor_put(globalContext().getTHCState(), self_p, index_p, source_p, accumulate);
            break;
        }
        case ScalarType::Char: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_put_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 2, "_th_put_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 3, "_th_put_", false, DeviceType::CUDA, stype);
            THCudaCharTensor_put(globalContext().getTHCState(), self_p, index_p, source_p, accumulate);
            break;
        }
        case ScalarType::Double: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_put_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 2, "_th_put_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 3, "_th_put_", false, DeviceType::CUDA, stype);
            THCudaDoubleTensor_put(globalContext().getTHCState(), self_p, index_p, source_p, accumulate);
            break;
        }
        case ScalarType::Float: {
            // Float maps to the unsuffixed THCudaTensor_* family.
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_put_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 2, "_th_put_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 3, "_th_put_", false, DeviceType::CUDA, stype);
            THCudaTensor_put(globalContext().getTHCState(), self_p, index_p, source_p, accumulate);
            break;
        }
        case ScalarType::Int: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_put_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 2, "_th_put_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 3, "_th_put_", false, DeviceType::CUDA, stype);
            THCudaIntTensor_put(globalContext().getTHCState(), self_p, index_p, source_p, accumulate);
            break;
        }
        case ScalarType::Long: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_put_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 2, "_th_put_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 3, "_th_put_", false, DeviceType::CUDA, stype);
            THCudaLongTensor_put(globalContext().getTHCState(), self_p, index_p, source_p, accumulate);
            break;
        }
        case ScalarType::Short: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_put_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 2, "_th_put_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 3, "_th_put_", false, DeviceType::CUDA, stype);
            THCudaShortTensor_put(globalContext().getTHCState(), self_p, index_p, source_p, accumulate);
            break;
        }
        case ScalarType::Half: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_put_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 2, "_th_put_", false, DeviceType::CUDA, ScalarType::Long);
            auto source_p = checked_dense_tensor_unwrap(source, "source", 3, "_th_put_", false, DeviceType::CUDA, stype);
            THCudaHalfTensor_put(globalContext().getTHCState(), self_p, index_p, source_p, accumulate);
            break;
        }
        default:
            AT_ERROR("_th_put_ not supported on CUDAType for ", stype);
    }
    return self;
}
// Legacy TH dispatch for in-place index_fill_ on CUDA tensors: fills the
// slices of `self` selected along `dim` by `index` with `value`.
// Infers self's scalar type, converts the Scalar to the matching C type
// (value.toBool()/toByte()/.../toHalf()), and forwards to the type-matched
// THCuda*Tensor_indexFill kernel. `index` is checked as a Long tensor.
// Returns `self`.
Tensor & _th_index_fill_(Tensor & self, int64_t dim, const Tensor & index, Scalar value) {
    // DeviceGuard omitted
    auto stype = infer_scalar_type(self);
    switch (stype) {
        case ScalarType::Bool: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_fill_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_fill_", false, DeviceType::CUDA, ScalarType::Long);
            auto fill_v = value.toBool();
            THCudaBoolTensor_indexFill(globalContext().getTHCState(), self_p, dim, index_p, fill_v);
            break;
        }
        case ScalarType::Byte: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_fill_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_fill_", false, DeviceType::CUDA, ScalarType::Long);
            auto fill_v = value.toByte();
            THCudaByteTensor_indexFill(globalContext().getTHCState(), self_p, dim, index_p, fill_v);
            break;
        }
        case ScalarType::Char: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_fill_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_fill_", false, DeviceType::CUDA, ScalarType::Long);
            auto fill_v = value.toChar();
            THCudaCharTensor_indexFill(globalContext().getTHCState(), self_p, dim, index_p, fill_v);
            break;
        }
        case ScalarType::Double: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_fill_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_fill_", false, DeviceType::CUDA, ScalarType::Long);
            auto fill_v = value.toDouble();
            THCudaDoubleTensor_indexFill(globalContext().getTHCState(), self_p, dim, index_p, fill_v);
            break;
        }
        case ScalarType::Float: {
            // Float maps to the unsuffixed THCudaTensor_* family.
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_fill_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_fill_", false, DeviceType::CUDA, ScalarType::Long);
            auto fill_v = value.toFloat();
            THCudaTensor_indexFill(globalContext().getTHCState(), self_p, dim, index_p, fill_v);
            break;
        }
        case ScalarType::Int: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_fill_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_fill_", false, DeviceType::CUDA, ScalarType::Long);
            auto fill_v = value.toInt();
            THCudaIntTensor_indexFill(globalContext().getTHCState(), self_p, dim, index_p, fill_v);
            break;
        }
        case ScalarType::Long: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_fill_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_fill_", false, DeviceType::CUDA, ScalarType::Long);
            auto fill_v = value.toLong();
            THCudaLongTensor_indexFill(globalContext().getTHCState(), self_p, dim, index_p, fill_v);
            break;
        }
        case ScalarType::Short: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_fill_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_fill_", false, DeviceType::CUDA, ScalarType::Long);
            auto fill_v = value.toShort();
            THCudaShortTensor_indexFill(globalContext().getTHCState(), self_p, dim, index_p, fill_v);
            break;
        }
        case ScalarType::Half: {
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_index_fill_", false, DeviceType::CUDA, stype);
            auto index_p = checked_dense_tensor_unwrap(index, "index", 3, "_th_index_fill_", false, DeviceType::CUDA, ScalarType::Long);
            auto fill_v = value.toHalf();
            THCudaHalfTensor_indexFill(globalContext().getTHCState(), self_p, dim, index_p, fill_v);
            break;
        }
        default:
            AT_ERROR("_th_index_fill_ not supported on CUDAType for ", stype);
    }
    return self;
}
// Legacy TH dispatch for mode-with-output-tensors on CUDA: writes the
// per-slice mode of `self` along `dim` into `values` and the winning
// positions into `indices` (unwrapped as a Long tensor). Dispatches on
// self's inferred scalar type; Bool is not handled here (falls through
// to the error default). Returns references to (values, indices).
std::tuple<Tensor &,Tensor &> _th_mode_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool keepdim) {
    // DeviceGuard omitted
    auto stype = infer_scalar_type(self);
    switch (stype) {
        case ScalarType::Byte: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_mode_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_mode_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode_out", false, DeviceType::CUDA, stype);
            THCudaByteTensor_mode(globalContext().getTHCState(), values_p, indices_p, self_p, dim, keepdim);
            break;
        }
        case ScalarType::Char: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_mode_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_mode_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode_out", false, DeviceType::CUDA, stype);
            THCudaCharTensor_mode(globalContext().getTHCState(), values_p, indices_p, self_p, dim, keepdim);
            break;
        }
        case ScalarType::Double: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_mode_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_mode_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode_out", false, DeviceType::CUDA, stype);
            THCudaDoubleTensor_mode(globalContext().getTHCState(), values_p, indices_p, self_p, dim, keepdim);
            break;
        }
        case ScalarType::Float: {
            // Float maps to the unsuffixed THCudaTensor_* family.
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_mode_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_mode_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode_out", false, DeviceType::CUDA, stype);
            THCudaTensor_mode(globalContext().getTHCState(), values_p, indices_p, self_p, dim, keepdim);
            break;
        }
        case ScalarType::Int: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_mode_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_mode_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode_out", false, DeviceType::CUDA, stype);
            THCudaIntTensor_mode(globalContext().getTHCState(), values_p, indices_p, self_p, dim, keepdim);
            break;
        }
        case ScalarType::Long: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_mode_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_mode_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode_out", false, DeviceType::CUDA, stype);
            THCudaLongTensor_mode(globalContext().getTHCState(), values_p, indices_p, self_p, dim, keepdim);
            break;
        }
        case ScalarType::Short: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_mode_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_mode_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode_out", false, DeviceType::CUDA, stype);
            THCudaShortTensor_mode(globalContext().getTHCState(), values_p, indices_p, self_p, dim, keepdim);
            break;
        }
        case ScalarType::Half: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_mode_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_mode_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode_out", false, DeviceType::CUDA, stype);
            THCudaHalfTensor_mode(globalContext().getTHCState(), values_p, indices_p, self_p, dim, keepdim);
            break;
        }
        default:
            AT_ERROR("_th_mode_out not supported on CUDAType for ", stype);
    }
    return std::tuple<Tensor &, Tensor &>(values, indices);
}
std::tuple<Tensor,Tensor> _th_mode(const Tensor & self, int64_t dim, bool keepdim) {
// DeviceGuard omitted
auto dispatch_scalar_type = infer_scalar_type(self);
auto values_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
auto values = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(values_));
auto indices_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(ScalarType::Long)).release();
auto indices = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(indices_));
switch (dispatch_scalar_type) {
case ScalarType::Byte: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaByteTensor_mode(globalContext().getTHCState(), values_, indices_, self_, dim, keepdim);
break;
}
case ScalarType::Char: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaCharTensor_mode(globalContext().getTHCState(), values_, indices_, self_, dim, keepdim);
break;
}
case ScalarType::Double: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaDoubleTensor_mode(globalContext().getTHCState(), values_, indices_, self_, dim, keepdim);
break;
}
case ScalarType::Float: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaTensor_mode(globalContext().getTHCState(), values_, indices_, self_, dim, keepdim);
break;
}
case ScalarType::Int: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaIntTensor_mode(globalContext().getTHCState(), values_, indices_, self_, dim, keepdim);
break;
}
case ScalarType::Long: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaLongTensor_mode(globalContext().getTHCState(), values_, indices_, self_, dim, keepdim);
break;
}
case ScalarType::Short: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaShortTensor_mode(globalContext().getTHCState(), values_, indices_, self_, dim, keepdim);
break;
}
case ScalarType::Half: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_mode", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaHalfTensor_mode(globalContext().getTHCState(), values_, indices_, self_, dim, keepdim);
break;
}
default:
AT_ERROR("_th_mode not supported on CUDAType for ", dispatch_scalar_type);
}
return std::tuple<Tensor, Tensor>(values, indices);
}
// Legacy TH dispatch for sort-with-output-tensors on CUDA: sorts `self`
// along `dim` (descending if requested), writing sorted values into
// `values` and the source positions into `indices` (unwrapped as a Long
// tensor). Dispatches on self's inferred scalar type; Bool is not
// handled here. Returns references to (values, indices).
std::tuple<Tensor &,Tensor &> _th_sort_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t dim, bool descending) {
    // DeviceGuard omitted
    auto stype = infer_scalar_type(self);
    switch (stype) {
        case ScalarType::Byte: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CUDA, stype);
            THCudaByteTensor_sort(globalContext().getTHCState(), values_p, indices_p, self_p, dim, descending);
            break;
        }
        case ScalarType::Char: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CUDA, stype);
            THCudaCharTensor_sort(globalContext().getTHCState(), values_p, indices_p, self_p, dim, descending);
            break;
        }
        case ScalarType::Double: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CUDA, stype);
            THCudaDoubleTensor_sort(globalContext().getTHCState(), values_p, indices_p, self_p, dim, descending);
            break;
        }
        case ScalarType::Float: {
            // Float maps to the unsuffixed THCudaTensor_* family.
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CUDA, stype);
            THCudaTensor_sort(globalContext().getTHCState(), values_p, indices_p, self_p, dim, descending);
            break;
        }
        case ScalarType::Int: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CUDA, stype);
            THCudaIntTensor_sort(globalContext().getTHCState(), values_p, indices_p, self_p, dim, descending);
            break;
        }
        case ScalarType::Long: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CUDA, stype);
            THCudaLongTensor_sort(globalContext().getTHCState(), values_p, indices_p, self_p, dim, descending);
            break;
        }
        case ScalarType::Short: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CUDA, stype);
            THCudaShortTensor_sort(globalContext().getTHCState(), values_p, indices_p, self_p, dim, descending);
            break;
        }
        case ScalarType::Half: {
            auto values_p = checked_dense_tensor_unwrap(values, "values", 0, "_th_sort_out", false, DeviceType::CUDA, stype);
            auto indices_p = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_sort_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_p = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort_out", false, DeviceType::CUDA, stype);
            THCudaHalfTensor_sort(globalContext().getTHCState(), values_p, indices_p, self_p, dim, descending);
            break;
        }
        default:
            AT_ERROR("_th_sort_out not supported on CUDAType for ", stype);
    }
    return std::tuple<Tensor &, Tensor &>(values, indices);
}
std::tuple<Tensor,Tensor> _th_sort(const Tensor & self, int64_t dim, bool descending) {
// DeviceGuard omitted
auto dispatch_scalar_type = infer_scalar_type(self);
auto values_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
auto values = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(values_));
auto indices_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(ScalarType::Long)).release();
auto indices = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(indices_));
switch (dispatch_scalar_type) {
case ScalarType::Byte: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaByteTensor_sort(globalContext().getTHCState(), values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Char: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaCharTensor_sort(globalContext().getTHCState(), values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Double: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaDoubleTensor_sort(globalContext().getTHCState(), values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Float: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaTensor_sort(globalContext().getTHCState(), values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Int: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaIntTensor_sort(globalContext().getTHCState(), values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Long: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaLongTensor_sort(globalContext().getTHCState(), values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Short: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaShortTensor_sort(globalContext().getTHCState(), values_, indices_, self_, dim, descending);
break;
}
case ScalarType::Half: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_sort", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaHalfTensor_sort(globalContext().getTHCState(), values_, indices_, self_, dim, descending);
break;
}
default:
AT_ERROR("_th_sort not supported on CUDAType for ", dispatch_scalar_type);
}
return std::tuple<Tensor, Tensor>(values, indices);
}
// Out-variant of top-k: writes the k largest (or smallest, if !largest)
// values of `self` along `dim` into the caller-provided `values`/`indices`
// tensors and returns both by reference as a tuple.
// Dispatches on self's inferred scalar type to the matching legacy THC CUDA
// kernel. Note: `indices` is always unwrapped/validated as a Long tensor,
// regardless of the dispatch type.
std::tuple<Tensor &,Tensor &> _th_topk_out(Tensor & values, Tensor & indices, const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Byte: {
            auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_topk_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaByteTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Char: {
            auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_topk_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaCharTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Double: {
            auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_topk_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaDoubleTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Float: {
            auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_topk_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            // Float uses the unsuffixed THCudaTensor_* family (TH naming convention).
            THCudaTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Int: {
            auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_topk_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaIntTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Long: {
            auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_topk_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaLongTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Short: {
            auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_topk_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaShortTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Half: {
            auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_topk_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaHalfTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::BFloat16: {
            auto values_ = checked_dense_tensor_unwrap(values, "values", 0, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto indices_ = checked_dense_tensor_unwrap(indices, "indices", 0, "_th_topk_out", false, DeviceType::CUDA, ScalarType::Long);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaBFloat16Tensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        default:
            // Any other dtype (e.g. Bool, complex) is unsupported by the legacy path.
            AT_ERROR("_th_topk_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    return std::tuple<Tensor &, Tensor &>(values, indices);
}
// Allocating variant of top-k: creates fresh `values` (self's dtype) and
// `indices` (Long) CUDA tensors, then dispatches on self's inferred scalar
// type to the matching legacy THC kernel, which resizes and fills them.
// Returns the pair by value.
std::tuple<Tensor,Tensor> _th_topk(const Tensor & self, int64_t k, int64_t dim, bool largest, bool sorted) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    // Raw TensorImpl* is released from the intrusive_ptr and immediately
    // reclaimed into `values`/`indices`; the raw pointer is what the THC
    // kernels below write into. Storage starts at 0 bytes (kernel resizes).
    auto values_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto values = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(values_));
    auto indices_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(ScalarType::Long)).release();
    auto indices = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(indices_));
    switch (dispatch_scalar_type) {
        case ScalarType::Byte: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaByteTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Char: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaCharTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaDoubleTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Int: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaIntTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Long: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaLongTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Short: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaShortTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaHalfTensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        case ScalarType::BFloat16: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_topk", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaBFloat16Tensor_topk(globalContext().getTHCState(), values_, indices_, self_, k, dim, largest, sorted);
            break;
        }
        default:
            AT_ERROR("_th_topk not supported on CUDAType for ", dispatch_scalar_type);
    }
    return std::tuple<Tensor, Tensor>(values, indices);
}
// Out-variant of renorm: renormalizes sub-tensors of `self` along `dim` so
// their p-norm does not exceed `maxnorm`, writing into the caller-provided
// `result`. Only floating-point dtypes (Double/Float/Half) are supported by
// the legacy THC path; `p`/`maxnorm` are converted to the matching C type.
Tensor & _th_renorm_out(Tensor & result, const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_renorm_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_renorm_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto p_ = p.toDouble();
            auto maxnorm_ = maxnorm.toDouble();
            THCudaDoubleTensor_renorm(globalContext().getTHCState(), result_, self_, p_, dim, maxnorm_);
            break;
        }
        case ScalarType::Float: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_renorm_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_renorm_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto p_ = p.toFloat();
            auto maxnorm_ = maxnorm.toFloat();
            THCudaTensor_renorm(globalContext().getTHCState(), result_, self_, p_, dim, maxnorm_);
            break;
        }
        case ScalarType::Half: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_renorm_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_renorm_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto p_ = p.toHalf();
            auto maxnorm_ = maxnorm.toHalf();
            THCudaHalfTensor_renorm(globalContext().getTHCState(), result_, self_, p_, dim, maxnorm_);
            break;
        }
        default:
            // Integral and other dtypes fall through to an error: renorm is float-only here.
            AT_ERROR("_th_renorm_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    return result;
}
// Allocating variant of renorm: creates a fresh CUDA result tensor of self's
// dtype, then dispatches to the legacy THC renorm kernel (Double/Float/Half
// only), which resizes and fills it. Returns the result by value.
Tensor _th_renorm(const Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    // `result_` is the raw TensorImpl* the THC kernel writes into; `result`
    // owns it via reclaim. Storage starts empty (kernel resizes).
    auto result_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto result = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(result_));
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_renorm", false, DeviceType::CUDA, dispatch_scalar_type);
            auto p_ = p.toDouble();
            auto maxnorm_ = maxnorm.toDouble();
            THCudaDoubleTensor_renorm(globalContext().getTHCState(), result_, self_, p_, dim, maxnorm_);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_renorm", false, DeviceType::CUDA, dispatch_scalar_type);
            auto p_ = p.toFloat();
            auto maxnorm_ = maxnorm.toFloat();
            THCudaTensor_renorm(globalContext().getTHCState(), result_, self_, p_, dim, maxnorm_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_renorm", false, DeviceType::CUDA, dispatch_scalar_type);
            auto p_ = p.toHalf();
            auto maxnorm_ = maxnorm.toHalf();
            THCudaHalfTensor_renorm(globalContext().getTHCState(), result_, self_, p_, dim, maxnorm_);
            break;
        }
        default:
            AT_ERROR("_th_renorm not supported on CUDAType for ", dispatch_scalar_type);
    }
    return result;
}
// In-place variant of renorm: `self_` is passed to the THC kernel as both
// destination and source, so `self` is modified in place and returned.
// Float-only dtypes (Double/Float/Half), matching the other renorm variants.
Tensor & _th_renorm_(Tensor & self, Scalar p, int64_t dim, Scalar maxnorm) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_renorm_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto p_ = p.toDouble();
            auto maxnorm_ = maxnorm.toDouble();
            // dst == src: in-place update.
            THCudaDoubleTensor_renorm(globalContext().getTHCState(), self_, self_, p_, dim, maxnorm_);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_renorm_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto p_ = p.toFloat();
            auto maxnorm_ = maxnorm.toFloat();
            THCudaTensor_renorm(globalContext().getTHCState(), self_, self_, p_, dim, maxnorm_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_renorm_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto p_ = p.toHalf();
            auto maxnorm_ = maxnorm.toHalf();
            THCudaHalfTensor_renorm(globalContext().getTHCState(), self_, self_, p_, dim, maxnorm_);
            break;
        }
        default:
            AT_ERROR("_th_renorm_ not supported on CUDAType for ", dispatch_scalar_type);
    }
    return self;
}
// Out-variant of elementwise fmod with a Scalar divisor: result = fmod(self,
// other), written into the caller-provided `result`. Dispatches on self's
// inferred scalar type; `other` is converted to the matching C scalar type
// (toByte/toChar/... below) before the THC kernel call.
Tensor & _th_fmod_out(Tensor & result, const Tensor & self, Scalar other) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Byte: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toByte();
            THCudaByteTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Char: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toChar();
            THCudaCharTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Double: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toDouble();
            THCudaDoubleTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Float: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toFloat();
            THCudaTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Int: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toInt();
            THCudaIntTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Long: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toLong();
            THCudaLongTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Short: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toShort();
            THCudaShortTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Half: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toHalf();
            THCudaHalfTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        default:
            // Note: no BFloat16 case here, unlike topk above.
            AT_ERROR("_th_fmod_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    return result;
}
// Allocating variant of elementwise fmod with a Scalar divisor: creates a
// fresh CUDA result tensor of self's dtype, then dispatches to the legacy
// THC fmod kernel, which resizes and fills it. `other` is converted to the
// matching C scalar type per dtype case.
Tensor _th_fmod(const Tensor & self, Scalar other) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    // Raw TensorImpl* target for the THC kernel; ownership held by `result`.
    auto result_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto result = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(result_));
    switch (dispatch_scalar_type) {
        case ScalarType::Byte: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toByte();
            THCudaByteTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Char: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toChar();
            THCudaCharTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toDouble();
            THCudaDoubleTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toFloat();
            THCudaTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Int: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toInt();
            THCudaIntTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Long: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toLong();
            THCudaLongTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Short: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toShort();
            THCudaShortTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toHalf();
            THCudaHalfTensor_fmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        default:
            AT_ERROR("_th_fmod not supported on CUDAType for ", dispatch_scalar_type);
    }
    return result;
}
// Out-variant of elementwise fmod with a Tensor divisor: result =
// fmod(self, other), written into the caller-provided `result`. All three
// tensors are checked against self's inferred scalar type; the THC
// "cfmod" (component-wise fmod) kernel does the work.
Tensor & _th_fmod_out(Tensor & result, const Tensor & self, const Tensor & other) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Byte: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaByteTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Char: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaCharTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Double: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaDoubleTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Float: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Int: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaIntTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Long: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaLongTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Short: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaShortTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Half: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaHalfTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        default:
            AT_ERROR("_th_fmod_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    return result;
}
// Allocating variant of elementwise fmod with a Tensor divisor: creates a
// fresh CUDA result tensor of self's dtype and dispatches to the legacy THC
// "cfmod" (component-wise fmod) kernel, which resizes and fills it.
// `other` must unwrap to the same dtype as `self`.
Tensor _th_fmod(const Tensor & self, const Tensor & other) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    // Raw TensorImpl* target for the THC kernel; ownership held by `result`.
    auto result_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto result = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(result_));
    switch (dispatch_scalar_type) {
        case ScalarType::Byte: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaByteTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Char: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaCharTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaDoubleTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Int: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaIntTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Long: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaLongTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Short: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaShortTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_fmod", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaHalfTensor_cfmod(globalContext().getTHCState(), result_, self_, other_);
            break;
        }
        default:
            AT_ERROR("_th_fmod not supported on CUDAType for ", dispatch_scalar_type);
    }
    return result;
}
// In-place variant of elementwise fmod with a Scalar divisor: `self_` is
// passed to the THC kernel as both destination and source, so `self` is
// modified in place and returned. `other` is converted to the matching C
// scalar type per dtype case.
Tensor & _th_fmod_(Tensor & self, Scalar other) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Byte: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toByte();
            // dst == src: in-place update.
            THCudaByteTensor_fmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Char: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toChar();
            THCudaCharTensor_fmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toDouble();
            THCudaDoubleTensor_fmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toFloat();
            THCudaTensor_fmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Int: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toInt();
            THCudaIntTensor_fmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Long: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toLong();
            THCudaLongTensor_fmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Short: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toShort();
            THCudaShortTensor_fmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = other.toHalf();
            THCudaHalfTensor_fmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        default:
            AT_ERROR("_th_fmod_ not supported on CUDAType for ", dispatch_scalar_type);
    }
    return self;
}
// In-place variant of elementwise fmod with a Tensor divisor: `self_` is
// passed to the THC "cfmod" kernel as both destination and source, so `self`
// is modified in place and returned. `other` must unwrap to self's dtype.
// (Note `other` is reported as argument position 3 here, unlike the
// out/allocating variants which use position 2 — generator artifact.)
Tensor & _th_fmod_(Tensor & self, const Tensor & other) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Byte: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 3, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            // dst == src: in-place update.
            THCudaByteTensor_cfmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Char: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 3, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaCharTensor_cfmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 3, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaDoubleTensor_cfmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 3, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaTensor_cfmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Int: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 3, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaIntTensor_cfmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Long: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 3, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaLongTensor_cfmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Short: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 3, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaShortTensor_cfmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 3, "_th_fmod_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaHalfTensor_cfmod(globalContext().getTHCState(), self_, self_, other_);
            break;
        }
        default:
            AT_ERROR("_th_fmod_ not supported on CUDAType for ", dispatch_scalar_type);
    }
    return self;
}
Tensor & _th_cross_kernel_out(Tensor & result, const Tensor & self, const Tensor & other, int64_t dim) {
// DeviceGuard omitted
auto dispatch_scalar_type = infer_scalar_type(self);
switch (dispatch_scalar_type) {
case ScalarType::Byte: {
auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaByteTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
break;
}
case ScalarType::Char: {
auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaCharTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
break;
}
case ScalarType::Double: {
auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaDoubleTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
break;
}
case ScalarType::Float: {
auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
break;
}
case ScalarType::Int: {
auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaIntTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
break;
}
case ScalarType::Long: {
auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaLongTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
break;
}
case ScalarType::Short: {
auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaShortTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
break;
}
case ScalarType::Half: {
auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel_out", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaHalfTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
break;
}
default:
AT_ERROR("_th_cross_kernel_out not supported on CUDAType for ", dispatch_scalar_type);
}
return result;
}
// Computes the cross-product kernel of `self` and `other` along dimension `dim`
// on CUDA, returning a freshly allocated tensor of self's scalar type.
// Dispatches on that scalar type to the matching legacy THCuda*Tensor kernel;
// dtypes without a case fall through to AT_ERROR.
// NOTE: `result_` is released from the intrusive_ptr and immediately reclaimed
// by `result`, so `result` owns the TensorImpl; the same raw pointer is then
// reused as the THC kernel's output argument.
Tensor _th_cross_kernel(const Tensor & self, const Tensor & other, int64_t dim) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    // Allocate an empty CUDA tensor of the dispatched dtype to receive the result.
    auto result_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto result = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(result_));
    switch (dispatch_scalar_type) {
        case ScalarType::Byte: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaByteTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
            break;
        }
        case ScalarType::Char: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaCharTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
            break;
        }
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaDoubleTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
            break;
        }
        case ScalarType::Int: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaIntTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
            break;
        }
        case ScalarType::Long: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaLongTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
            break;
        }
        case ScalarType::Short: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaShortTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            auto other_ = checked_dense_tensor_unwrap(other, "other", 2, "_th_cross_kernel", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaHalfTensor_crossKernel(globalContext().getTHCState(), result_, self_, other_, dim);
            break;
        }
        default:
            AT_ERROR("_th_cross_kernel not supported on CUDAType for ", dispatch_scalar_type);
    }
    return result;
}
// Least-squares solve (legacy THC `gels`) on CUDA, writing into the provided
// outputs res1/res2. Only Double and Float are supported; any other dtype of
// `self` raises via AT_ERROR.
std::tuple<Tensor &,Tensor &> _th_gels_out(Tensor & res1, Tensor & res2, const Tensor & self, const Tensor & A) {
    // DeviceGuard omitted
    const auto stype = infer_scalar_type(self);
    if (stype == ScalarType::Double) {
        // Unwrap each tensor, checking device (CUDA) and dtype before the raw THC call.
        auto res1_ = checked_dense_tensor_unwrap(res1, "res1", 0, "_th_gels_out", false, DeviceType::CUDA, stype);
        auto res2_ = checked_dense_tensor_unwrap(res2, "res2", 0, "_th_gels_out", false, DeviceType::CUDA, stype);
        auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_gels_out", false, DeviceType::CUDA, stype);
        auto A_ = checked_dense_tensor_unwrap(A, "A", 2, "_th_gels_out", false, DeviceType::CUDA, stype);
        THCudaDoubleTensor_gels(globalContext().getTHCState(), res1_, res2_, self_, A_);
    } else if (stype == ScalarType::Float) {
        auto res1_ = checked_dense_tensor_unwrap(res1, "res1", 0, "_th_gels_out", false, DeviceType::CUDA, stype);
        auto res2_ = checked_dense_tensor_unwrap(res2, "res2", 0, "_th_gels_out", false, DeviceType::CUDA, stype);
        auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_gels_out", false, DeviceType::CUDA, stype);
        auto A_ = checked_dense_tensor_unwrap(A, "A", 2, "_th_gels_out", false, DeviceType::CUDA, stype);
        THCudaTensor_gels(globalContext().getTHCState(), res1_, res2_, self_, A_);
    } else {
        AT_ERROR("_th_gels_out not supported on CUDAType for ", stype);
    }
    return std::tie(res1, res2);
}
std::tuple<Tensor,Tensor> _th_gels(const Tensor & self, const Tensor & A) {
// DeviceGuard omitted
auto dispatch_scalar_type = infer_scalar_type(self);
auto res1_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
auto res1 = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(res1_));
auto res2_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
auto res2 = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(res2_));
switch (dispatch_scalar_type) {
case ScalarType::Double: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_gels", false, DeviceType::CUDA, dispatch_scalar_type);
auto A_ = checked_dense_tensor_unwrap(A, "A", 2, "_th_gels", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaDoubleTensor_gels(globalContext().getTHCState(), res1_, res2_, self_, A_);
break;
}
case ScalarType::Float: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_gels", false, DeviceType::CUDA, dispatch_scalar_type);
auto A_ = checked_dense_tensor_unwrap(A, "A", 2, "_th_gels", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaTensor_gels(globalContext().getTHCState(), res1_, res2_, self_, A_);
break;
}
default:
AT_ERROR("_th_gels not supported on CUDAType for ", dispatch_scalar_type);
}
return std::tuple<Tensor, Tensor>(res1, res2);
}
// Inverse from Cholesky factor (legacy THC `potri`) on CUDA, writing into
// `output`. `upper` selects which triangle the factor occupies. Only Double
// and Float are supported.
Tensor & _th_potri_out(Tensor & output, const Tensor & self, bool upper) {
    // DeviceGuard omitted
    const auto stype = infer_scalar_type(self);
    if (stype == ScalarType::Double) {
        auto output_ = checked_dense_tensor_unwrap(output, "output", 0, "_th_potri_out", false, DeviceType::CUDA, stype);
        auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_potri_out", false, DeviceType::CUDA, stype);
        THCudaDoubleTensor_potri(globalContext().getTHCState(), output_, self_, upper);
    } else if (stype == ScalarType::Float) {
        auto output_ = checked_dense_tensor_unwrap(output, "output", 0, "_th_potri_out", false, DeviceType::CUDA, stype);
        auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_potri_out", false, DeviceType::CUDA, stype);
        THCudaTensor_potri(globalContext().getTHCState(), output_, self_, upper);
    } else {
        AT_ERROR("_th_potri_out not supported on CUDAType for ", stype);
    }
    return output;
}
// Inverse from Cholesky factor (legacy THC `potri`) on CUDA, returning a
// freshly allocated tensor of self's scalar type. Only Double and Float are
// supported.
Tensor _th_potri(const Tensor & self, bool upper) {
    // DeviceGuard omitted
    const auto stype = infer_scalar_type(self);
    // `output` owns the TensorImpl after reclaim; the raw pointer is reused as
    // the kernel's output argument.
    auto output_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true), DispatchKey::CUDA, scalarTypeToTypeMeta(stype)).release();
    auto output = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(output_));
    if (stype == ScalarType::Double) {
        auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_potri", false, DeviceType::CUDA, stype);
        THCudaDoubleTensor_potri(globalContext().getTHCState(), output_, self_, upper);
    } else if (stype == ScalarType::Float) {
        auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_potri", false, DeviceType::CUDA, stype);
        THCudaTensor_potri(globalContext().getTHCState(), output_, self_, upper);
    } else {
        AT_ERROR("_th_potri not supported on CUDAType for ", stype);
    }
    return output;
}
// QR factorization helper (legacy THC `geqrf`) on CUDA, writing into the
// provided outputs res1/res2. Only Double and Float are supported.
std::tuple<Tensor &,Tensor &> _th_geqrf_out(Tensor & res1, Tensor & res2, const Tensor & self) {
    // DeviceGuard omitted
    const auto stype = infer_scalar_type(self);
    if (stype == ScalarType::Double) {
        auto res1_ = checked_dense_tensor_unwrap(res1, "res1", 0, "_th_geqrf_out", false, DeviceType::CUDA, stype);
        auto res2_ = checked_dense_tensor_unwrap(res2, "res2", 0, "_th_geqrf_out", false, DeviceType::CUDA, stype);
        auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_geqrf_out", false, DeviceType::CUDA, stype);
        THCudaDoubleTensor_geqrf(globalContext().getTHCState(), res1_, res2_, self_);
    } else if (stype == ScalarType::Float) {
        auto res1_ = checked_dense_tensor_unwrap(res1, "res1", 0, "_th_geqrf_out", false, DeviceType::CUDA, stype);
        auto res2_ = checked_dense_tensor_unwrap(res2, "res2", 0, "_th_geqrf_out", false, DeviceType::CUDA, stype);
        auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_geqrf_out", false, DeviceType::CUDA, stype);
        THCudaTensor_geqrf(globalContext().getTHCState(), res1_, res2_, self_);
    } else {
        AT_ERROR("_th_geqrf_out not supported on CUDAType for ", stype);
    }
    return std::tie(res1, res2);
}
std::tuple<Tensor,Tensor> _th_geqrf(const Tensor & self) {
// DeviceGuard omitted
auto dispatch_scalar_type = infer_scalar_type(self);
auto res1_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
auto res1 = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(res1_));
auto res2_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
auto res2 = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(res2_));
switch (dispatch_scalar_type) {
case ScalarType::Double: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_geqrf", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaDoubleTensor_geqrf(globalContext().getTHCState(), res1_, res2_, self_);
break;
}
case ScalarType::Float: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_geqrf", false, DeviceType::CUDA, dispatch_scalar_type);
THCudaTensor_geqrf(globalContext().getTHCState(), res1_, res2_, self_);
break;
}
default:
AT_ERROR("_th_geqrf not supported on CUDAType for ", dispatch_scalar_type);
}
return std::tuple<Tensor, Tensor>(res1, res2);
}
// Builds the alias-method sampling tables for `probs` into the provided
// outputs: `J` receives the (Long) alias indices and `q` the acceptance
// probabilities (same dtype as probs).
//
// BUGFIX: dispatch on probs' scalar type, not J's. J is unconditionally
// checked against ScalarType::Long inside every case, so dispatching on
// infer_scalar_type(J) could never succeed: a Long-typed J selects the default
// branch (AT_ERROR), while a floating-point J would fail the Long check.
// The non-out variant _th_multinomial_alias_setup already dispatches on probs.
std::tuple<Tensor &,Tensor &> _th_multinomial_alias_setup_out(Tensor & J, Tensor & q, const Tensor & probs) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(probs);
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto probs_ = checked_dense_tensor_unwrap(probs, "probs", 1, "_th_multinomial_alias_setup_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto J_ = checked_dense_tensor_unwrap(J, "J", 1, "_th_multinomial_alias_setup_out", false, DeviceType::CUDA, ScalarType::Long);
            auto q_ = checked_dense_tensor_unwrap(q, "q", 1, "_th_multinomial_alias_setup_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaDoubleTensor_multinomialAliasSetup(globalContext().getTHCState(), probs_, J_, q_);
            break;
        }
        case ScalarType::Float: {
            auto probs_ = checked_dense_tensor_unwrap(probs, "probs", 1, "_th_multinomial_alias_setup_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto J_ = checked_dense_tensor_unwrap(J, "J", 1, "_th_multinomial_alias_setup_out", false, DeviceType::CUDA, ScalarType::Long);
            auto q_ = checked_dense_tensor_unwrap(q, "q", 1, "_th_multinomial_alias_setup_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaTensor_multinomialAliasSetup(globalContext().getTHCState(), probs_, J_, q_);
            break;
        }
        case ScalarType::Half: {
            auto probs_ = checked_dense_tensor_unwrap(probs, "probs", 1, "_th_multinomial_alias_setup_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto J_ = checked_dense_tensor_unwrap(J, "J", 1, "_th_multinomial_alias_setup_out", false, DeviceType::CUDA, ScalarType::Long);
            auto q_ = checked_dense_tensor_unwrap(q, "q", 1, "_th_multinomial_alias_setup_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaHalfTensor_multinomialAliasSetup(globalContext().getTHCState(), probs_, J_, q_);
            break;
        }
        default:
            AT_ERROR("_th_multinomial_alias_setup_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    return std::tuple<Tensor &, Tensor &>(J, q);
}
// Builds the alias-method sampling tables for `probs`, returning freshly
// allocated CUDA tensors: J with dtype Long (alias indices) and q with probs'
// dtype (acceptance probabilities). Dispatches on probs' scalar type; only
// Double/Float/Half are supported.
std::tuple<Tensor,Tensor> _th_multinomial_alias_setup(const Tensor & probs) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(probs);
    // J is always Long regardless of probs' dtype; q matches probs.
    // The raw pointers are owned by J/q after reclaim and reused as THC outputs.
    auto J_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(ScalarType::Long)).release();
    auto J = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(J_));
    auto q_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto q = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(q_));
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto probs_ = checked_dense_tensor_unwrap(probs, "probs", 1, "_th_multinomial_alias_setup", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaDoubleTensor_multinomialAliasSetup(globalContext().getTHCState(), probs_, J_, q_);
            break;
        }
        case ScalarType::Float: {
            auto probs_ = checked_dense_tensor_unwrap(probs, "probs", 1, "_th_multinomial_alias_setup", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaTensor_multinomialAliasSetup(globalContext().getTHCState(), probs_, J_, q_);
            break;
        }
        case ScalarType::Half: {
            auto probs_ = checked_dense_tensor_unwrap(probs, "probs", 1, "_th_multinomial_alias_setup", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaHalfTensor_multinomialAliasSetup(globalContext().getTHCState(), probs_, J_, q_);
            break;
        }
        default:
            AT_ERROR("_th_multinomial_alias_setup not supported on CUDAType for ", dispatch_scalar_type);
    }
    return std::tuple<Tensor, Tensor>(J, q);
}
// Draws `num_samples` indices using the alias-method tables (q, J) into the
// provided Long-typed `result` tensor.
//
// BUGFIX: dispatch on q's scalar type, not result's. `result` is
// unconditionally checked against ScalarType::Long inside every case, so
// dispatching on infer_scalar_type(result) could never succeed: a Long result
// selects the default branch (AT_ERROR), while a floating-point result would
// fail the Long check. The non-out variant _th_multinomial_alias_draw already
// dispatches on q.
Tensor & _th_multinomial_alias_draw_out(Tensor & result, const Tensor & q, const Tensor & J, int64_t num_samples, c10::optional<Generator> generator) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(q);
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_multinomial_alias_draw_out", false, DeviceType::CUDA, ScalarType::Long);
            auto q_ = checked_dense_tensor_unwrap(q, "q", 1, "_th_multinomial_alias_draw_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto J_ = checked_dense_tensor_unwrap(J, "J", 2, "_th_multinomial_alias_draw_out", false, DeviceType::CUDA, ScalarType::Long);
            THCudaDoubleTensor_multinomialAliasDraw(globalContext().getTHCState(), result_, q_, J_, num_samples, generator);
            break;
        }
        case ScalarType::Float: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_multinomial_alias_draw_out", false, DeviceType::CUDA, ScalarType::Long);
            auto q_ = checked_dense_tensor_unwrap(q, "q", 1, "_th_multinomial_alias_draw_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto J_ = checked_dense_tensor_unwrap(J, "J", 2, "_th_multinomial_alias_draw_out", false, DeviceType::CUDA, ScalarType::Long);
            THCudaTensor_multinomialAliasDraw(globalContext().getTHCState(), result_, q_, J_, num_samples, generator);
            break;
        }
        case ScalarType::Half: {
            auto result_ = checked_dense_tensor_unwrap(result, "result", 0, "_th_multinomial_alias_draw_out", false, DeviceType::CUDA, ScalarType::Long);
            auto q_ = checked_dense_tensor_unwrap(q, "q", 1, "_th_multinomial_alias_draw_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto J_ = checked_dense_tensor_unwrap(J, "J", 2, "_th_multinomial_alias_draw_out", false, DeviceType::CUDA, ScalarType::Long);
            THCudaHalfTensor_multinomialAliasDraw(globalContext().getTHCState(), result_, q_, J_, num_samples, generator);
            break;
        }
        default:
            AT_ERROR("_th_multinomial_alias_draw_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    return result;
}
// Draws `num_samples` indices using the alias-method tables (q, J), returning
// a freshly allocated Long CUDA tensor. Dispatches on q's scalar type
// (Double/Float/Half); J must be Long.
Tensor _th_multinomial_alias_draw(const Tensor & q, const Tensor & J, int64_t num_samples, c10::optional<Generator> generator) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(q);
    // The result holds sampled indices, hence always ScalarType::Long.
    auto result_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(ScalarType::Long)).release();
    auto result = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(result_));
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto q_ = checked_dense_tensor_unwrap(q, "q", 1, "_th_multinomial_alias_draw", false, DeviceType::CUDA, dispatch_scalar_type);
            auto J_ = checked_dense_tensor_unwrap(J, "J", 2, "_th_multinomial_alias_draw", false, DeviceType::CUDA, ScalarType::Long);
            THCudaDoubleTensor_multinomialAliasDraw(globalContext().getTHCState(), result_, q_, J_, num_samples, generator);
            break;
        }
        case ScalarType::Float: {
            auto q_ = checked_dense_tensor_unwrap(q, "q", 1, "_th_multinomial_alias_draw", false, DeviceType::CUDA, dispatch_scalar_type);
            auto J_ = checked_dense_tensor_unwrap(J, "J", 2, "_th_multinomial_alias_draw", false, DeviceType::CUDA, ScalarType::Long);
            THCudaTensor_multinomialAliasDraw(globalContext().getTHCState(), result_, q_, J_, num_samples, generator);
            break;
        }
        case ScalarType::Half: {
            auto q_ = checked_dense_tensor_unwrap(q, "q", 1, "_th_multinomial_alias_draw", false, DeviceType::CUDA, dispatch_scalar_type);
            auto J_ = checked_dense_tensor_unwrap(J, "J", 2, "_th_multinomial_alias_draw", false, DeviceType::CUDA, ScalarType::Long);
            THCudaHalfTensor_multinomialAliasDraw(globalContext().getTHCState(), result_, q_, J_, num_samples, generator);
            break;
        }
        default:
            AT_ERROR("_th_multinomial_alias_draw not supported on CUDAType for ", dispatch_scalar_type);
    }
    return result;
}
// In-place copy of `src` into `self` via the legacy THC copyIgnoringOverlaps
// kernel (trailing underscore = in-place convention). Dispatches on self's
// scalar type; src is checked against the same dtype, so no cross-dtype copy
// happens here. Returns `self`.
Tensor & _th_copy_ignoring_overlaps_(Tensor & self, const Tensor & src) {
    // DeviceGuard omitted
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Byte: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto src_ = checked_dense_tensor_unwrap(src, "src", 2, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaByteTensor_copyIgnoringOverlaps(globalContext().getTHCState(), self_, src_);
            break;
        }
        case ScalarType::Char: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto src_ = checked_dense_tensor_unwrap(src, "src", 2, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaCharTensor_copyIgnoringOverlaps(globalContext().getTHCState(), self_, src_);
            break;
        }
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto src_ = checked_dense_tensor_unwrap(src, "src", 2, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaDoubleTensor_copyIgnoringOverlaps(globalContext().getTHCState(), self_, src_);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto src_ = checked_dense_tensor_unwrap(src, "src", 2, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaTensor_copyIgnoringOverlaps(globalContext().getTHCState(), self_, src_);
            break;
        }
        case ScalarType::Int: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto src_ = checked_dense_tensor_unwrap(src, "src", 2, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaIntTensor_copyIgnoringOverlaps(globalContext().getTHCState(), self_, src_);
            break;
        }
        case ScalarType::Long: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto src_ = checked_dense_tensor_unwrap(src, "src", 2, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaLongTensor_copyIgnoringOverlaps(globalContext().getTHCState(), self_, src_);
            break;
        }
        case ScalarType::Short: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto src_ = checked_dense_tensor_unwrap(src, "src", 2, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaShortTensor_copyIgnoringOverlaps(globalContext().getTHCState(), self_, src_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto src_ = checked_dense_tensor_unwrap(src, "src", 2, "_th_copy_ignoring_overlaps_", false, DeviceType::CUDA, dispatch_scalar_type);
            THCudaHalfTensor_copyIgnoringOverlaps(globalContext().getTHCState(), self_, src_);
            break;
        }
        default:
            AT_ERROR("_th_copy_ignoring_overlaps_ not supported on CUDAType for ", dispatch_scalar_type);
    }
    return self;
}
// Multi-margin loss forward pass (legacy THNN) on CUDA, writing into `output`.
// `target` must be Long; `p` and `margin` are narrowed to double for the THNN
// call. `weight` is optional: its unwrap passes allow-undefined (`true`), and
// an absent weight is forwarded to THNN as NULL. Dispatches on self's scalar
// type (Double/Float/Half only).
Tensor & _thnn_multi_margin_loss_forward_out(Tensor & output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) {
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multi_margin_loss_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multi_margin_loss_forward_out", false, DeviceType::CUDA, ScalarType::Long);
            auto p_ = p.toDouble();
            auto margin_ = margin.toDouble();
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 5, "_thnn_multi_margin_loss_forward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto output_ = checked_dense_tensor_unwrap(output, "output", 6, "_thnn_multi_margin_loss_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaDoubleMultiMarginCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, p_, weight_ ? weight_ : NULL, margin_);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multi_margin_loss_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multi_margin_loss_forward_out", false, DeviceType::CUDA, ScalarType::Long);
            auto p_ = p.toDouble();
            auto margin_ = margin.toDouble();
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 5, "_thnn_multi_margin_loss_forward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto output_ = checked_dense_tensor_unwrap(output, "output", 6, "_thnn_multi_margin_loss_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaMultiMarginCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, p_, weight_ ? weight_ : NULL, margin_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multi_margin_loss_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multi_margin_loss_forward_out", false, DeviceType::CUDA, ScalarType::Long);
            auto p_ = p.toDouble();
            auto margin_ = margin.toDouble();
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 5, "_thnn_multi_margin_loss_forward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto output_ = checked_dense_tensor_unwrap(output, "output", 6, "_thnn_multi_margin_loss_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaHalfMultiMarginCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, p_, weight_ ? weight_ : NULL, margin_);
            break;
        }
        default:
            AT_ERROR("_thnn_multi_margin_loss_forward_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    return output;
}
// Multi-margin loss forward pass (legacy THNN) on CUDA, returning a freshly
// allocated tensor of self's scalar type. Unlike the _out variant, this one
// sets a device guard from `self` before allocating. `target` must be Long;
// `p`/`margin` are narrowed to double; `weight` is optional and forwarded as
// NULL when absent. Dispatches on self's scalar type (Double/Float/Half only).
Tensor _thnn_multi_margin_loss_forward(const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) {
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    // `output` owns the TensorImpl after reclaim; the raw pointer is reused as
    // the THNN call's output argument.
    auto output_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto output = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(output_));
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multi_margin_loss_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multi_margin_loss_forward", false, DeviceType::CUDA, ScalarType::Long);
            auto p_ = p.toDouble();
            auto margin_ = margin.toDouble();
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 5, "_thnn_multi_margin_loss_forward", true, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaDoubleMultiMarginCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, p_, weight_ ? weight_ : NULL, margin_);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multi_margin_loss_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multi_margin_loss_forward", false, DeviceType::CUDA, ScalarType::Long);
            auto p_ = p.toDouble();
            auto margin_ = margin.toDouble();
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 5, "_thnn_multi_margin_loss_forward", true, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaMultiMarginCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, p_, weight_ ? weight_ : NULL, margin_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multi_margin_loss_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multi_margin_loss_forward", false, DeviceType::CUDA, ScalarType::Long);
            auto p_ = p.toDouble();
            auto margin_ = margin.toDouble();
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 5, "_thnn_multi_margin_loss_forward", true, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaHalfMultiMarginCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, p_, weight_ ? weight_ : NULL, margin_);
            break;
        }
        default:
            AT_ERROR("_thnn_multi_margin_loss_forward not supported on CUDAType for ", dispatch_scalar_type);
    }
    return output;
}
// Multi-margin loss backward pass (legacy THNN) on CUDA, writing the input
// gradient into `grad_input`. `target` must be Long; `p`/`margin` are narrowed
// to double; `weight` is optional (allow-undefined unwrap, forwarded as NULL
// when absent). Dispatches on self's scalar type (Double/Float/Half only).
Tensor & _thnn_multi_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) {
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multi_margin_loss_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multi_margin_loss_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto target_ = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multi_margin_loss_backward_out", false, DeviceType::CUDA, ScalarType::Long);
            auto p_ = p.toDouble();
            auto margin_ = margin.toDouble();
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 6, "_thnn_multi_margin_loss_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 7, "_thnn_multi_margin_loss_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaDoubleMultiMarginCriterion_updateGradInput(globalContext().getTHCState(), self_, target_, grad_output_, grad_input_, reduction, p_, weight_ ? weight_ : NULL, margin_);
            break;
        }
        case ScalarType::Float: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multi_margin_loss_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multi_margin_loss_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto target_ = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multi_margin_loss_backward_out", false, DeviceType::CUDA, ScalarType::Long);
            auto p_ = p.toDouble();
            auto margin_ = margin.toDouble();
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 6, "_thnn_multi_margin_loss_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 7, "_thnn_multi_margin_loss_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaMultiMarginCriterion_updateGradInput(globalContext().getTHCState(), self_, target_, grad_output_, grad_input_, reduction, p_, weight_ ? weight_ : NULL, margin_);
            break;
        }
        case ScalarType::Half: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multi_margin_loss_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multi_margin_loss_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto target_ = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multi_margin_loss_backward_out", false, DeviceType::CUDA, ScalarType::Long);
            auto p_ = p.toDouble();
            auto margin_ = margin.toDouble();
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 6, "_thnn_multi_margin_loss_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 7, "_thnn_multi_margin_loss_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaHalfMultiMarginCriterion_updateGradInput(globalContext().getTHCState(), self_, target_, grad_output_, grad_input_, reduction, p_, weight_ ? weight_ : NULL, margin_);
            break;
        }
        default:
            AT_ERROR("_thnn_multi_margin_loss_backward_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    return grad_input;
}
Tensor _thnn_multi_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, Scalar p, Scalar margin, const Tensor & weight, int64_t reduction) {
  // Allocating variant of the MultiMarginCriterion backward dispatcher: builds
  // a fresh, empty CUDA grad_input tensor of the dispatch dtype, fills it via
  // the dtype-matched THNN kernel, and returns it.
  const OptionalDeviceGuard guard(device_of(self));
  auto scalar_type = infer_scalar_type(self);
  // Raw impl pointer is handed to the THNN kernel; ownership is reclaimed by
  // the Tensor wrapper immediately below.
  auto grad_input_impl = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(scalar_type)).release();
  auto grad_input = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(grad_input_impl));
  switch (scalar_type) {
    case ScalarType::Double: {
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multi_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multi_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multi_margin_loss_backward", false, DeviceType::CUDA, ScalarType::Long);
      auto p_val = p.toDouble();
      auto margin_val = margin.toDouble();
      auto wgt = checked_dense_tensor_unwrap(weight, "weight", 6, "_thnn_multi_margin_loss_backward", true, DeviceType::CUDA, scalar_type);
      THNN_CudaDoubleMultiMarginCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, grad_input_impl, reduction, p_val, wgt ? wgt : NULL, margin_val);
      break;
    }
    case ScalarType::Float: {
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multi_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multi_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multi_margin_loss_backward", false, DeviceType::CUDA, ScalarType::Long);
      auto p_val = p.toDouble();
      auto margin_val = margin.toDouble();
      auto wgt = checked_dense_tensor_unwrap(weight, "weight", 6, "_thnn_multi_margin_loss_backward", true, DeviceType::CUDA, scalar_type);
      THNN_CudaMultiMarginCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, grad_input_impl, reduction, p_val, wgt ? wgt : NULL, margin_val);
      break;
    }
    case ScalarType::Half: {
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multi_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multi_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multi_margin_loss_backward", false, DeviceType::CUDA, ScalarType::Long);
      auto p_val = p.toDouble();
      auto margin_val = margin.toDouble();
      auto wgt = checked_dense_tensor_unwrap(weight, "weight", 6, "_thnn_multi_margin_loss_backward", true, DeviceType::CUDA, scalar_type);
      THNN_CudaHalfMultiMarginCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, grad_input_impl, reduction, p_val, wgt ? wgt : NULL, margin_val);
      break;
    }
    default:
      AT_ERROR("_thnn_multi_margin_loss_backward not supported on CUDAType for ", scalar_type);
  }
  return grad_input;
}
std::tuple<Tensor &,Tensor &> _thnn_multilabel_margin_loss_forward_out(Tensor & output, Tensor & is_target, const Tensor & self, const Tensor & target, int64_t reduction) {
  // Legacy THNN dispatcher for the CUDA MultiLabelMarginCriterion forward
  // pass. Writes into the caller-provided output and is_target tensors and
  // returns references to both.
  const OptionalDeviceGuard guard(device_of(self));
  auto scalar_type = infer_scalar_type(self);
  switch (scalar_type) {
    case ScalarType::Double: {
      // target must be a Long tensor; all tensors must be dense CUDA tensors.
      auto self_t = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto out_t = checked_dense_tensor_unwrap(output, "output", 3, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto is_tgt = checked_dense_tensor_unwrap(is_target, "is_target", 3, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaDoubleMultiLabelMarginCriterion_updateOutput(globalContext().getTHCState(), self_t, tgt, out_t, is_tgt, reduction);
      break;
    }
    case ScalarType::Float: {
      auto self_t = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto out_t = checked_dense_tensor_unwrap(output, "output", 3, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto is_tgt = checked_dense_tensor_unwrap(is_target, "is_target", 3, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaMultiLabelMarginCriterion_updateOutput(globalContext().getTHCState(), self_t, tgt, out_t, is_tgt, reduction);
      break;
    }
    case ScalarType::Half: {
      auto self_t = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto out_t = checked_dense_tensor_unwrap(output, "output", 3, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto is_tgt = checked_dense_tensor_unwrap(is_target, "is_target", 3, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaHalfMultiLabelMarginCriterion_updateOutput(globalContext().getTHCState(), self_t, tgt, out_t, is_tgt, reduction);
      break;
    }
    case ScalarType::BFloat16: {
      auto self_t = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto out_t = checked_dense_tensor_unwrap(output, "output", 3, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto is_tgt = checked_dense_tensor_unwrap(is_target, "is_target", 3, "_thnn_multilabel_margin_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaBFloat16MultiLabelMarginCriterion_updateOutput(globalContext().getTHCState(), self_t, tgt, out_t, is_tgt, reduction);
      break;
    }
    default:
      AT_ERROR("_thnn_multilabel_margin_loss_forward_out not supported on CUDAType for ", scalar_type);
  }
  return std::tuple<Tensor &, Tensor &>(output, is_target);
}
std::tuple<Tensor,Tensor> _thnn_multilabel_margin_loss_forward(const Tensor & self, const Tensor & target, int64_t reduction) {
const OptionalDeviceGuard device_guard(device_of(self));
auto dispatch_scalar_type = infer_scalar_type(self);
auto output_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
auto output = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(output_));
auto is_target_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
auto is_target = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(is_target_));
switch (dispatch_scalar_type) {
case ScalarType::Double: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multilabel_margin_loss_forward", false, DeviceType::CUDA, dispatch_scalar_type);
auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multilabel_margin_loss_forward", false, DeviceType::CUDA, ScalarType::Long);
THNN_CudaDoubleMultiLabelMarginCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, is_target_, reduction);
break;
}
case ScalarType::Float: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multilabel_margin_loss_forward", false, DeviceType::CUDA, dispatch_scalar_type);
auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multilabel_margin_loss_forward", false, DeviceType::CUDA, ScalarType::Long);
THNN_CudaMultiLabelMarginCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, is_target_, reduction);
break;
}
case ScalarType::Half: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multilabel_margin_loss_forward", false, DeviceType::CUDA, dispatch_scalar_type);
auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multilabel_margin_loss_forward", false, DeviceType::CUDA, ScalarType::Long);
THNN_CudaHalfMultiLabelMarginCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, is_target_, reduction);
break;
}
case ScalarType::BFloat16: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_multilabel_margin_loss_forward", false, DeviceType::CUDA, dispatch_scalar_type);
auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_multilabel_margin_loss_forward", false, DeviceType::CUDA, ScalarType::Long);
THNN_CudaBFloat16MultiLabelMarginCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, is_target_, reduction);
break;
}
default:
AT_ERROR("_thnn_multilabel_margin_loss_forward not supported on CUDAType for ", dispatch_scalar_type);
}
return std::tuple<Tensor, Tensor>(output, is_target);
}
Tensor & _thnn_multilabel_margin_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) {
  // Legacy THNN dispatcher for the CUDA MultiLabelMarginCriterion backward
  // pass. Writes into the caller-supplied grad_input and returns it.
  const OptionalDeviceGuard guard(device_of(self));
  auto scalar_type = infer_scalar_type(self);
  switch (scalar_type) {
    case ScalarType::Double: {
      // target must be a Long tensor; is_target is the buffer produced by the
      // matching forward call.
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto is_tgt = checked_dense_tensor_unwrap(is_target, "is_target", 5, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto g_in = checked_dense_tensor_unwrap(grad_input, "grad_input", 5, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaDoubleMultiLabelMarginCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, g_in, is_tgt, reduction);
      break;
    }
    case ScalarType::Float: {
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto is_tgt = checked_dense_tensor_unwrap(is_target, "is_target", 5, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto g_in = checked_dense_tensor_unwrap(grad_input, "grad_input", 5, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaMultiLabelMarginCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, g_in, is_tgt, reduction);
      break;
    }
    case ScalarType::Half: {
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto is_tgt = checked_dense_tensor_unwrap(is_target, "is_target", 5, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto g_in = checked_dense_tensor_unwrap(grad_input, "grad_input", 5, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaHalfMultiLabelMarginCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, g_in, is_tgt, reduction);
      break;
    }
    case ScalarType::BFloat16: {
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto is_tgt = checked_dense_tensor_unwrap(is_target, "is_target", 5, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto g_in = checked_dense_tensor_unwrap(grad_input, "grad_input", 5, "_thnn_multilabel_margin_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaBFloat16MultiLabelMarginCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, g_in, is_tgt, reduction);
      break;
    }
    default:
      AT_ERROR("_thnn_multilabel_margin_loss_backward_out not supported on CUDAType for ", scalar_type);
  }
  return grad_input;
}
Tensor _thnn_multilabel_margin_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, int64_t reduction, const Tensor & is_target) {
  // Allocating variant of the MultiLabelMarginCriterion backward dispatcher:
  // creates an empty CUDA grad_input of the dispatch dtype, fills it via the
  // dtype-matched THNN kernel, and returns it by value.
  const OptionalDeviceGuard guard(device_of(self));
  auto scalar_type = infer_scalar_type(self);
  // Raw impl pointer goes to the THNN kernel; the Tensor wrapper owns it.
  auto grad_input_impl = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(scalar_type)).release();
  auto grad_input = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(grad_input_impl));
  switch (scalar_type) {
    case ScalarType::Double: {
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, ScalarType::Long);
      auto is_tgt = checked_dense_tensor_unwrap(is_target, "is_target", 5, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      THNN_CudaDoubleMultiLabelMarginCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, grad_input_impl, is_tgt, reduction);
      break;
    }
    case ScalarType::Float: {
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, ScalarType::Long);
      auto is_tgt = checked_dense_tensor_unwrap(is_target, "is_target", 5, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      THNN_CudaMultiLabelMarginCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, grad_input_impl, is_tgt, reduction);
      break;
    }
    case ScalarType::Half: {
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, ScalarType::Long);
      auto is_tgt = checked_dense_tensor_unwrap(is_target, "is_target", 5, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      THNN_CudaHalfMultiLabelMarginCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, grad_input_impl, is_tgt, reduction);
      break;
    }
    case ScalarType::BFloat16: {
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, ScalarType::Long);
      auto is_tgt = checked_dense_tensor_unwrap(is_target, "is_target", 5, "_thnn_multilabel_margin_loss_backward", false, DeviceType::CUDA, scalar_type);
      THNN_CudaBFloat16MultiLabelMarginCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, grad_input_impl, is_tgt, reduction);
      break;
    }
    default:
      AT_ERROR("_thnn_multilabel_margin_loss_backward not supported on CUDAType for ", scalar_type);
  }
  return grad_input;
}
std::tuple<Tensor &,Tensor &> _thnn_nll_loss_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
  // Legacy THNN dispatcher for the CUDA ClassNLLCriterion forward pass.
  // Writes into the caller-provided output and total_weight tensors and
  // returns references to both.
  const OptionalDeviceGuard guard(device_of(self));
  auto scalar_type = infer_scalar_type(self);
  switch (scalar_type) {
    case ScalarType::Double: {
      // target must be a Long tensor; weight may be undefined, in which case
      // the kernel receives NULL.
      auto self_t = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto wgt = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_nll_loss_forward_out", true, DeviceType::CUDA, scalar_type);
      auto out_t = checked_dense_tensor_unwrap(output, "output", 5, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto tot_w = checked_dense_tensor_unwrap(total_weight, "total_weight", 5, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaDoubleClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_t, tgt, out_t, reduction, wgt ? wgt : NULL, tot_w, ignore_index);
      break;
    }
    case ScalarType::Float: {
      auto self_t = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto wgt = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_nll_loss_forward_out", true, DeviceType::CUDA, scalar_type);
      auto out_t = checked_dense_tensor_unwrap(output, "output", 5, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto tot_w = checked_dense_tensor_unwrap(total_weight, "total_weight", 5, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_t, tgt, out_t, reduction, wgt ? wgt : NULL, tot_w, ignore_index);
      break;
    }
    case ScalarType::Half: {
      auto self_t = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto wgt = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_nll_loss_forward_out", true, DeviceType::CUDA, scalar_type);
      auto out_t = checked_dense_tensor_unwrap(output, "output", 5, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto tot_w = checked_dense_tensor_unwrap(total_weight, "total_weight", 5, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaHalfClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_t, tgt, out_t, reduction, wgt ? wgt : NULL, tot_w, ignore_index);
      break;
    }
    case ScalarType::BFloat16: {
      auto self_t = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto wgt = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_nll_loss_forward_out", true, DeviceType::CUDA, scalar_type);
      auto out_t = checked_dense_tensor_unwrap(output, "output", 5, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      auto tot_w = checked_dense_tensor_unwrap(total_weight, "total_weight", 5, "_thnn_nll_loss_forward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaBFloat16ClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_t, tgt, out_t, reduction, wgt ? wgt : NULL, tot_w, ignore_index);
      break;
    }
    default:
      AT_ERROR("_thnn_nll_loss_forward_out not supported on CUDAType for ", scalar_type);
  }
  return std::tuple<Tensor &, Tensor &>(output, total_weight);
}
std::tuple<Tensor,Tensor> _thnn_nll_loss_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
const OptionalDeviceGuard device_guard(device_of(self));
auto dispatch_scalar_type = infer_scalar_type(self);
auto output_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
auto output = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(output_));
auto total_weight_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
auto total_weight = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(total_weight_));
switch (dispatch_scalar_type) {
case ScalarType::Double: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_nll_loss_forward", false, DeviceType::CUDA, dispatch_scalar_type);
auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_nll_loss_forward", false, DeviceType::CUDA, ScalarType::Long);
auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_nll_loss_forward", true, DeviceType::CUDA, dispatch_scalar_type);
THNN_CudaDoubleClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, weight_ ? weight_ : NULL, total_weight_, ignore_index);
break;
}
case ScalarType::Float: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_nll_loss_forward", false, DeviceType::CUDA, dispatch_scalar_type);
auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_nll_loss_forward", false, DeviceType::CUDA, ScalarType::Long);
auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_nll_loss_forward", true, DeviceType::CUDA, dispatch_scalar_type);
THNN_CudaClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, weight_ ? weight_ : NULL, total_weight_, ignore_index);
break;
}
case ScalarType::Half: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_nll_loss_forward", false, DeviceType::CUDA, dispatch_scalar_type);
auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_nll_loss_forward", false, DeviceType::CUDA, ScalarType::Long);
auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_nll_loss_forward", true, DeviceType::CUDA, dispatch_scalar_type);
THNN_CudaHalfClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, weight_ ? weight_ : NULL, total_weight_, ignore_index);
break;
}
case ScalarType::BFloat16: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_nll_loss_forward", false, DeviceType::CUDA, dispatch_scalar_type);
auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_nll_loss_forward", false, DeviceType::CUDA, ScalarType::Long);
auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_nll_loss_forward", true, DeviceType::CUDA, dispatch_scalar_type);
THNN_CudaBFloat16ClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, weight_ ? weight_ : NULL, total_weight_, ignore_index);
break;
}
default:
AT_ERROR("_thnn_nll_loss_forward not supported on CUDAType for ", dispatch_scalar_type);
}
return std::tuple<Tensor, Tensor>(output, total_weight);
}
Tensor & _thnn_nll_loss_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) {
  // Legacy THNN dispatcher for the CUDA ClassNLLCriterion backward pass.
  // Writes into the caller-supplied grad_input and returns it.
  const OptionalDeviceGuard guard(device_of(self));
  auto scalar_type = infer_scalar_type(self);
  switch (scalar_type) {
    case ScalarType::Double: {
      // target must be a Long tensor; weight may be undefined (kernel gets
      // NULL); total_weight is the buffer produced by the forward call.
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto wgt = checked_dense_tensor_unwrap(weight, "weight", 4, "_thnn_nll_loss_backward_out", true, DeviceType::CUDA, scalar_type);
      auto tot_w = checked_dense_tensor_unwrap(total_weight, "total_weight", 7, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto g_in = checked_dense_tensor_unwrap(grad_input, "grad_input", 7, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaDoubleClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, g_in, reduction, wgt ? wgt : NULL, tot_w, ignore_index);
      break;
    }
    case ScalarType::Float: {
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto wgt = checked_dense_tensor_unwrap(weight, "weight", 4, "_thnn_nll_loss_backward_out", true, DeviceType::CUDA, scalar_type);
      auto tot_w = checked_dense_tensor_unwrap(total_weight, "total_weight", 7, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto g_in = checked_dense_tensor_unwrap(grad_input, "grad_input", 7, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, g_in, reduction, wgt ? wgt : NULL, tot_w, ignore_index);
      break;
    }
    case ScalarType::Half: {
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto wgt = checked_dense_tensor_unwrap(weight, "weight", 4, "_thnn_nll_loss_backward_out", true, DeviceType::CUDA, scalar_type);
      auto tot_w = checked_dense_tensor_unwrap(total_weight, "total_weight", 7, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto g_in = checked_dense_tensor_unwrap(grad_input, "grad_input", 7, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaHalfClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, g_in, reduction, wgt ? wgt : NULL, tot_w, ignore_index);
      break;
    }
    case ScalarType::BFloat16: {
      auto g_out = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto self_t = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto tgt = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, ScalarType::Long);
      auto wgt = checked_dense_tensor_unwrap(weight, "weight", 4, "_thnn_nll_loss_backward_out", true, DeviceType::CUDA, scalar_type);
      auto tot_w = checked_dense_tensor_unwrap(total_weight, "total_weight", 7, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      auto g_in = checked_dense_tensor_unwrap(grad_input, "grad_input", 7, "_thnn_nll_loss_backward_out", false, DeviceType::CUDA, scalar_type);
      THNN_CudaBFloat16ClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_t, tgt, g_out, g_in, reduction, wgt ? wgt : NULL, tot_w, ignore_index);
      break;
    }
    default:
      AT_ERROR("_thnn_nll_loss_backward_out not supported on CUDAType for ", scalar_type);
  }
  return grad_input;
}
Tensor _thnn_nll_loss_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) {
    // Legacy TH binding: gradient of nll_loss w.r.t. `self`, dispatched to the
    // THNN CUDA kernel that matches self's scalar type.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto scalar_type = infer_scalar_type(self);
    // Fresh empty CUDA tensor; the TH kernel resizes and fills it in place.
    auto grad_input_impl = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(scalar_type)).release();
    auto grad_input = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(grad_input_impl));
    // Reject unsupported dtypes before any argument unwrapping, matching the
    // original switch whose default case errored immediately.
    if (scalar_type != ScalarType::Double && scalar_type != ScalarType::Float &&
        scalar_type != ScalarType::Half && scalar_type != ScalarType::BFloat16) {
        AT_ERROR("_thnn_nll_loss_backward not supported on CUDAType for ", scalar_type);
    }
    // The unwrap sequence is identical for every supported dtype, so it is
    // hoisted out of the per-type dispatch below.
    auto grad_output_th = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_nll_loss_backward", false, DeviceType::CUDA, scalar_type);
    auto self_th = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_nll_loss_backward", false, DeviceType::CUDA, scalar_type);
    auto target_th = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_nll_loss_backward", false, DeviceType::CUDA, ScalarType::Long);
    auto weight_th = checked_dense_tensor_unwrap(weight, "weight", 4, "_thnn_nll_loss_backward", true, DeviceType::CUDA, scalar_type);
    auto total_weight_th = checked_dense_tensor_unwrap(total_weight, "total_weight", 7, "_thnn_nll_loss_backward", false, DeviceType::CUDA, scalar_type);
    // Per-dtype kernel names: the Float kernel carries no type infix.
    if (scalar_type == ScalarType::Double) {
        THNN_CudaDoubleClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_th, target_th, grad_output_th, grad_input_impl, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    } else if (scalar_type == ScalarType::Float) {
        THNN_CudaClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_th, target_th, grad_output_th, grad_input_impl, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    } else if (scalar_type == ScalarType::Half) {
        THNN_CudaHalfClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_th, target_th, grad_output_th, grad_input_impl, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    } else {
        THNN_CudaBFloat16ClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_th, target_th, grad_output_th, grad_input_impl, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    }
    return grad_input;
}
std::tuple<Tensor &,Tensor &> _thnn_nll_loss2d_forward_out(Tensor & output, Tensor & total_weight, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
    // Legacy TH binding: 2-D (spatial) NLL-loss forward writing into the
    // caller-provided `output` / `total_weight` tensors.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto scalar_type = infer_scalar_type(self);
    // Fail fast for unsupported dtypes, before touching any argument.
    if (scalar_type != ScalarType::Double && scalar_type != ScalarType::Float &&
        scalar_type != ScalarType::Half && scalar_type != ScalarType::BFloat16) {
        AT_ERROR("_thnn_nll_loss2d_forward_out not supported on CUDAType for ", scalar_type);
    }
    // Unwrapping is dtype-independent apart from the expected scalar type,
    // so it is done once rather than per switch case.
    auto self_th = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_nll_loss2d_forward_out", false, DeviceType::CUDA, scalar_type);
    auto target_th = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_nll_loss2d_forward_out", false, DeviceType::CUDA, ScalarType::Long);
    auto weight_th = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_nll_loss2d_forward_out", true, DeviceType::CUDA, scalar_type);
    auto output_th = checked_dense_tensor_unwrap(output, "output", 5, "_thnn_nll_loss2d_forward_out", false, DeviceType::CUDA, scalar_type);
    auto total_weight_th = checked_dense_tensor_unwrap(total_weight, "total_weight", 5, "_thnn_nll_loss2d_forward_out", false, DeviceType::CUDA, scalar_type);
    // Dispatch to the type-suffixed kernel (Float has no infix).
    if (scalar_type == ScalarType::Double) {
        THNN_CudaDoubleSpatialClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_th, target_th, output_th, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    } else if (scalar_type == ScalarType::Float) {
        THNN_CudaSpatialClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_th, target_th, output_th, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    } else if (scalar_type == ScalarType::Half) {
        THNN_CudaHalfSpatialClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_th, target_th, output_th, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    } else {
        THNN_CudaBFloat16SpatialClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_th, target_th, output_th, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    }
    return std::tuple<Tensor &, Tensor &>(output, total_weight);
}
std::tuple<Tensor,Tensor> _thnn_nll_loss2d_forward(const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index) {
const OptionalDeviceGuard device_guard(device_of(self));
auto dispatch_scalar_type = infer_scalar_type(self);
auto output_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
auto output = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(output_));
auto total_weight_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
auto total_weight = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(total_weight_));
switch (dispatch_scalar_type) {
case ScalarType::Double: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_nll_loss2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_nll_loss2d_forward", false, DeviceType::CUDA, ScalarType::Long);
auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_nll_loss2d_forward", true, DeviceType::CUDA, dispatch_scalar_type);
THNN_CudaDoubleSpatialClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, weight_ ? weight_ : NULL, total_weight_, ignore_index);
break;
}
case ScalarType::Float: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_nll_loss2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_nll_loss2d_forward", false, DeviceType::CUDA, ScalarType::Long);
auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_nll_loss2d_forward", true, DeviceType::CUDA, dispatch_scalar_type);
THNN_CudaSpatialClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, weight_ ? weight_ : NULL, total_weight_, ignore_index);
break;
}
case ScalarType::Half: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_nll_loss2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_nll_loss2d_forward", false, DeviceType::CUDA, ScalarType::Long);
auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_nll_loss2d_forward", true, DeviceType::CUDA, dispatch_scalar_type);
THNN_CudaHalfSpatialClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, weight_ ? weight_ : NULL, total_weight_, ignore_index);
break;
}
case ScalarType::BFloat16: {
auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_nll_loss2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
auto target_ = checked_dense_tensor_unwrap(target, "target", 2, "_thnn_nll_loss2d_forward", false, DeviceType::CUDA, ScalarType::Long);
auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_nll_loss2d_forward", true, DeviceType::CUDA, dispatch_scalar_type);
THNN_CudaBFloat16SpatialClassNLLCriterion_updateOutput(globalContext().getTHCState(), self_, target_, output_, reduction, weight_ ? weight_ : NULL, total_weight_, ignore_index);
break;
}
default:
AT_ERROR("_thnn_nll_loss2d_forward not supported on CUDAType for ", dispatch_scalar_type);
}
return std::tuple<Tensor, Tensor>(output, total_weight);
}
Tensor & _thnn_nll_loss2d_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) {
    // Legacy TH binding: 2-D (spatial) NLL-loss backward writing into the
    // caller-provided `grad_input`.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto scalar_type = infer_scalar_type(self);
    // Fail fast for unsupported dtypes, before touching any argument.
    if (scalar_type != ScalarType::Double && scalar_type != ScalarType::Float &&
        scalar_type != ScalarType::Half && scalar_type != ScalarType::BFloat16) {
        AT_ERROR("_thnn_nll_loss2d_backward_out not supported on CUDAType for ", scalar_type);
    }
    // Identical unwrap sequence for every supported dtype; hoisted.
    auto grad_output_th = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_nll_loss2d_backward_out", false, DeviceType::CUDA, scalar_type);
    auto self_th = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_nll_loss2d_backward_out", false, DeviceType::CUDA, scalar_type);
    auto target_th = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_nll_loss2d_backward_out", false, DeviceType::CUDA, ScalarType::Long);
    auto weight_th = checked_dense_tensor_unwrap(weight, "weight", 4, "_thnn_nll_loss2d_backward_out", true, DeviceType::CUDA, scalar_type);
    auto total_weight_th = checked_dense_tensor_unwrap(total_weight, "total_weight", 7, "_thnn_nll_loss2d_backward_out", false, DeviceType::CUDA, scalar_type);
    auto grad_input_th = checked_dense_tensor_unwrap(grad_input, "grad_input", 7, "_thnn_nll_loss2d_backward_out", false, DeviceType::CUDA, scalar_type);
    // Dispatch to the type-suffixed kernel (Float has no infix).
    if (scalar_type == ScalarType::Double) {
        THNN_CudaDoubleSpatialClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_th, target_th, grad_output_th, grad_input_th, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    } else if (scalar_type == ScalarType::Float) {
        THNN_CudaSpatialClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_th, target_th, grad_output_th, grad_input_th, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    } else if (scalar_type == ScalarType::Half) {
        THNN_CudaHalfSpatialClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_th, target_th, grad_output_th, grad_input_th, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    } else {
        THNN_CudaBFloat16SpatialClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_th, target_th, grad_output_th, grad_input_th, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    }
    return grad_input;
}
Tensor _thnn_nll_loss2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & target, const Tensor & weight, int64_t reduction, int64_t ignore_index, const Tensor & total_weight) {
    // Legacy TH binding: 2-D (spatial) NLL-loss backward allocating its own
    // `grad_input` result tensor.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto scalar_type = infer_scalar_type(self);
    // Fresh empty CUDA tensor; the TH kernel resizes and fills it in place.
    auto grad_input_impl = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(scalar_type)).release();
    auto grad_input = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(grad_input_impl));
    // Fail fast for unsupported dtypes, before any argument unwrapping.
    if (scalar_type != ScalarType::Double && scalar_type != ScalarType::Float &&
        scalar_type != ScalarType::Half && scalar_type != ScalarType::BFloat16) {
        AT_ERROR("_thnn_nll_loss2d_backward not supported on CUDAType for ", scalar_type);
    }
    // Identical unwrap sequence for every supported dtype; hoisted.
    auto grad_output_th = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_nll_loss2d_backward", false, DeviceType::CUDA, scalar_type);
    auto self_th = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_nll_loss2d_backward", false, DeviceType::CUDA, scalar_type);
    auto target_th = checked_dense_tensor_unwrap(target, "target", 3, "_thnn_nll_loss2d_backward", false, DeviceType::CUDA, ScalarType::Long);
    auto weight_th = checked_dense_tensor_unwrap(weight, "weight", 4, "_thnn_nll_loss2d_backward", true, DeviceType::CUDA, scalar_type);
    auto total_weight_th = checked_dense_tensor_unwrap(total_weight, "total_weight", 7, "_thnn_nll_loss2d_backward", false, DeviceType::CUDA, scalar_type);
    // Dispatch to the type-suffixed kernel (Float has no infix).
    if (scalar_type == ScalarType::Double) {
        THNN_CudaDoubleSpatialClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_th, target_th, grad_output_th, grad_input_impl, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    } else if (scalar_type == ScalarType::Float) {
        THNN_CudaSpatialClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_th, target_th, grad_output_th, grad_input_impl, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    } else if (scalar_type == ScalarType::Half) {
        THNN_CudaHalfSpatialClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_th, target_th, grad_output_th, grad_input_impl, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    } else {
        THNN_CudaBFloat16SpatialClassNLLCriterion_updateGradInput(globalContext().getTHCState(), self_th, target_th, grad_output_th, grad_input_impl, reduction, weight_th ? weight_th : NULL, total_weight_th, ignore_index);
    }
    return grad_input;
}
Tensor & _thnn_glu_forward_out(Tensor & output, const Tensor & self, int64_t dim) {
    // Legacy TH binding: gated-linear-unit forward writing into the
    // caller-provided `output`. Supported dtypes: Double, Float, Half.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto scalar_type = infer_scalar_type(self);
    // Fail fast for unsupported dtypes, before touching any argument.
    if (scalar_type != ScalarType::Double && scalar_type != ScalarType::Float &&
        scalar_type != ScalarType::Half) {
        AT_ERROR("_thnn_glu_forward_out not supported on CUDAType for ", scalar_type);
    }
    // Identical unwrap sequence for every supported dtype; hoisted.
    auto self_th = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_glu_forward_out", false, DeviceType::CUDA, scalar_type);
    auto output_th = checked_dense_tensor_unwrap(output, "output", 2, "_thnn_glu_forward_out", false, DeviceType::CUDA, scalar_type);
    // Dispatch to the type-suffixed kernel (Float has no infix).
    if (scalar_type == ScalarType::Double) {
        THNN_CudaDoubleGatedLinear_updateOutput(globalContext().getTHCState(), self_th, output_th, dim);
    } else if (scalar_type == ScalarType::Float) {
        THNN_CudaGatedLinear_updateOutput(globalContext().getTHCState(), self_th, output_th, dim);
    } else {
        THNN_CudaHalfGatedLinear_updateOutput(globalContext().getTHCState(), self_th, output_th, dim);
    }
    return output;
}
Tensor _thnn_glu_forward(const Tensor & self, int64_t dim) {
    // Legacy TH binding: gated-linear-unit forward allocating its own
    // `output` result tensor. Supported dtypes: Double, Float, Half.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto scalar_type = infer_scalar_type(self);
    // Fresh empty CUDA tensor; the TH kernel resizes and fills it in place.
    auto output_impl = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(scalar_type)).release();
    auto output = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(output_impl));
    // Fail fast for unsupported dtypes, before any argument unwrapping.
    if (scalar_type != ScalarType::Double && scalar_type != ScalarType::Float &&
        scalar_type != ScalarType::Half) {
        AT_ERROR("_thnn_glu_forward not supported on CUDAType for ", scalar_type);
    }
    auto self_th = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_glu_forward", false, DeviceType::CUDA, scalar_type);
    // Dispatch to the type-suffixed kernel (Float has no infix).
    if (scalar_type == ScalarType::Double) {
        THNN_CudaDoubleGatedLinear_updateOutput(globalContext().getTHCState(), self_th, output_impl, dim);
    } else if (scalar_type == ScalarType::Float) {
        THNN_CudaGatedLinear_updateOutput(globalContext().getTHCState(), self_th, output_impl, dim);
    } else {
        THNN_CudaHalfGatedLinear_updateOutput(globalContext().getTHCState(), self_th, output_impl, dim);
    }
    return output;
}
Tensor & _thnn_glu_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, int64_t dim) {
    // Legacy TH binding: gated-linear-unit backward writing into the
    // caller-provided `grad_input`. Supported dtypes: Double, Float, Half.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto scalar_type = infer_scalar_type(self);
    // Fail fast for unsupported dtypes, before touching any argument.
    if (scalar_type != ScalarType::Double && scalar_type != ScalarType::Float &&
        scalar_type != ScalarType::Half) {
        AT_ERROR("_thnn_glu_backward_out not supported on CUDAType for ", scalar_type);
    }
    // Identical unwrap sequence for every supported dtype; hoisted.
    auto grad_output_th = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_glu_backward_out", false, DeviceType::CUDA, scalar_type);
    auto self_th = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_glu_backward_out", false, DeviceType::CUDA, scalar_type);
    auto grad_input_th = checked_dense_tensor_unwrap(grad_input, "grad_input", 3, "_thnn_glu_backward_out", false, DeviceType::CUDA, scalar_type);
    // Dispatch to the type-suffixed kernel (Float has no infix).
    if (scalar_type == ScalarType::Double) {
        THNN_CudaDoubleGatedLinear_updateGradInput(globalContext().getTHCState(), self_th, grad_output_th, grad_input_th, dim);
    } else if (scalar_type == ScalarType::Float) {
        THNN_CudaGatedLinear_updateGradInput(globalContext().getTHCState(), self_th, grad_output_th, grad_input_th, dim);
    } else {
        THNN_CudaHalfGatedLinear_updateGradInput(globalContext().getTHCState(), self_th, grad_output_th, grad_input_th, dim);
    }
    return grad_input;
}
Tensor _thnn_glu_backward(const Tensor & grad_output, const Tensor & self, int64_t dim) {
    // Legacy TH binding: gated-linear-unit backward allocating its own
    // `grad_input` result tensor. Supported dtypes: Double, Float, Half.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto scalar_type = infer_scalar_type(self);
    // Fresh empty CUDA tensor; the TH kernel resizes and fills it in place.
    auto grad_input_impl = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(scalar_type)).release();
    auto grad_input = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(grad_input_impl));
    // Fail fast for unsupported dtypes, before any argument unwrapping.
    if (scalar_type != ScalarType::Double && scalar_type != ScalarType::Float &&
        scalar_type != ScalarType::Half) {
        AT_ERROR("_thnn_glu_backward not supported on CUDAType for ", scalar_type);
    }
    // Identical unwrap sequence for every supported dtype; hoisted.
    auto grad_output_th = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_glu_backward", false, DeviceType::CUDA, scalar_type);
    auto self_th = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_glu_backward", false, DeviceType::CUDA, scalar_type);
    // Dispatch to the type-suffixed kernel (Float has no infix).
    if (scalar_type == ScalarType::Double) {
        THNN_CudaDoubleGatedLinear_updateGradInput(globalContext().getTHCState(), self_th, grad_output_th, grad_input_impl, dim);
    } else if (scalar_type == ScalarType::Float) {
        THNN_CudaGatedLinear_updateGradInput(globalContext().getTHCState(), self_th, grad_output_th, grad_input_impl, dim);
    } else {
        THNN_CudaHalfGatedLinear_updateGradInput(globalContext().getTHCState(), self_th, grad_output_th, grad_input_impl, dim);
    }
    return grad_input;
}
std::tuple<Tensor &,Tensor &> _thnn_log_sigmoid_forward_out(Tensor & output, Tensor & buffer, const Tensor & self) {
    // Legacy TH binding: log-sigmoid forward writing into the caller-provided
    // `output` and scratch `buffer`. Supported dtypes: Double, Float, Half.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto scalar_type = infer_scalar_type(self);
    // Fail fast for unsupported dtypes, before touching any argument.
    if (scalar_type != ScalarType::Double && scalar_type != ScalarType::Float &&
        scalar_type != ScalarType::Half) {
        AT_ERROR("_thnn_log_sigmoid_forward_out not supported on CUDAType for ", scalar_type);
    }
    // Identical unwrap sequence for every supported dtype; hoisted.
    auto self_th = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_log_sigmoid_forward_out", false, DeviceType::CUDA, scalar_type);
    auto output_th = checked_dense_tensor_unwrap(output, "output", 1, "_thnn_log_sigmoid_forward_out", false, DeviceType::CUDA, scalar_type);
    auto buffer_th = checked_dense_tensor_unwrap(buffer, "buffer", 1, "_thnn_log_sigmoid_forward_out", false, DeviceType::CUDA, scalar_type);
    // Dispatch to the type-suffixed kernel (Float has no infix).
    if (scalar_type == ScalarType::Double) {
        THNN_CudaDoubleLogSigmoid_updateOutput(globalContext().getTHCState(), self_th, output_th, buffer_th);
    } else if (scalar_type == ScalarType::Float) {
        THNN_CudaLogSigmoid_updateOutput(globalContext().getTHCState(), self_th, output_th, buffer_th);
    } else {
        THNN_CudaHalfLogSigmoid_updateOutput(globalContext().getTHCState(), self_th, output_th, buffer_th);
    }
    return std::tuple<Tensor &, Tensor &>(output, buffer);
}
// Legacy THNN dispatch: log-sigmoid forward on CUDA.
// Allocates fresh `output` and `buffer` tensors of self's scalar type and
// dispatches to the THNN kernel for that type (Double/Float/Half only;
// anything else errors out in the default case).
std::tuple<Tensor,Tensor> _thnn_log_sigmoid_forward(const Tensor & self) {
    // Run everything on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    // Create empty CUDA tensors (0-byte storage) of the dispatch scalar type;
    // they are passed to the kernel below as the destination arguments.
    auto output_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto output = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(output_));
    auto buffer_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto buffer = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(buffer_));
    // Per-dtype dispatch: each case validates self as a dense CUDA tensor of
    // the expected type, then calls the matching THNN kernel.
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_log_sigmoid_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaDoubleLogSigmoid_updateOutput(globalContext().getTHCState(), self_, output_, buffer_);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_log_sigmoid_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaLogSigmoid_updateOutput(globalContext().getTHCState(), self_, output_, buffer_);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_log_sigmoid_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaHalfLogSigmoid_updateOutput(globalContext().getTHCState(), self_, output_, buffer_);
            break;
        }
        default:
            AT_ERROR("_thnn_log_sigmoid_forward not supported on CUDAType for ", dispatch_scalar_type);
    }
    return std::tuple<Tensor, Tensor>(output, buffer);
}
// Legacy THNN dispatch: log-sigmoid backward, out-variant.
// Writes into the caller-provided `grad_input` (also the return value) by
// dispatching on self's scalar type (Double/Float/Half).
// `buffer` is the auxiliary tensor produced by the forward pass.
Tensor & _thnn_log_sigmoid_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & buffer) {
    // Run everything on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    // Each case unwraps all tensor arguments as dense CUDA tensors of the
    // dispatch type, then invokes the matching THNN gradient kernel.
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_log_sigmoid_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_log_sigmoid_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto buffer_ = checked_dense_tensor_unwrap(buffer, "buffer", 3, "_thnn_log_sigmoid_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 3, "_thnn_log_sigmoid_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaDoubleLogSigmoid_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_, buffer_);
            break;
        }
        case ScalarType::Float: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_log_sigmoid_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_log_sigmoid_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto buffer_ = checked_dense_tensor_unwrap(buffer, "buffer", 3, "_thnn_log_sigmoid_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 3, "_thnn_log_sigmoid_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaLogSigmoid_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_, buffer_);
            break;
        }
        case ScalarType::Half: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_log_sigmoid_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_log_sigmoid_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto buffer_ = checked_dense_tensor_unwrap(buffer, "buffer", 3, "_thnn_log_sigmoid_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 3, "_thnn_log_sigmoid_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaHalfLogSigmoid_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_, buffer_);
            break;
        }
        default:
            AT_ERROR("_thnn_log_sigmoid_backward_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    return grad_input;
}
// Legacy THNN dispatch: log-sigmoid backward.
// Allocates a fresh `grad_input` tensor of self's scalar type and fills it
// via the THNN gradient kernel for that type (Double/Float/Half).
Tensor _thnn_log_sigmoid_backward(const Tensor & grad_output, const Tensor & self, const Tensor & buffer) {
    // Run everything on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    // Empty CUDA tensor (0-byte storage) used as the kernel's destination.
    auto grad_input_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto grad_input = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(grad_input_));
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_log_sigmoid_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_log_sigmoid_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto buffer_ = checked_dense_tensor_unwrap(buffer, "buffer", 3, "_thnn_log_sigmoid_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaDoubleLogSigmoid_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_, buffer_);
            break;
        }
        case ScalarType::Float: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_log_sigmoid_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_log_sigmoid_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto buffer_ = checked_dense_tensor_unwrap(buffer, "buffer", 3, "_thnn_log_sigmoid_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaLogSigmoid_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_, buffer_);
            break;
        }
        case ScalarType::Half: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_log_sigmoid_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_log_sigmoid_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto buffer_ = checked_dense_tensor_unwrap(buffer, "buffer", 3, "_thnn_log_sigmoid_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaHalfLogSigmoid_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_, buffer_);
            break;
        }
        default:
            AT_ERROR("_thnn_log_sigmoid_backward not supported on CUDAType for ", dispatch_scalar_type);
    }
    return grad_input;
}
// Legacy THNN dispatch: randomized ReLU (RReLU) forward, out-variant.
// Writes into the caller-provided `output` (also the return value).
// `lower`/`upper` bound the random negative slope, `noise` receives/carries
// the per-element slopes, and `generator` is forwarded to the kernel.
// The hard-coded `false` after `training` is the kernel's inplace flag —
// this variant is never in-place (compare _thnn_rrelu_with_noise_forward_).
Tensor & _thnn_rrelu_with_noise_forward_out(Tensor & output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, c10::optional<at::Generator> generator) {
    // Run everything on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_rrelu_with_noise_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 2, "_thnn_rrelu_with_noise_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            // Scalar bounds are always converted to double, regardless of dtype.
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            auto output_ = checked_dense_tensor_unwrap(output, "output", 6, "_thnn_rrelu_with_noise_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaDoubleRReLU_updateOutput(globalContext().getTHCState(), self_, output_, noise_, lower_, upper_, training, false, generator);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_rrelu_with_noise_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 2, "_thnn_rrelu_with_noise_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            auto output_ = checked_dense_tensor_unwrap(output, "output", 6, "_thnn_rrelu_with_noise_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaRReLU_updateOutput(globalContext().getTHCState(), self_, output_, noise_, lower_, upper_, training, false, generator);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_rrelu_with_noise_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 2, "_thnn_rrelu_with_noise_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            auto output_ = checked_dense_tensor_unwrap(output, "output", 6, "_thnn_rrelu_with_noise_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaHalfRReLU_updateOutput(globalContext().getTHCState(), self_, output_, noise_, lower_, upper_, training, false, generator);
            break;
        }
        default:
            AT_ERROR("_thnn_rrelu_with_noise_forward_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    return output;
}
// Legacy THNN dispatch: randomized ReLU (RReLU) forward.
// Allocates a fresh `output` tensor of self's scalar type and fills it via
// the THNN kernel for that type (Double/Float/Half). The hard-coded `false`
// after `training` is the kernel's inplace flag (this variant is
// out-of-place); `generator` supplies the randomness for the noise slopes.
Tensor _thnn_rrelu_with_noise_forward(const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, c10::optional<at::Generator> generator) {
    // Run everything on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    // Empty CUDA tensor (0-byte storage) used as the kernel's destination.
    auto output_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto output = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(output_));
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_rrelu_with_noise_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 2, "_thnn_rrelu_with_noise_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            // Scalar bounds are always converted to double, regardless of dtype.
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            THNN_CudaDoubleRReLU_updateOutput(globalContext().getTHCState(), self_, output_, noise_, lower_, upper_, training, false, generator);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_rrelu_with_noise_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 2, "_thnn_rrelu_with_noise_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            THNN_CudaRReLU_updateOutput(globalContext().getTHCState(), self_, output_, noise_, lower_, upper_, training, false, generator);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_rrelu_with_noise_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 2, "_thnn_rrelu_with_noise_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            THNN_CudaHalfRReLU_updateOutput(globalContext().getTHCState(), self_, output_, noise_, lower_, upper_, training, false, generator);
            break;
        }
        default:
            AT_ERROR("_thnn_rrelu_with_noise_forward not supported on CUDAType for ", dispatch_scalar_type);
    }
    return output;
}
// Legacy THNN dispatch: randomized ReLU (RReLU) backward, out-variant.
// Writes into the caller-provided `grad_input` (also the return value).
// `noise` holds the per-element slopes sampled during the forward pass; the
// trailing hard-coded `false` is the kernel's inplace flag.
Tensor & _thnn_rrelu_with_noise_backward_out(Tensor & grad_input, const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) {
    // Run everything on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_rrelu_with_noise_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_rrelu_with_noise_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 3, "_thnn_rrelu_with_noise_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            // Scalar bounds are always converted to double, regardless of dtype.
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 6, "_thnn_rrelu_with_noise_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaDoubleRReLU_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_, noise_, lower_, upper_, training, false);
            break;
        }
        case ScalarType::Float: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_rrelu_with_noise_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_rrelu_with_noise_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 3, "_thnn_rrelu_with_noise_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 6, "_thnn_rrelu_with_noise_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaRReLU_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_, noise_, lower_, upper_, training, false);
            break;
        }
        case ScalarType::Half: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_rrelu_with_noise_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_rrelu_with_noise_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 3, "_thnn_rrelu_with_noise_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 6, "_thnn_rrelu_with_noise_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaHalfRReLU_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_, noise_, lower_, upper_, training, false);
            break;
        }
        default:
            AT_ERROR("_thnn_rrelu_with_noise_backward_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    return grad_input;
}
// Legacy THNN dispatch: randomized ReLU (RReLU) backward.
// Allocates a fresh `grad_input` tensor of self's scalar type and fills it
// via the THNN gradient kernel for that type (Double/Float/Half). The
// trailing hard-coded `false` is the kernel's inplace flag.
Tensor _thnn_rrelu_with_noise_backward(const Tensor & grad_output, const Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training) {
    // Run everything on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    // Empty CUDA tensor (0-byte storage) used as the kernel's destination.
    auto grad_input_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto grad_input = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(grad_input_));
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_rrelu_with_noise_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_rrelu_with_noise_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 3, "_thnn_rrelu_with_noise_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            // Scalar bounds are always converted to double, regardless of dtype.
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            THNN_CudaDoubleRReLU_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_, noise_, lower_, upper_, training, false);
            break;
        }
        case ScalarType::Float: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_rrelu_with_noise_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_rrelu_with_noise_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 3, "_thnn_rrelu_with_noise_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            THNN_CudaRReLU_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_, noise_, lower_, upper_, training, false);
            break;
        }
        case ScalarType::Half: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_rrelu_with_noise_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_rrelu_with_noise_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 3, "_thnn_rrelu_with_noise_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            THNN_CudaHalfRReLU_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_, noise_, lower_, upper_, training, false);
            break;
        }
        default:
            AT_ERROR("_thnn_rrelu_with_noise_backward not supported on CUDAType for ", dispatch_scalar_type);
    }
    return grad_input;
}
// Legacy THNN dispatch: randomized ReLU (RReLU) forward, in-place variant.
// Mutates and returns `self`: the kernel is called with self_ as both the
// input and output argument and with the inplace flag hard-coded to `true`
// (compare the out-of-place variants above, which pass `false`).
Tensor & _thnn_rrelu_with_noise_forward_(Tensor & self, const Tensor & noise, Scalar lower, Scalar upper, bool training, c10::optional<at::Generator> generator) {
    // Run everything on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_rrelu_with_noise_forward_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 2, "_thnn_rrelu_with_noise_forward_", false, DeviceType::CUDA, dispatch_scalar_type);
            // Scalar bounds are always converted to double, regardless of dtype.
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            THNN_CudaDoubleRReLU_updateOutput(globalContext().getTHCState(), self_, self_, noise_, lower_, upper_, training, true, generator);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_rrelu_with_noise_forward_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 2, "_thnn_rrelu_with_noise_forward_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            THNN_CudaRReLU_updateOutput(globalContext().getTHCState(), self_, self_, noise_, lower_, upper_, training, true, generator);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_rrelu_with_noise_forward_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto noise_ = checked_dense_tensor_unwrap(noise, "noise", 2, "_thnn_rrelu_with_noise_forward_", false, DeviceType::CUDA, dispatch_scalar_type);
            auto lower_ = lower.toDouble();
            auto upper_ = upper.toDouble();
            THNN_CudaHalfRReLU_updateOutput(globalContext().getTHCState(), self_, self_, noise_, lower_, upper_, training, true, generator);
            break;
        }
        default:
            AT_ERROR("_thnn_rrelu_with_noise_forward_ not supported on CUDAType for ", dispatch_scalar_type);
    }
    return self;
}
// Legacy THNN dispatch: 2-D convolution (SpatialConvolutionMM) forward,
// out-variant. Writes into caller-provided `output` plus the `columns`/`ones`
// scratch tensors used by the MM-based kernel, and returns all three.
// Unlike the activations above, this op also supports BFloat16.
// Note the argument order into the kernel: each 2-element int list is passed
// as ([1], [0]) — i.e. (width, height) — matching the THNN (kW, kH) calling
// convention. `bias` is optional (unwrap called with nullable=true), so the
// kernel receives NULL when no bias is given.
std::tuple<Tensor &,Tensor &,Tensor &> _thnn_conv2d_forward_out(Tensor & output, Tensor & columns, Tensor & ones, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
    // Run everything on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            // check_intlist<2> validates each list has exactly 2 entries.
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv2d_forward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto output_ = checked_dense_tensor_unwrap(output, "output", 6, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto columns_ = checked_dense_tensor_unwrap(columns, "columns", 6, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto ones_ = checked_dense_tensor_unwrap(ones, "ones", 6, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaDoubleSpatialConvolutionMM_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv2d_forward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto output_ = checked_dense_tensor_unwrap(output, "output", 6, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto columns_ = checked_dense_tensor_unwrap(columns, "columns", 6, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto ones_ = checked_dense_tensor_unwrap(ones, "ones", 6, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaSpatialConvolutionMM_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv2d_forward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto output_ = checked_dense_tensor_unwrap(output, "output", 6, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto columns_ = checked_dense_tensor_unwrap(columns, "columns", 6, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto ones_ = checked_dense_tensor_unwrap(ones, "ones", 6, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaHalfSpatialConvolutionMM_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            break;
        }
        case ScalarType::BFloat16: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv2d_forward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto output_ = checked_dense_tensor_unwrap(output, "output", 6, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto columns_ = checked_dense_tensor_unwrap(columns, "columns", 6, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto ones_ = checked_dense_tensor_unwrap(ones, "ones", 6, "_thnn_conv2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaBFloat16SpatialConvolutionMM_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            break;
        }
        default:
            AT_ERROR("_thnn_conv2d_forward_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    return std::tuple<Tensor &, Tensor &, Tensor &>(output, columns, ones);
}
// Legacy THNN dispatch: 2-D convolution (SpatialConvolutionMM) forward.
// Allocates fresh `output`, `columns`, and `ones` tensors (the latter two are
// the kernel's im2col/bias scratch buffers) and returns all three. Supports
// Double/Float/Half/BFloat16. Each 2-element int list is passed to the kernel
// as ([1], [0]) — (width, height) — per the THNN (kW, kH) convention, and
// `bias` is optional (unwrap called with nullable=true; NULL forwarded when
// absent).
std::tuple<Tensor,Tensor,Tensor> _thnn_conv2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding) {
    // Run everything on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    // Empty CUDA tensors (0-byte storage) handed to the kernel as destinations.
    auto output_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto output = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(output_))
;    auto columns_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto columns = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(columns_));
    auto ones_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto ones = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(ones_));
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            // check_intlist<2> validates each list has exactly 2 entries.
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv2d_forward", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            THNN_CudaDoubleSpatialConvolutionMM_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            break;
        }
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv2d_forward", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            THNN_CudaSpatialConvolutionMM_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv2d_forward", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            THNN_CudaHalfSpatialConvolutionMM_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            break;
        }
        case ScalarType::BFloat16: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv2d_forward", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            THNN_CudaBFloat16SpatialConvolutionMM_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            break;
        }
        default:
            AT_ERROR("_thnn_conv2d_forward not supported on CUDAType for ", dispatch_scalar_type);
    }
    return std::tuple<Tensor, Tensor, Tensor>(output, columns, ones);
}
// Out-variant backward for the legacy THNN 2-D convolution on CUDA.
// Dispatches on self's scalar type and forwards to the matching
// THNN_Cuda<Type>SpatialConvolutionMM_{updateGradInput,accGradParameters}
// kernels, writing into the caller-provided grad_input / grad_weight /
// grad_bias tensors. Gradients whose unwrapped handle is null (see the
// allow-undefined unwraps below) are skipped, so callers may request any
// subset of the three gradients. `columns` and `ones` are the scratch
// buffers produced by the forward pass.
std::tuple<Tensor &,Tensor &,Tensor &> _thnn_conv2d_backward_out(Tensor & grad_input, Tensor & grad_weight, Tensor & grad_bias, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & columns, const Tensor & ones) {
    // Run the THNN kernels on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            // Required inputs: must be dense CUDA tensors of the dispatched dtype.
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type)
            ;
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto columns_ = checked_dense_tensor_unwrap(columns, "columns", 7, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto ones_ = checked_dense_tensor_unwrap(ones, "ones", 8, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            // Output gradients unwrap with allow_undefined=true: a null handle
            // means "this gradient was not requested" and its kernel is skipped.
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 8, "_thnn_conv2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_weight_ = checked_dense_tensor_unwrap(grad_weight, "grad_weight", 8, "_thnn_conv2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_bias_ = checked_dense_tensor_unwrap(grad_bias, "grad_bias", 8, "_thnn_conv2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            // Spatial parameters are passed width-first ([1] then [0]) —
            // presumably THNN's (kW, kH) ordering; confirm against the THNN headers.
            if (grad_input_) THNN_CudaDoubleSpatialConvolutionMM_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            // NOTE(review): the trailing 1 is presumably the gradient
            // accumulation scale — confirm against the THNN signature.
            if (grad_weight_ || grad_bias_) THNN_CudaDoubleSpatialConvolutionMM_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, grad_bias_ ? grad_bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], 1);
            break;
        }
        // The remaining cases are identical to Double apart from the dtype
        // embedded in the THNN kernel names.
        case ScalarType::Float: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto columns_ = checked_dense_tensor_unwrap(columns, "columns", 7, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto ones_ = checked_dense_tensor_unwrap(ones, "ones", 8, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 8, "_thnn_conv2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_weight_ = checked_dense_tensor_unwrap(grad_weight, "grad_weight", 8, "_thnn_conv2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_bias_ = checked_dense_tensor_unwrap(grad_bias, "grad_bias", 8, "_thnn_conv2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            if (grad_input_) THNN_CudaSpatialConvolutionMM_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            if (grad_weight_ || grad_bias_) THNN_CudaSpatialConvolutionMM_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, grad_bias_ ? grad_bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], 1);
            break;
        }
        case ScalarType::Half: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto columns_ = checked_dense_tensor_unwrap(columns, "columns", 7, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto ones_ = checked_dense_tensor_unwrap(ones, "ones", 8, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 8, "_thnn_conv2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_weight_ = checked_dense_tensor_unwrap(grad_weight, "grad_weight", 8, "_thnn_conv2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_bias_ = checked_dense_tensor_unwrap(grad_bias, "grad_bias", 8, "_thnn_conv2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            if (grad_input_) THNN_CudaHalfSpatialConvolutionMM_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            if (grad_weight_ || grad_bias_) THNN_CudaHalfSpatialConvolutionMM_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, grad_bias_ ? grad_bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], 1);
            break;
        }
        case ScalarType::BFloat16: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto columns_ = checked_dense_tensor_unwrap(columns, "columns", 7, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto ones_ = checked_dense_tensor_unwrap(ones, "ones", 8, "_thnn_conv2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 8, "_thnn_conv2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_weight_ = checked_dense_tensor_unwrap(grad_weight, "grad_weight", 8, "_thnn_conv2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_bias_ = checked_dense_tensor_unwrap(grad_bias, "grad_bias", 8, "_thnn_conv2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            if (grad_input_) THNN_CudaBFloat16SpatialConvolutionMM_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            if (grad_weight_ || grad_bias_) THNN_CudaBFloat16SpatialConvolutionMM_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, grad_bias_ ? grad_bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], 1);
            break;
        }
        default:
            AT_ERROR("_thnn_conv2d_backward_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    // Returns the same references that were passed in (out-variant convention).
    return std::tuple<Tensor &, Tensor &, Tensor &>(grad_input, grad_weight, grad_bias);
}
// Allocating backward for the legacy THNN 2-D convolution on CUDA.
// `output_mask` selects which of {grad_input, grad_weight, grad_bias} to
// compute; an unselected gradient is returned as an undefined Tensor and
// its THNN kernel is skipped. Dispatches on self's scalar type to the
// THNN_Cuda<Type>SpatialConvolutionMM_{updateGradInput,accGradParameters}
// kernels. `columns` and `ones` are the scratch buffers from the forward pass.
std::tuple<Tensor,Tensor,Tensor> _thnn_conv2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, const Tensor & columns, const Tensor & ones, std::array<bool,3> output_mask) {
    // Run the THNN kernels on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    // For each requested gradient, allocate an empty CUDA TensorImpl of the
    // dispatched dtype (raw pointer handed to the THNN kernel below); an
    // unrequested gradient stays nullptr and is wrapped as the undefined tensor.
    auto grad_input_ = output_mask[0] ? c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release() : nullptr;
    auto grad_input = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(grad_input_ == nullptr ? (TensorImpl*)UndefinedTensorImpl::singleton() : (TensorImpl*)grad_input_));
    auto grad_weight_ = output_mask[1] ? c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release() : nullptr;
    auto grad_weight = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(grad_weight_ == nullptr ? (TensorImpl*)UndefinedTensorImpl::singleton() : (TensorImpl*)grad_weight_));
    auto grad_bias_ = output_mask[2] ? c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release() : nullptr;
    auto grad_bias = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(grad_bias_ == nullptr ? (TensorImpl*)UndefinedTensorImpl::singleton() : (TensorImpl*)grad_bias_));
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            // Inputs must be dense CUDA tensors of the dispatched dtype.
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto columns_ = checked_dense_tensor_unwrap(columns, "columns", 7, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto ones_ = checked_dense_tensor_unwrap(ones, "ones", 8, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            // Spatial parameters are passed width-first ([1] then [0]) —
            // presumably THNN's (kW, kH) ordering; the trailing 1 in
            // accGradParameters is presumably the accumulation scale.
            if (grad_input_) THNN_CudaDoubleSpatialConvolutionMM_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            if (grad_weight_ || grad_bias_) THNN_CudaDoubleSpatialConvolutionMM_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, grad_bias_ ? grad_bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], 1);
            break;
        }
        // Remaining cases mirror Double with a different dtype in the kernel names.
        case ScalarType::Float: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto columns_ = checked_dense_tensor_unwrap(columns, "columns", 7, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto ones_ = checked_dense_tensor_unwrap(ones, "ones", 8, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            if (grad_input_) THNN_CudaSpatialConvolutionMM_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            if (grad_weight_ || grad_bias_) THNN_CudaSpatialConvolutionMM_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, grad_bias_ ? grad_bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], 1);
            break;
        }
        case ScalarType::Half: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto columns_ = checked_dense_tensor_unwrap(columns, "columns", 7, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto ones_ = checked_dense_tensor_unwrap(ones, "ones", 8, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            if (grad_input_) THNN_CudaHalfSpatialConvolutionMM_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            if (grad_weight_ || grad_bias_) THNN_CudaHalfSpatialConvolutionMM_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, grad_bias_ ? grad_bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], 1);
            break;
        }
        case ScalarType::BFloat16: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto columns_ = checked_dense_tensor_unwrap(columns, "columns", 7, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto ones_ = checked_dense_tensor_unwrap(ones, "ones", 8, "_thnn_conv2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            if (grad_input_) THNN_CudaBFloat16SpatialConvolutionMM_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0]);
            if (grad_weight_ || grad_bias_) THNN_CudaBFloat16SpatialConvolutionMM_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, grad_bias_ ? grad_bias_ : NULL, columns_, ones_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], 1);
            break;
        }
        default:
            AT_ERROR("_thnn_conv2d_backward not supported on CUDAType for ", dispatch_scalar_type);
    }
    return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
// Out-variant forward for the legacy THNN depthwise 2-D convolution on CUDA.
// Dispatches on self's scalar type and runs the matching
// THNN_Cuda<Type>SpatialDepthwiseConvolution_updateOutput kernel, writing
// into the caller-provided `output`. `bias` is optional: it is unwrapped
// with allow_undefined=true and passed to the kernel as NULL when absent.
Tensor & _thnn_conv_depthwise2d_forward_out(Tensor & output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
    // Run the THNN kernel on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            // Inputs (and the out tensor) must be dense CUDA tensors of the
            // dispatched dtype; each spatial argument is a length-2 int list.
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv_depthwise2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv_depthwise2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv_depthwise2d_forward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            auto output_ = checked_dense_tensor_unwrap(output, "output", 7, "_thnn_conv_depthwise2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            // Spatial parameters are passed width-first ([1] then [0]) —
            // presumably THNN's (kW, kH) ordering; confirm against the THNN headers.
            THNN_CudaDoubleSpatialDepthwiseConvolution_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        // Remaining cases mirror Double with a different dtype in the kernel name.
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv_depthwise2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv_depthwise2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv_depthwise2d_forward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            auto output_ = checked_dense_tensor_unwrap(output, "output", 7, "_thnn_conv_depthwise2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaSpatialDepthwiseConvolution_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv_depthwise2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv_depthwise2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv_depthwise2d_forward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            auto output_ = checked_dense_tensor_unwrap(output, "output", 7, "_thnn_conv_depthwise2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaHalfSpatialDepthwiseConvolution_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        case ScalarType::BFloat16: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv_depthwise2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv_depthwise2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv_depthwise2d_forward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            auto output_ = checked_dense_tensor_unwrap(output, "output", 7, "_thnn_conv_depthwise2d_forward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            THNN_CudaBFloat16SpatialDepthwiseConvolution_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        default:
            AT_ERROR("_thnn_conv_depthwise2d_forward_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    // Returns the same reference that was passed in (out-variant convention).
    return output;
}
// Allocating forward for the legacy THNN depthwise 2-D convolution on CUDA.
// Allocates a fresh empty CUDA output tensor of the dispatched dtype, then
// dispatches on self's scalar type to the matching
// THNN_Cuda<Type>SpatialDepthwiseConvolution_updateOutput kernel.
// `bias` is optional (unwrapped with allow_undefined=true; NULL when absent).
Tensor _thnn_conv_depthwise2d_forward(const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, const Tensor & bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
    // Run the THNN kernel on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    // Raw impl pointer is handed to the THNN kernel; the Tensor wrapper below
    // owns it (reclaim) and is what we return.
    auto output_ = c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release();
    auto output = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(output_));
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            // Inputs must be dense CUDA tensors of the dispatched dtype;
            // each spatial argument is a length-2 int list.
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv_depthwise2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv_depthwise2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv_depthwise2d_forward", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            // Spatial parameters are passed width-first ([1] then [0]) —
            // presumably THNN's (kW, kH) ordering; confirm against the THNN headers.
            THNN_CudaDoubleSpatialDepthwiseConvolution_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        // Remaining cases mirror Double with a different dtype in the kernel name.
        case ScalarType::Float: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv_depthwise2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv_depthwise2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv_depthwise2d_forward", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            THNN_CudaSpatialDepthwiseConvolution_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        case ScalarType::Half: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv_depthwise2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv_depthwise2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv_depthwise2d_forward", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            THNN_CudaHalfSpatialDepthwiseConvolution_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        case ScalarType::BFloat16: {
            auto self_ = checked_dense_tensor_unwrap(self, "self", 1, "_thnn_conv_depthwise2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 2, "_thnn_conv_depthwise2d_forward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 3);
            auto bias_ = checked_dense_tensor_unwrap(bias, "bias", 4, "_thnn_conv_depthwise2d_forward", true, DeviceType::CUDA, dispatch_scalar_type);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            THNN_CudaBFloat16SpatialDepthwiseConvolution_updateOutput(globalContext().getTHCState(), self_, output_, weight_, bias_ ? bias_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        default:
            AT_ERROR("_thnn_conv_depthwise2d_forward not supported on CUDAType for ", dispatch_scalar_type);
    }
    return output;
}
// Out-variant backward for the legacy THNN depthwise 2-D convolution on CUDA.
// Dispatches on self's scalar type to the matching
// THNN_Cuda<Type>SpatialDepthwiseConvolution_{updateGradInput,accGradParameters}
// kernels, writing into the caller-provided grad_input / grad_weight.
// Note: unlike the plain conv2d backward, no grad_bias is produced here.
// Either gradient whose unwrapped handle is null (allow-undefined unwraps
// below) is skipped, so callers may request a subset.
std::tuple<Tensor &,Tensor &> _thnn_conv_depthwise2d_backward_out(Tensor & grad_input, Tensor & grad_weight, const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation) {
    // Run the THNN kernels on self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            // Required inputs: dense CUDA tensors of the dispatched dtype;
            // each spatial argument is a length-2 int list.
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv_depthwise2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv_depthwise2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv_depthwise2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            // Output gradients unwrap with allow_undefined=true: a null handle
            // means "not requested" and the corresponding kernel is skipped.
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 7, "_thnn_conv_depthwise2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_weight_ = checked_dense_tensor_unwrap(grad_weight, "grad_weight", 7, "_thnn_conv_depthwise2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            // Spatial parameters are passed width-first ([1] then [0]) —
            // presumably THNN's (kW, kH) ordering; confirm against the THNN headers.
            if (grad_input_) THNN_CudaDoubleSpatialDepthwiseConvolution_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            if (grad_weight_) THNN_CudaDoubleSpatialDepthwiseConvolution_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        // Remaining cases mirror Double with a different dtype in the kernel names.
        case ScalarType::Float: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv_depthwise2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv_depthwise2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv_depthwise2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 7, "_thnn_conv_depthwise2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_weight_ = checked_dense_tensor_unwrap(grad_weight, "grad_weight", 7, "_thnn_conv_depthwise2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            if (grad_input_) THNN_CudaSpatialDepthwiseConvolution_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            if (grad_weight_) THNN_CudaSpatialDepthwiseConvolution_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        case ScalarType::Half: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv_depthwise2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv_depthwise2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv_depthwise2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 7, "_thnn_conv_depthwise2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_weight_ = checked_dense_tensor_unwrap(grad_weight, "grad_weight", 7, "_thnn_conv_depthwise2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            if (grad_input_) THNN_CudaHalfSpatialDepthwiseConvolution_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            if (grad_weight_) THNN_CudaHalfSpatialDepthwiseConvolution_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        case ScalarType::BFloat16: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv_depthwise2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv_depthwise2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv_depthwise2d_backward_out", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            auto grad_input_ = checked_dense_tensor_unwrap(grad_input, "grad_input", 7, "_thnn_conv_depthwise2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            auto grad_weight_ = checked_dense_tensor_unwrap(grad_weight, "grad_weight", 7, "_thnn_conv_depthwise2d_backward_out", true, DeviceType::CUDA, dispatch_scalar_type);
            if (grad_input_) THNN_CudaBFloat16SpatialDepthwiseConvolution_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            if (grad_weight_) THNN_CudaBFloat16SpatialDepthwiseConvolution_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        default:
            AT_ERROR("_thnn_conv_depthwise2d_backward_out not supported on CUDAType for ", dispatch_scalar_type);
    }
    // Returns the same references that were passed in (out-variant convention).
    return std::tuple<Tensor &, Tensor &>(grad_input, grad_weight);
}
// Generated legacy-THNN binding: backward pass of the CUDA depthwise 2-D
// convolution. output_mask selects which gradients are materialized:
// [0] -> grad_input, [1] -> grad_weight; a de-selected gradient is returned
// as an undefined Tensor.
std::tuple<Tensor,Tensor> _thnn_conv_depthwise2d_backward(const Tensor & grad_output, const Tensor & self, const Tensor & weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef dilation, std::array<bool,2> output_mask) {
    // Pin all subsequent CUDA work to self's device.
    const OptionalDeviceGuard device_guard(device_of(self));
    auto dispatch_scalar_type = infer_scalar_type(self);
    // Allocate an empty result TensorImpl only when the corresponding mask bit
    // is set; otherwise fall back to the shared UndefinedTensorImpl singleton.
    auto grad_input_ = output_mask[0] ? c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release() : nullptr;
    auto grad_input = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(grad_input_ == nullptr ? (TensorImpl*)UndefinedTensorImpl::singleton() : (TensorImpl*)grad_input_));
    auto grad_weight_ = output_mask[1] ? c10::make_intrusive<TensorImpl, UndefinedTensorImpl>(c10::Storage(c10::Storage::use_byte_size_t(), 0, allocator(), true),DispatchKey::CUDA, scalarTypeToTypeMeta(dispatch_scalar_type)).release() : nullptr;
    auto grad_weight = Tensor(c10::intrusive_ptr<TensorImpl, UndefinedTensorImpl>::reclaim(grad_weight_ == nullptr ? (TensorImpl*)UndefinedTensorImpl::singleton() : (TensorImpl*)grad_weight_));
    // Dispatch on dtype to the matching type-specific THNN CUDA kernels.
    // Note the [1]/[0] index swaps in the calls below -- presumably THNN takes
    // width-first (kW, kH, sW, sH, ...) arguments; verify against THNN headers.
    switch (dispatch_scalar_type) {
        case ScalarType::Double: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv_depthwise2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv_depthwise2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv_depthwise2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            // Each gradient kernel runs only if its output was requested.
            if (grad_input_) THNN_CudaDoubleSpatialDepthwiseConvolution_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            if (grad_weight_) THNN_CudaDoubleSpatialDepthwiseConvolution_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        case ScalarType::Float: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv_depthwise2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv_depthwise2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv_depthwise2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            if (grad_input_) THNN_CudaSpatialDepthwiseConvolution_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            if (grad_weight_) THNN_CudaSpatialDepthwiseConvolution_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        case ScalarType::Half: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv_depthwise2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv_depthwise2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv_depthwise2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            if (grad_input_) THNN_CudaHalfSpatialDepthwiseConvolution_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            if (grad_weight_) THNN_CudaHalfSpatialDepthwiseConvolution_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        case ScalarType::BFloat16: {
            auto grad_output_ = checked_dense_tensor_unwrap(grad_output, "grad_output", 1, "_thnn_conv_depthwise2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto self_ = checked_dense_tensor_unwrap(self, "self", 2, "_thnn_conv_depthwise2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto weight_ = checked_dense_tensor_unwrap(weight, "weight", 3, "_thnn_conv_depthwise2d_backward", false, DeviceType::CUDA, dispatch_scalar_type);
            auto kernel_size_ = check_intlist<2>(kernel_size, "kernel_size", 4);
            auto stride_ = check_intlist<2>(stride, "stride", 5);
            auto padding_ = check_intlist<2>(padding, "padding", 6);
            auto dilation_ = check_intlist<2>(dilation, "dilation", 7);
            if (grad_input_) THNN_CudaBFloat16SpatialDepthwiseConvolution_updateGradInput(globalContext().getTHCState(), self_, grad_output_, grad_input_ ? grad_input_ : NULL, weight_, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            if (grad_weight_) THNN_CudaBFloat16SpatialDepthwiseConvolution_accGradParameters(globalContext().getTHCState(), self_, grad_output_, grad_weight_ ? grad_weight_ : NULL, kernel_size_[1], kernel_size_[0], stride_[1], stride_[0], padding_[1], padding_[0], dilation_[1], dilation_[0]);
            break;
        }
        default:
            AT_ERROR("_thnn_conv_depthwise2d_backward not supported on CUDAType for ", dispatch_scalar_type);
    }
    return std::tuple<Tensor, Tensor>(grad_input, grad_weight);
}
} // namespace th
} // namespace legacy
} // namespace native
} // namespace at
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
a4f58f3c600802354f61d7836986c0b50d2c9d83 | b1b734ab75a6fe114733d3c0b8ca5046d54b407d | /third_party/ComputeLibrary/arm_compute/runtime/CL/functions/CLSobel7x7.h | 0dc0a1c5e95513fbc06b1dc6f6438aa7efc6ae26 | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause",
"Apache-2.0",
"MIT"
] | permissive | waybarrios/video_nonlocal_net_caffe2 | 754fea2b96318d677144f16faadf59cb6b00189b | b19c2ac3ddc1836d90d7d0fccb60d710c017253e | refs/heads/master | 2020-04-20T03:15:12.286080 | 2019-01-31T20:44:01 | 2019-01-31T20:44:01 | 168,593,110 | 0 | 0 | Apache-2.0 | 2019-01-31T20:40:40 | 2019-01-31T20:40:39 | null | UTF-8 | C++ | false | false | 3,589 | h | /*
* Copyright (c) 2016, 2017 ARM Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __ARM_COMPUTE_CLSOBEL7X7_H__
#define __ARM_COMPUTE_CLSOBEL7X7_H__
#include "arm_compute/core/CL/kernels/CLFillBorderKernel.h"
#include "arm_compute/core/CL/kernels/CLSobel7x7Kernel.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/runtime/CL/CLMemoryGroup.h"
#include "arm_compute/runtime/CL/CLTensor.h"
#include "arm_compute/runtime/IFunction.h"
#include "arm_compute/runtime/IMemoryManager.h"
#include <cstdint>
#include <memory>
namespace arm_compute
{
class ICLTensor;
/** Basic function to execute sobel 7x7 filter. This function calls the following OpenCL kernels:
*
* -# @ref CLFillBorderKernel (executed if border_mode == CONSTANT or border_mode == REPLICATE)
* -# @ref CLSobel7x7HorKernel
* -# @ref CLSobel7x7VertKernel
*
*/
class CLSobel7x7 : public IFunction
{
public:
    /** Default Constructor.
     *
     * @param[in] memory_manager (Optional) Memory manager to use for the intermediate tensors; defaults to none.
     */
    CLSobel7x7(std::shared_ptr<IMemoryManager> memory_manager = nullptr);
    /** Initialise the function's source, destinations and border mode.
     *
     * @note At least one of output_x or output_y must be not NULL.
     *
     * @param[in,out] input                 Source tensor. Data types supported: U8. (Written to only for @p border_mode != UNDEFINED)
     * @param[out]    output_x              (optional) Destination for the Sobel 7x7 convolution along the X axis. Data types supported: S32.
     * @param[out]    output_y              (optional) Destination for the Sobel 7x7 convolution along the Y axis. Data types supported: S32.
     * @param[in]     border_mode           Border mode to use for the convolution.
     * @param[in]     constant_border_value (Optional) Constant value to use for borders if border_mode is set to CONSTANT.
     */
    void configure(ICLTensor *input, ICLTensor *output_x, ICLTensor *output_y, BorderMode border_mode, uint8_t constant_border_value = 0);

    // Inherited methods overridden:
    void run() override;

protected:
    CLMemoryGroup        _memory_group;   /**< Function's memory group */
    CLSobel7x7HorKernel  _sobel_hor;      /**< Sobel Horizontal 7x7 kernel */
    CLSobel7x7VertKernel _sobel_vert;     /**< Sobel Vertical 7x7 kernel */
    CLFillBorderKernel   _border_handler; /**< Kernel to handle image borders */
    CLImage              _tmp_x;          /**< Temporary buffer for Sobel X */
    CLImage              _tmp_y;          /**< Temporary buffer for Sobel Y */
};
}
#endif /*__ARM_COMPUTE_CLSOBEL7X7_H__ */
| [
"gemfield@civilnet.cn"
] | gemfield@civilnet.cn |
567b6a53655590ed63e75cdb724a2cdce04344bf | dca653bb975528bd1b8ab2547f6ef4f48e15b7b7 | /tags/wxPy-2.8.1.0/src/common/choiccmn.cpp | db5fea5e20adcd11e8c5a4c1a3ce2876297d4aab | [] | no_license | czxxjtu/wxPython-1 | 51ca2f62ff6c01722e50742d1813f4be378c0517 | 6a7473c258ea4105f44e31d140ea5c0ae6bc46d8 | refs/heads/master | 2021-01-15T12:09:59.328778 | 2015-01-05T20:55:10 | 2015-01-05T20:55:10 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,589 | cpp | /////////////////////////////////////////////////////////////////////////////
// Name: src/common/choiccmn.cpp
// Purpose: common (to all ports) wxChoice functions
// Author: Vadim Zeitlin
// Modified by:
// Created: 26.07.99
// RCS-ID: $Id$
// Copyright: (c) wxWidgets team
// Licence: wxWindows licence
/////////////////////////////////////////////////////////////////////////////
// ============================================================================
// declarations
// ============================================================================
// ----------------------------------------------------------------------------
// headers
// ----------------------------------------------------------------------------
// For compilers that support precompilation, includes "wx.h".
#include "wx/wxprec.h"
#ifdef __BORLANDC__
#pragma hdrstop
#endif
#if wxUSE_CHOICE
#include "wx/choice.h"
#ifndef WX_PRECOMP
#endif
const wxChar wxChoiceNameStr[] = wxT("choice");
// ============================================================================
// implementation
// ============================================================================
wxChoiceBase::~wxChoiceBase()
{
    // this destructor is required for Darwin
    // (defining it out-of-line here presumably also anchors the class's
    // vtable to this translation unit -- standard wx practice)
}
// ----------------------------------------------------------------------------
// misc
// ----------------------------------------------------------------------------
// Programmatically emulate a user selection: apply the item index carried by
// the event, then push the event through the normal processing chain.
void wxChoiceBase::Command(wxCommandEvent& event)
{
    const int item = event.GetInt();
    SetSelection(item);

    // The result of event processing is deliberately ignored.
    (void)ProcessEvent(event);
}
#endif // wxUSE_CHOICE
| [
"RD@c3d73ce0-8a6f-49c7-b76d-6d57e0e08775"
] | RD@c3d73ce0-8a6f-49c7-b76d-6d57e0e08775 |
05d8a4ee612a8faae2c73f918e177d43a4d8f254 | d1cd16d98ee35d1e57ff0185b2b412d8fd7b9bb3 | /examples/abitest/client/abitest.cc | b0710794c4de5ccd48145877fb6d64ad53d90569 | [
"Apache-2.0"
] | permissive | axel883/oak | c17b291dbea9cfdee5789fad2eb1cbc28a9c20cc | 3ca461e2d2a79c059eed61b79d1f526d883b60e6 | refs/heads/master | 2020-08-03T08:42:38.403923 | 2019-09-27T06:06:46 | 2019-09-27T11:45:48 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 7,605 | cc | /*
* Copyright 2019 The Project Oak Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl/memory/memory.h"
#include "asylo/util/logging.h"
#include "examples/abitest/proto/abitest.grpc.pb.h"
#include "examples/abitest/proto/abitest.pb.h"
#include "examples/utils/utils.h"
#include "google/protobuf/text_format.h"
#include "include/grpcpp/grpcpp.h"
#include "oak/client/application_client.h"
#include "oak/common/app_config.h"
#include "oak/proto/manager.grpc.pb.h"
ABSL_FLAG(std::string, manager_address, "127.0.0.1:8888",
"Address of the Oak Manager to connect to");
ABSL_FLAG(std::vector<std::string>, module, std::vector<std::string>{},
"Files containing the compiled WebAssembly modules (as 'backend,frontend')");
ABSL_FLAG(std::string, test_filter, "", "Filter indicating which tests to run");
using ::oak::examples::abitest::ABITestRequest;
using ::oak::examples::abitest::ABITestResponse;
using ::oak::examples::abitest::OakABITestService;
// Application config as text proto. Deliberately use non-default names for
// nodes and ports to confirm that nothing has been accidentally hard-coded.
static const char* app_config_textproto = R"raw(nodes {
node_name: "frontend"
web_assembly_node {
module_bytes: "<filled in later>"
ports {
name: "gRPC_input"
type: IN
}
ports {
name: "gRPC_output"
type: OUT
}
ports {
name: "logging_port"
type: OUT
}
ports {
name: "to_backend"
type: OUT
}
ports {
name: "from_backend"
type: IN
}
}
}
nodes {
node_name: "backend"
web_assembly_node {
module_bytes: "<filled in later>"
ports {
name: "be_logging_port"
type: OUT
}
ports {
name: "from_frontend"
type: IN
}
ports {
name: "to_frontend"
type: OUT
}
}
}
nodes {
node_name: "grpc_server"
grpc_server_node {}
}
nodes {
node_name: "logging_node"
log_node {}
}
channels {
source_endpoint {
node_name: "grpc_server"
port_name: "request"
}
destination_endpoint {
node_name: "frontend"
port_name: "gRPC_input"
}
}
channels {
source_endpoint {
node_name: "frontend"
port_name: "gRPC_output"
}
destination_endpoint {
node_name: "grpc_server"
port_name: "response"
}
}
channels {
source_endpoint {
node_name: "frontend"
port_name: "logging_port"
}
destination_endpoint {
node_name: "logging_node"
port_name: "in"
}
}
channels {
source_endpoint {
node_name: "backend"
port_name: "be_logging_port"
}
destination_endpoint {
node_name: "logging_node"
port_name: "in"
}
}
channels {
source_endpoint {
node_name: "frontend"
port_name: "to_backend"
}
destination_endpoint {
node_name: "backend"
port_name: "from_frontend"
}
}
channels {
source_endpoint {
node_name: "backend"
port_name: "to_frontend"
}
destination_endpoint {
node_name: "frontend"
port_name: "from_backend"
}
}
)raw";
// Invokes the RunTests RPC with the given name filter and logs one pass/fail
// line per returned result. Returns true iff the call succeeded and every
// individual test passed.
static bool run_tests(OakABITestService::Stub* stub, const std::string& filter) {
  ABITestRequest request;
  request.set_filter(filter);
  LOG(INFO) << "Run tests matching: '" << filter << "'";

  grpc::ClientContext context;
  ABITestResponse response;
  const grpc::Status call_status = stub->RunTests(&context, request, &response);
  if (!call_status.ok()) {
    LOG(WARNING) << "Could not call RunTests('" << filter << "'): " << call_status.error_code()
                 << ": " << call_status.error_message();
    return false;
  }

  bool all_passed = true;
  for (const auto& result : response.results()) {
    LOG(INFO) << "[ " << (result.success() ? " OK " : "FAIL") << " ] " << result.name();
    if (result.success()) {
      continue;
    }
    all_passed = false;
    LOG(INFO) << "  Details: " << result.details();
  }
  return all_passed;
}
// Test driver: creates a two-Wasm-node Oak Application via the Manager, runs
// the ABI test suite against it over gRPC, then requests termination.
// Returns 0 on success, 1 if any test fails or teardown fails.
int main(int argc, char** argv) {
  int rc = 0;
  absl::ParseCommandLine(argc, argv);
  // --module is "backend,frontend": modules[0] is the backend Wasm file,
  // modules[1] the frontend.
  std::vector<std::string> modules = absl::GetFlag(FLAGS_module);
  if (modules.size() < 2) {
    LOG(QFATAL) << "Need --module=backend,frontend flag";
  }
  // Connect to the Oak Manager.
  auto channel =
      grpc::CreateChannel(absl::GetFlag(FLAGS_manager_address), grpc::InsecureChannelCredentials());
  auto manager_stub = oak::Manager::NewStub(channel, grpc::StubOptions());
  // Load the Oak Modules to execute. This needs to be compiled from Rust to WebAssembly separately.
  std::string backend_module_bytes = oak::utils::read_file(modules[0]);
  std::string frontend_module_bytes = oak::utils::read_file(modules[1]);
  // Build an application configuration with two Wasm nodes.
  auto config = absl::make_unique<oak::ApplicationConfiguration>();
  google::protobuf::TextFormat::MergeFromString(app_config_textproto, config.get());
  // Add the Wasm module bytes to the config.
  for (auto& node : *config->mutable_nodes()) {
    if (!node.has_web_assembly_node()) {
      continue;
    }
    if (node.node_name() == "frontend") {
      node.mutable_web_assembly_node()->set_module_bytes(frontend_module_bytes);
    } else if (node.node_name() == "backend") {
      node.mutable_web_assembly_node()->set_module_bytes(backend_module_bytes);
    }
  }
  if (!ValidApplicationConfig(*config)) {
    LOG(QFATAL) << "Application config is not valid";
  }
  grpc::ClientContext create_ctx;
  oak::CreateApplicationRequest create_req;
  oak::CreateApplicationResponse create_rsp;
  // Ownership of the config protobuf is transferred into the request here.
  create_req.set_allocated_application_configuration(config.release());
  LOG(INFO) << "Creating multi-Node Oak Application";
  grpc::Status status = manager_stub->CreateApplication(&create_ctx, create_req, &create_rsp);
  if (!status.ok()) {
    LOG(QFATAL) << "Failed: " << status.error_code() << '/' << status.error_message() << '/'
                << status.error_details();
  }
  // The Manager reports the gRPC port the new application listens on.
  std::stringstream addr;
  addr << "127.0.0.1:" << create_rsp.grpc_port();
  std::string application_id(create_rsp.application_id());
  LOG(INFO) << "Connecting to Oak Application id=" << application_id << ": " << addr.str();
  oak::ApplicationClient::InitializeAssertionAuthorities();
  // Connect to the newly created Oak Application.
  auto stub = OakABITestService::NewStub(grpc::CreateChannel(
      addr.str(), asylo::EnclaveChannelCredentials(asylo::BidirectionalNullCredentialsOptions())));
  // Invoke the application.
  if (!run_tests(stub.get(), absl::GetFlag(FLAGS_test_filter))) {
    rc = 1;
  }
  // Request termination of the Oak Application.
  LOG(INFO) << "Terminating application id=" << application_id;
  grpc::ClientContext term_ctx;
  oak::TerminateApplicationRequest term_req;
  oak::TerminateApplicationResponse term_rsp;
  term_req.set_application_id(application_id);
  LOG(INFO) << "Terminating Oak Application";
  status = manager_stub->TerminateApplication(&term_ctx, term_req, &term_rsp);
  if (!status.ok()) {
    // Termination failure is reported but does not mask earlier test results
    // beyond setting the exit code.
    LOG(ERROR) << "Termination failed: " << status.error_code() << '/' << status.error_message()
               << '/' << status.error_details();
    rc = 1;
  }
  return rc;
}
| [
"drysdale@google.com"
] | drysdale@google.com |
ca3e317aec72ee1b0d45ebc5ef9c717faed20f95 | a62127105b45751fc23d62c83cc5e6fc26ac0bba | /DesignPatterns/Maze/Door.cpp | e80f63a9e4482387f506302159fdebd069c09c58 | [] | no_license | CloakMe/MyStuff | d4be758f543242802fda6163b94cbc6d5db4ae98 | ccd29bbc002ca26dab9e8874a4c64601ab64f2f6 | refs/heads/master | 2022-10-15T16:37:28.728167 | 2022-10-13T07:10:07 | 2022-10-13T07:10:07 | 16,573,913 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 292 | cpp | #include "Door.h"
using namespace MazeApp;
// A door starts out closed and remembers the two rooms it joins.
Door::Door(Room* room1, Room* room2)
{
    mIsOpen = false;
    mRoom1 = room1;
    mRoom2 = room2;
}
// Given one of the two rooms this door connects, return the room on the
// opposite side; returns 0 when the argument is neither connected room.
Room* Door::OtherSideFrom(Room* room)
{
    if (room == mRoom1)
        return mRoom2;

    return (room == mRoom2) ? mRoom1 : 0;
}
void Door::Enter()
{
} | [
"ff@abv.bg"
] | ff@abv.bg |
aa6fb8a5bb7ca383965a7c38c56dafbe1a4bfdfd | 333908d32505e317ec683fbfb3f4ffd7d7d2198c | /project/CatchAFairy/Ishikawa/DirectX/Texture/ITFileTextureLoader.h | 358e23c14ee3a51e039e85a58f8830c7ca322be2 | [] | no_license | TakayoshiIshikawa/CatchAFairy | ac4a963802a1d7bf9f33d48102a00d2bab557e7a | 645b690612744dc759b88c6ba488bf9b3e466181 | refs/heads/master | 2020-09-12T08:55:29.492895 | 2019-11-29T01:10:43 | 2019-11-29T01:10:43 | 222,374,532 | 0 | 0 | null | null | null | null | SHIFT_JIS | C++ | false | false | 1,623 | h | //=============================================================================
// ITFileTextureLoader.h
//
// 自作ファイルテクスチャローダのヘッダファイル
//
// Copyright(c) 2019 Ishikawa Takayoshi All Rights Reserved.
//=============================================================================
#ifndef __IT_FILE_TEXTURE_LOADER_H__
#define __IT_FILE_TEXTURE_LOADER_H__
struct ID3D11Device;
struct ID3D11Texture2D;
struct ID3D11ShaderResourceView;
#include "Ishikawa/Common/PatternBase/SingletonBase.h"
namespace Ishikawa{
namespace DirectX{
namespace Texture{
/// <summary>
/// 自作ファイルテクスチャローダ
/// </summary>
class FileTextureLoader final : public ::Ishikawa::Common::SingletonBase{
public:
// GetInstance<FileTextureLoader>()でアクセスするためフレンド化
friend class Ishikawa::Common::SingletonManager;
private:
// シングルトンインスタンス
static FileTextureLoader* instance;
private:
// デフォルトコンストラクタ
FileTextureLoader();
// デストラクタ
~FileTextureLoader();
private:
/// <summary>コピーコンストラクタ[削除]</summary>
FileTextureLoader(const FileTextureLoader&) = delete;
/// <summary>代入演算子[削除]</summary>
FileTextureLoader& operator=(const FileTextureLoader&) = delete;
public:
// 読み込み
void Load(
ID3D11Device* const _device,
const wchar_t* const _filepath,
ID3D11Texture2D** _texture,
ID3D11ShaderResourceView** _shaderResourceView
);
};
}
}
}
#endif
| [
"tkis0rei@gmail.com"
] | tkis0rei@gmail.com |
7d7cf227fdbb0ff08ff05c64bfb5c5a7219a4e68 | 9655c1a76de83187abaafe13c5f0ae0d38746a93 | /July/Review 2/Coral.cpp | 22fe72c9cffdb0f268f1de8bf7a34a9a7ef89a67 | [] | no_license | PunnyOz2/POSN-Programming | 26a2df48e4f4c164b3c364d00ebef85b5df78ccd | c89cf7765354f8b5ff1f6f95b8382251f5ca734c | refs/heads/main | 2023-05-25T09:04:23.802021 | 2021-06-13T05:43:15 | 2021-06-13T05:43:15 | 376,192,660 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 867 | cpp | /*
TASK:
LANG: CPP
AUTHOR: Pun~
SCHOOL: CRU
*/
#include<bits/stdc++.h>
using namespace std;
int tree[5000010];
int countt;
// Marks the segment [a,b] as covered in the binary tree stored in the global
// array `tree` (1 = node fully covered, 0 = not covered). `now` indexes the
// node spanning [l,r] (children at 2*now and 2*now+1).
// Fix: removed the unused local `int ch=0;`.
// NOTE(review): the recursion is unusual -- on an exact-cover hit it walks
// *up* the tree (now/2) to merge completed siblings, and otherwise descends
// only into a child already marked covered. Also `r*2-l` as the parent's
// right bound looks off by one for a left child; verify against the intended
// puzzle semantics.
void update(int l,int r,int a,int b,int now){
    if(r<a || l>b)return;          // [a,b] does not intersect this node
    if(l==r)return;                // leaf: nothing further to do
    if(l==a && r==b){
        if(tree[now]==0)tree[now]=1;
        // When both children are full, propagate the covered flag upward.
        if(tree[now*2]==1 && tree[now*2+1]==1 && now!=1)tree[now]=1,update(l,r*2-l,a,b,now/2);
    }
    int mid=(l+r)/2;
    // Descend into whichever child is already fully covered.
    if(tree[now*2]==1)update(mid+1,r,a,b,now*2+1);
    else if(tree[now*2+1]==1)update(l,mid,a,b,now*2);
}
int main()
{
    // n: tree size parameter, k: number of pieces; ans: first step at which
    // the whole range becomes covered (-1 if never).
    int n,k,ans=-1,i,a,b,space;
    scanf("%d %d",&n,&k);
    for(i=1;i<=k;i++){
        scanf("%d %d",&a,&b);
        // Piece covers 2^a consecutive positions starting at b.
        space=1<<a;
        // NOTE(review): the root is told it spans [0, n-1], yet completion is
        // tested against 1<<n leaves below -- confirm whether (1<<n)-1 was
        // intended as the right bound.
        update(0,n-1,b,b+space-1,1);
        // NOTE(review): `countt` is only ever modified here and never by
        // update(), so countt==1<<n can never become true and ans stays -1.
        // Confirm whether update() was meant to increment countt per newly
        // covered leaf.
        if(countt==1<<n){
            ans=i;
            countt++;
        }
    }
    printf("%d\n",ans);
    return 0;
}
/*
1 3
0 0
1 0
0 1
*/
| [
"punnyoz1103@gmail.com"
] | punnyoz1103@gmail.com |
9543ac73355443fa4d4d70d4684f6287c17c8a8d | efaa99151040b620edba3dd5eb91e7eb7cc48b6b | /schooltime/mainwindow.cpp | ba676680b2c104d58cd60b2c465e15f6957962a0 | [] | no_license | emelon8/schooltime | 863b60d42d241352a7eda980a2dfd63e37a41637 | 7e59f65f26e95aad045cf050f53bbe5ff2c52453 | refs/heads/master | 2021-01-11T22:06:32.095192 | 2017-01-16T22:17:39 | 2017-01-16T22:17:39 | 78,925,501 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,532 | cpp | #include "mainwindow.h"
#include "ui_mainwindow.h"
#include <string>
#include <iostream>
#include <vector>
using namespace std;
// Builds the UI and wires up the button handlers.
MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);
    // Slots below do not follow Qt's on_<object>_<signal>() naming, so they
    // must be connected explicitly.
    QObject::connect(ui->openArrivalButton, SIGNAL(clicked()), this, SLOT(openArrivalButton_wasclicked()));
    QObject::connect(ui->openDepartureButton, SIGNAL(clicked()), this, SLOT(openDepartureButton_wasclicked()));
    // Bug fix: on_computeButton_clicked() matches the auto-connection naming
    // convention, so setupUi()'s QMetaObject::connectSlotsByName() already
    // connects it; the previous explicit connect() here made the slot fire
    // twice per click. The duplicate connect was removed.
}
MainWindow::~MainWindow()
{
    // The window owns the generated UI object; release it here.
    delete ui;
}
void MainWindow::openArrivalButton_wasclicked()
{
string openfilestring = (ui->openArrivalEdit->text()).toStdString();
myschooltime.readArrivalTime(openfilestring);
// string openfilestring = openfilestring_temp;
// for (string::iterator findtheslash = openfilestring_temp.begin();findtheslash != openfilestring_temp.end();findtheslash++) {
// if (*findtheslash == "\\")
// openfilestring.replace(findtheslash-1,0,"\\\\");
// }
}
void MainWindow::openDepartureButton_wasclicked()
{
string openfilestring = (ui->openDepartureEdit->text()).toStdString();
myschooltime.readDepartureTime(openfilestring);
}
// Computes the average daily school time and displays it as hours/minutes.
void MainWindow::on_computeButton_clicked()
{
    const double avgTime = myschooltime.avgdailyschooltime();
    ui->hourslineEdit->setText(QString::number(myschooltime.schoolhours(avgTime)));
    ui->minuteslineEdit->setText(QString::number(myschooltime.schoolminutes(avgTime)));
}
| [
"eric.melonakos@gmail.com"
] | eric.melonakos@gmail.com |
5151deebe64f0d0f60925822d6bc604cfb25c118 | c60331c2fa72988af73c688d2f222c185559c080 | /demo/process/reagentdialog.cpp | 041b475bde3a4be4b50361ac275a76f3c1df5810 | [] | no_license | heyuyi/qt-gui | 4610b82f75a69c26d003ef9b2bdcc960a24b48f1 | 3588e859e2beeaae6fdeea7d7e7843e8f1b58743 | refs/heads/master | 2020-04-02T20:07:15.553652 | 2016-08-04T04:40:40 | 2016-08-04T04:40:40 | 60,269,964 | 2 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 2,658 | cpp | #include "reagentdialog.h"
#include "ui_reagentdialog.h"
#include "base/systembase.h"
// Frameless dialog: sets up the background, the initial button artwork, the
// change-animation timer, and a 1 Hz clock that refreshes the date label.
ReagentDialog::ReagentDialog(QWidget *parent) :
    QDialog(parent, Qt::FramelessWindowHint),
    ui(new Ui::ReagentDialog)
{
    ui->setupUi(this);
    setFixedSize(SystemBase::dialogWidth, SystemBase::dialogHeight);

    // Paint the shared background image behind the whole dialog.
    QPalette bgPalette;
    bgPalette.setBrush(QPalette::Background, QBrush(QPixmap(SystemBase::path + QString("/../resource/base/background.jpg"))));
    setPalette(bgPalette);

    // All buttons start in their unpressed ("up") state.
    ui->pictureLabel->setPixmap(QPixmap(SystemBase::path + QString("/../resource/reagent/00.jpg")));
    ui->changeLabel->setPixmap(QPixmap(SystemBase::path + QString("/../resource/reagent/change_up.jpg")));
    ui->finishLabel->setPixmap(QPixmap(SystemBase::path + QString("/../resource/reagent/finish_up.jpg")));
    ui->confirmLabel->setPixmap(QPixmap(SystemBase::path + QString("/../resource/reagent/confirm_up.jpg")));

    // Drives the frame-by-frame change animation (started on demand).
    changeTimer = new QTimer(this);
    connect(changeTimer, SIGNAL(timeout()), this, SLOT(changeTimerSLOT()));

    // Show today's date immediately, then refresh it every second.
    ui->dateLabel->setText(QDate::currentDate().toString("yyyy/MM/dd"));
    QTimer *clockTimer = new QTimer(this);
    connect(clockTimer, SIGNAL(timeout()), this, SLOT(dateTimerSLOT()));
    clockTimer->start(1000);
}
ReagentDialog::~ReagentDialog()
{
    // The dialog owns the generated UI object; release it here.
    // (The QTimers are parented to the dialog and cleaned up by Qt.)
    delete ui;
}
void ReagentDialog::dateTimerSLOT(void)
{
ui->dateLabel->setText(QDate::currentDate().toString("yyyy/MM/dd"));
}
// Advances the change animation by one frame; frames are the images
// "10.jpg" .. "100.jpg". Stops the timer once all frames have been shown.
void ReagentDialog::changeTimerSLOT(void)
{
    changeCnt++;
    if(changeCnt > 10) {
        changeTimer->stop();
        return;
    }
    ui->pictureLabel->setPixmap(QPixmap(SystemBase::path + "/../resource/reagent/" + QString::number(changeCnt) + "0.jpg"));
}
void ReagentDialog::on_changeLabel_pressed()
{
ui->changeLabel->setPixmap(QPixmap(SystemBase::path + QString("/../resource/reagent/change_down.jpg")));
}
void ReagentDialog::on_changeLabel_released()
{
changeCnt = 0;
changeTimer->start(300);
ui->changeLabel->setPixmap(QPixmap(SystemBase::path + QString("/../resource/reagent/change_up.jpg")));
}
void ReagentDialog::on_finishLabel_pressed()
{
ui->finishLabel->setPixmap(QPixmap(SystemBase::path + QString("/../resource/reagent/finish_down.jpg")));
}
void ReagentDialog::on_finishLabel_released()
{
ui->finishLabel->setPixmap(QPixmap(SystemBase::path + QString("/../resource/reagent/finish_up.jpg")));
}
void ReagentDialog::on_confirmLabel_pressed()
{
ui->confirmLabel->setPixmap(QPixmap(SystemBase::path + QString("/../resource/reagent/confirm_down.jpg")));
}
// Close the dialog with an accepted result. The button artwork is
// intentionally left in its pressed state since the dialog disappears
// immediately.
void ReagentDialog::on_confirmLabel_released()
{
    accept();
}
| [
"1244620185@qq.com"
] | 1244620185@qq.com |
41c49a3224a04b632b9bb7c6dc75ed9b9d41ddc9 | db84bf6382c21920c3649b184f20ea48f54c3048 | /lngsgesetups/src/MGEPVDetector.cc | 084ffd3105a22fac0ce5658cc3c618e371ef77e7 | [] | no_license | liebercanis/MaGeLAr | 85c540e3b4c5a48edea9bc0520c9d1a1dcbae73c | aa30b01f3c9c0f5de0f040d05681d358860a31b3 | refs/heads/master | 2020-09-20T12:48:38.106634 | 2020-03-06T18:43:19 | 2020-03-06T18:43:19 | 224,483,424 | 2 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 48,555 | cc | //---------------------------------------------------------------------------//
//bb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nu//
// //
// //
// MaGe Simulation //
// //
// This code implementation is the intellectual property of the //
// MAJORANA and Gerda Collaborations. It is based on Geant4, an //
// intellectual property of the RD44 GEANT4 collaboration. //
// //
// ********************* //
// //
// Neither the authors of this software system, nor their employing //
// institutes, nor the agencies providing financial support for this //
// work make any representation or warranty, express or implied, //
// regarding this software system or assume any liability for its use. //
// By copying, distributing or modifying the Program (or any work based //
// on on the Program) you indicate your acceptance of this statement, //
// and all its terms. //
// //
//bb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nubb0nu//
//---------------------------------------------------------------------------//
//
// $Id: MGEPVDetector.cc,v 1.1 2010-03-04 16:39:00 matthiasl Exp $
//
// CLASS IMPLEMENTATION: MGEPVDetector
//
//---------------------------------------------------------------------------//
/**
* SPECIAL NOTES:
*/
//
//---------------------------------------------------------------------------//
/**
* AUTHOR: Matthias Laubenstein
* CONTACT:
* FIRST SUBMISSION:
*
* REVISION:
*
* 03-08-2010, Added in MaGe, Luciano
*
*/
//---------------------------------------------------------------------------//
//
//#include "globals.hh"
#include "G4Box.hh"
#include "G4Sphere.hh"
#include "G4LogicalVolume.hh"
#include "G4Material.hh"
#include "G4VisAttributes.hh"
#include "G4Material.hh"
#include "G4SubtractionSolid.hh"
#include "G4UnionSolid.hh"
#include "G4Box.hh"
#include "G4Trap.hh"
#include "G4Tubs.hh"
#include "G4Cons.hh"
#include "G4ThreeVector.hh"
#include "G4VisAttributes.hh"
#include "G4Colour.hh"
#include "G4PVPlacement.hh"
#include "G4SDManager.hh"
#include "G4GeometryManager.hh"
#include "gerdageometry/GEGeometrySD.hh"
#include "geometry/MGGeometryGlobals.hh"
#include "io/MGLogger.hh"
//---------------------------------------------------------------------------//
#include "lngsgesetups/MGEPVDetector.hh"
#include "lngsgesetups/MGEPVDetectorMessenger.hh"
using namespace CLHEP;
//---------------------------------------------------------------------------//
// Constructor: installs the default sample geometry ("smarinelli"), the
// default sample/crystal dimensions (Geant4 units), and the UI messenger
// that lets these values be overridden via macro commands.
MGEPVDetector::MGEPVDetector() : MGGeometryDetector("")
{
  //set defaults
  sample_geom="smarinelli";
  //box height is also liquid level in the standard box
  box_height=1.1*cm;
  box_width=2.*cm;
  box_thickness=2.*cm;
//  smarinelli_height=5.2*cm;
  // Depths of the three steps of the small Marinelli beaker.
  smarinelli_depth1=16.4*cm;
  smarinelli_depth2=10.15*cm;
  smarinelli_depth3=7.8*cm;
  /*
  G4double marinellibox_height=17.6*cm;
  G4double marinellibox_width=18.5*cm;
  G4double marinellibox_thickness=17.5*cm;
  G4double marinellitube_length=11.1*cm;
  G4double marinellitube_outrad=4.95*cm;
  */
/*  tube_innrad=0*cm;
  tube_outrad=3.65*cm;
  tube_length=2.8*cm;
*/
  // Full-revolution tube by default.
  tube_angle=twopi;
  // Near-point-like sphere sample by default.
  sphere_rad=0.00001*cm;
  // Sample position relative to the detector (x, y, z offsets).
  s_x=0.*cm;
  s_y=0.*cm;
  s_z=1.6*cm;
  //crystal parameters
  d_z=0.4*cm;
  d_dead=0.15*cm;
//  f_size=1.;
  // Core-hole and crystal dimensions (diameters/lengths).
  hole_d=0.97*cm;
  hole_l=6.57*cm;
  crystal_d=7.72*cm;
  crystal_l=7.87*cm;
  // Messenger exposes the parameters above to the macro command interface.
  theMessenger = new MGEPVDetectorMessenger(this);
}
//---------------------------------------------------------------------------//
MGEPVDetector::~MGEPVDetector()
{
  // Release the UI messenger allocated in the constructor.
  delete theMessenger;
}
//---------------------------------------------------------------------------//
void MGEPVDetector::ConstructDetector()
{
// Retrieve materials from the Table
G4Material* Vacuum = G4Material::GetMaterial("Vacuum");
// G4Material* Brass = G4Material::GetMaterial("Brass");
// G4Material* ProportionalGas = G4Material::GetMaterial("ProportionalGas");
G4Material* N2Gas = G4Material::GetMaterial("NitrogenGas");
G4Material* Ge = G4Material::GetMaterial("NaturalGe");
G4Material* GeLi = G4Material::GetMaterial("Germanium/Lithium");
G4Material* Pb = G4Material::GetMaterial("MetalLead");
// G4Material* Fe = G4Material::GetMaterial("MetalIron");
G4Material* Cu = G4Material::GetMaterial("MetalCopper");
G4Material* Au = G4Material::GetMaterial("MetalGold");
// G4Material* Vespel = G4Material::GetMaterial("Vespel");
G4Material* Acrylic = G4Material::GetMaterial("Acrylic");
G4Material* PE = G4Material::GetMaterial("PE");
G4Material* PS = G4Material::GetMaterial("PS");
G4Material* Polypropylene = G4Material::GetMaterial("Polypropylene");
// G4Material* Teflon = G4Material::GetMaterial("Teflon");
G4Material* Air = G4Material::GetMaterial("Air");
G4Material* PET = G4Material::GetMaterial("PET");
// G4Material* epoxy = G4Material::GetMaterial("Epoxy");
// G4Material* CU5 = G4Material::GetMaterial("CU5");
// G4Material* PPO = G4Material::GetMaterial("Polypropylene");
// G4Material* dmp = G4Material::GetMaterial("DMP");
// Visualization attributes
/* G4VisAttributes * red= new G4VisAttributes( G4Colour(255/255. ,0/255. ,12/255. ));
G4VisAttributes * Brasscolour = red;
*/
G4VisAttributes * Sample_Colour = new G4VisAttributes( G4Colour(255/255. ,0/255. ,12/255. ));
Sample_Colour -> SetForceSolid(true);
G4VisAttributes * skyblue = new G4VisAttributes( G4Colour(175/255. ,164/255. ,255/255. ));
G4VisAttributes * Vacuumcolour = skyblue;
G4VisAttributes * orange= new G4VisAttributes( G4Colour(241/255. ,224/255. ,0/255. ));
G4VisAttributes * Acryliccolour = orange;
G4VisAttributes * darkviolet = new G4VisAttributes( G4Colour(64/255. ,0/255. ,64/255. ));
G4VisAttributes * Pbcolour = darkviolet;
G4VisAttributes * grass= new G4VisAttributes( G4Colour(0/255. ,239/255. ,0/255. ));
G4VisAttributes * Cucolour = grass;
G4VisAttributes * violet= new G4VisAttributes( G4Colour(224/255. ,0/255. ,255/255. ));
G4VisAttributes * Gecolour = violet;
G4VisAttributes * purple= new G4VisAttributes( G4Colour(187/255. ,28/255. ,0/255. ));
G4VisAttributes * GeLicolour = purple;
/* G4VisAttributes * redbrown = new G4VisAttributes( G4Colour(123/255. ,45/255. ,65/255. ));
G4VisAttributes * Fecolour = redbrown;
//Fecolour -> SetForceSolid(true);
*/
G4VisAttributes * blue = new G4VisAttributes( G4Colour(0/255. ,0/255. ,255/255. ));
G4VisAttributes * N2Gascolour = blue;
G4VisAttributes * grayva = new G4VisAttributes( G4Colour(210/255. ,213/255. ,210/255. ));
G4VisAttributes * Aucolour = grayva;
// Alcolour -> SetForceSolid(true);
G4VisAttributes * yellowgray= new G4VisAttributes( G4Colour(214/255. ,214/255. ,207/255. ));
G4VisAttributes * PEcolour = yellowgray;
G4VisAttributes * indigo= new G4VisAttributes( G4Colour(0/255. ,0/255. ,190/255. ));
G4VisAttributes * PETcolour = indigo;
//------------------------------------------------------ volumes
//------------------------------ experimental hall (world volume)
G4double expHall_x = 1*m;
G4double expHall_y = 1*m;
G4double expHall_z = 1*m;
G4Box* experimentalHall_box
= new G4Box("expHall_box", expHall_x, expHall_y, expHall_z);
G4LogicalVolume* experimentalHall_log = new
G4LogicalVolume(experimentalHall_box, Air, "expHall_log", 0, 0, 0);
experimentalHall_log -> SetVisAttributes(G4VisAttributes::Invisible);
// ----------------------------------------- a lead box
// --- this box has been cut in an inner and outer volume, for setting
// --- different cuts for secondaries in the volumes
G4Box* leadshield_box
= new G4Box("leadshield_box", 32.5*cm, 32.5*cm, 50.0*cm);
G4LogicalVolume* leadshield_log
= new G4LogicalVolume(leadshield_box, Pb, "leadshield_log", 0, 0, 0);
leadshield_log -> SetVisAttributes(Pbcolour);
new G4PVPlacement(0, G4ThreeVector(), leadshield_log, "leadshield", experimentalHall_log, false, 0);
// ----------------------------------------- a copper shielding
G4Box* coppershield_box
= new G4Box("coppershield_box", 12.5*cm, 12.5*cm, 25.0*cm);
G4LogicalVolume* coppershield_log
= new G4LogicalVolume(coppershield_box, Cu, "coppershield_log", 0, 0, 0);
coppershield_log -> SetVisAttributes(Cucolour);
new G4PVPlacement(0, G4ThreeVector(), coppershield_log, "coppershield",
leadshield_log, false, 0);
// ----------------------------------------- the nitrogen filled chamber
G4Box* Nfilledchamber_box
= new G4Box("Nfilledchamber_box", 8.0*cm, 8.0*cm, 11.6*cm);
G4LogicalVolume* Nfilledchamber_log
= new G4LogicalVolume(Nfilledchamber_box, N2Gas, "Nfilledchamber_log", 0, 0, 0);
Nfilledchamber_log -> SetVisAttributes(N2Gascolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 11.6),
Nfilledchamber_log, "Nfilledchamber",coppershield_log, false, 0);
/* // ----------------------------------------- the nitrogen filled chamber
G4Tubs* Nfilledchamber_tubs
= new G4Tubs("Nfilledchamber_tubs", 0., 5.*cm, 11.6*cm, 0, twopi);
G4LogicalVolume* Nfilledchamber_log
= new G4LogicalVolume(Nfilledchamber_tubs, N2Gas, "Nfilledchamber_log", 0, 0, 0);
Nfilledchamber_log -> SetVisAttributes(N2Gascolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 11.6),
Nfilledchamber_log, "Nfilledchamber",coppershield_log, false, 0);
*/
// ------------------------------------------------------------------------------
// --- Cryostat
// ------------------------------------------------------------------------------
// --- endcap window (MGePV D95.0)
G4Tubs* detwindow_tubs = new G4Tubs("detwindow_tubs", 0, 4.6*cm, 0.075*cm, 0, twopi);
G4LogicalVolume* detwindow_log
= new G4LogicalVolume(detwindow_tubs, Cu, "detwindow_log", 0, 0, 0);
detwindow_log -> SetVisAttributes(Cucolour);
// G4RotationMatrix* detwindow_rot = new G4RotationMatrix;
// detwindow_rot -> rotateX(0.5*M_PI);
new G4PVPlacement(0 , G4ThreeVector(0., 0, 2.825*cm),
detwindow_log,
"detwindow",Nfilledchamber_log, false, 0);
// reference point of top of endcap: 2.9 cm
// --- detector endcap
G4Tubs* dethousing_tubs = new G4Tubs("dethousing_tubs", 4.6*cm, 4.75*cm, 7.25*cm, 0, twopi);
G4LogicalVolume* dethousing_log
= new G4LogicalVolume(dethousing_tubs, Cu, "dethousing_log", 0, 0, 0);
dethousing_log -> SetVisAttributes(Cucolour);
new G4PVPlacement(0, G4ThreeVector(0., 0, -4.35*cm),
dethousing_log, "dethousing", Nfilledchamber_log, false, 0);
// --- the vacuum inside the endcap
G4VSolid* detvacuum_tubs = new G4Tubs("detvacuum_tubs",
0., 4.6*cm, 7.175*cm, 0, twopi);
G4LogicalVolume* detvacuum_log
= new G4LogicalVolume(detvacuum_tubs, Vacuum, "detvacuum_log", 0, 0, 0);
detvacuum_log -> SetVisAttributes(Vacuumcolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., -4.425*cm),
detvacuum_log, "detvacuum", Nfilledchamber_log, false, 0);
// --- the mylar (PET) cladding inside endcap, introduce new variable dethousingclad_tubs
G4Tubs* dethousingclad_tubs = new G4Tubs("dethousingclad_tubs",
4.575*cm, 4.6*cm, 6.775*cm, 0, twopi);
G4LogicalVolume* dethousingclad_log
= new G4LogicalVolume(dethousingclad_tubs, PET, "dethousingclad_log", 0, 0, 0);
dethousingclad_log -> SetVisAttributes(PETcolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., -0.4*cm),
dethousingclad_log, "dethousingclad", detvacuum_log, false, 0);
// ------------------------------------------------------------->>>
// ------------ Detector and holder - changeable position, size
// --- the mantle of the copper holder
G4Tubs* detinnercap_tubs = new G4Tubs("detinnercap_tubs",
crystal_d/2+0.005*cm, crystal_d/2+0.055*cm, 6.34*cm, 0, twopi);
G4LogicalVolume* detinnercap_log
= new G4LogicalVolume(detinnercap_tubs, Cu, "detinnercap_log", 0, 0, 0);
detinnercap_log -> SetVisAttributes(Cucolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 0.835*cm-d_z),
detinnercap_log, "detinnercap", detvacuum_log, false, 0);
// --- the bottom of the copper holder
G4Tubs* detinnercapbottom_tubs = new G4Tubs("detinnercapbottom_tubs",
0.8*cm, crystal_d/2+0.055*cm, 0.16*cm, 0, 2*M_PI);
G4LogicalVolume* detinnercapbottom_log
= new G4LogicalVolume(detinnercapbottom_tubs, Cu, "detinnercapbottom_log", 0, 0, 0);
detinnercapbottom_log -> SetVisAttributes(Cucolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., -5.665*cm-d_z),
detinnercapbottom_log, "detinnercapbottom", detvacuum_log, false, 0);
// --- the rings of the copper holder, introduce new variable detinnercapring_tubs
G4Tubs* detinnercapring_tubs = new G4Tubs("detinnercapring_tubs",
crystal_d/2+0.055*cm, crystal_d/2+0.255*cm, 0.3*cm, 0, twopi);
G4LogicalVolume* detinnercapring_log
= new G4LogicalVolume(detinnercapring_tubs, Cu, "detinnercapring_log", 0, 0, 0);
detinnercapring_log -> SetVisAttributes(Cucolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 5.3*cm-d_z),
detinnercapring_log, "detinnercapring", detvacuum_log, false, 0);
new G4PVPlacement(0, G4ThreeVector(0., 0., 0.5*cm-d_z),
detinnercapring_log, "detinnercapring", detvacuum_log, false, 1);
// --- the mylar (PET) IR shield on the crystal, introduce new variable detinnercaptop_tubs
G4Tubs* detinnercaptop_tubs = new G4Tubs("detinnercaptop_tubs",
0., crystal_d/2+0.005*cm, 0.00225*cm, 0, twopi);
G4LogicalVolume* detinnercaptop_log
= new G4LogicalVolume(detinnercaptop_tubs, PET, "detinnercaptop_log", 0, 0, 0);
detinnercaptop_log -> SetVisAttributes(PETcolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 7.1975*cm-d_z),
detinnercaptop_log, "detinnercaptop", detvacuum_log, false, 0);
// --- gold plating on mylar IR shield, introduce new variable detinnercapplate_tubs
G4Tubs* detinnercapplate_tubs = new G4Tubs("detinnercapplate_tubs",
0., crystal_d/2.+0.005*cm, 0.00025*cm, 0, twopi);
G4LogicalVolume* detinnercapplate_log
= new G4LogicalVolume(detinnercapplate_tubs, Au, "detinnercapplate_log", 0, 0, 0);
detinnercapplate_log -> SetVisAttributes(Aucolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 7.22025*cm-d_z),
detinnercapplate_log, "detinnercapplate", detvacuum_log, false, 0);
// --- the PE retainer, introduce new variable detpecap_tubs, side
G4Tubs* detpecap_tubs = new G4Tubs("detpecap_tubs",
crystal_d/2+0.055*cm, crystal_d/2+0.155*cm, 0.4*cm, 0, twopi);
G4LogicalVolume* detpecap_log
= new G4LogicalVolume(detpecap_tubs, PE, "detpecap_log", 0, 0, 0);
detpecap_log -> SetVisAttributes(PEcolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 6.9205*cm-d_z),
detpecap_log, "detpecap", detvacuum_log, false, 0);
// --- the PE retainer, top, introduce new variable detpecaptop_tubs
G4Tubs* detpecaptop_tubs = new G4Tubs("detpecaptop_tubs",
crystal_d/2.-0.145*cm, crystal_d/2.+0.055*cm, 0.05*cm, 0, twopi);
G4LogicalVolume* detpecaptop_log
= new G4LogicalVolume(detpecaptop_tubs, PE, "detpecaptop_log", 0, 0, 0);
detpecaptop_log -> SetVisAttributes(PEcolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 7.2705*cm-d_z),
detpecaptop_log, "detpecaptop", detvacuum_log, false, 0);
// ---------------------------------------------------------------------
// --- the crystal with dead layer
// ---------------------------------------------------------------------
G4Tubs* detdeadlayer_tubs = new G4Tubs("detdeadlayer_tubs",
0., crystal_d/2, crystal_l/2, 0, twopi);
G4LogicalVolume* detdeadlayer_log
= new G4LogicalVolume(detdeadlayer_tubs, GeLi, "detdeadlayer_log", 0, 0, 0);
detdeadlayer_log -> SetVisAttributes(GeLicolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 7.175*cm-crystal_l/2-d_z),
detdeadlayer_log, "detdeadlayer", detvacuum_log, false, 0);
// --- crystal without dead layer: -----------------------------------
G4Tubs* crystal_tubs = new G4Tubs("crystal_tubs", 0., crystal_d/2-d_dead, crystal_l/2-d_dead/2, 0, twopi);
G4LogicalVolume* crystal_log
= new G4LogicalVolume(crystal_tubs, Ge, "crystal_log", 0, 0, 0);
crystal_log -> SetVisAttributes(Gecolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., -d_dead/2),
crystal_log, "crystal", detdeadlayer_log, false, 0);
// --- hole in crystal -----------------------------------
G4Tubs* detholeincrystal_tubs = new G4Tubs("detholeincrystal_tubs", 0., hole_d/2, hole_l/2-hole_d/4, 0, twopi);
G4Sphere* detholeincrystal_bullet = new G4Sphere("detholeincrystal_bullet", 0., hole_d/2, 0, twopi, 0, twopi/4);
G4VSolid* dethole_shape=0;
dethole_shape = new G4UnionSolid("dethole_shape", detholeincrystal_tubs,
detholeincrystal_bullet, 0, G4ThreeVector (0.,0.,hole_l/2-hole_d/4));
G4LogicalVolume* dethole_log
= new G4LogicalVolume(dethole_shape, Vacuum, "dethole_log", 0, 0, 0);
dethole_log -> SetVisAttributes(Vacuumcolour);
new G4PVPlacement(0, G4ThreeVector(0, 0, -(crystal_l/2-d_dead/2)+hole_l/2), dethole_log,
"dethole", crystal_log, false, 0);
/* --- hole in dead layer -----------------------------------
G4Tubs* detholeindeadlayer_tubs = new G4Tubs("detholeindeadlayer_tubs", 0., 0.55*cm, d_dead/2, 0, twopi);
G4LogicalVolume* detholeindeadlayer_log
= new G4LogicalVolume(detholeindeadlayer_tubs, Vacuum, "detholeindeadlayer_log", 0, 0, 0);
detholeindeadlayer_log -> SetVisAttributes(Vacuumcolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., -crystal_l/2+d_dead/2),
detholeindeadlayer_log, "detholeindeadlayer", detdeadlayer_log, false, 0);
*/
// --- a conical ring of dead material (bulleting of crystal)
// G4double x = 0;
G4double y = 0;
y = hole_d/2*(sqrt(2.)-1)/cos(twopi/8);
G4Cons* detdeadcone_cons = new G4Cons("detdeadcone_cons",
crystal_d/2-0.0001*cm-d_dead, crystal_d/2-d_dead,
crystal_d/2-y-d_dead, crystal_d/2-d_dead, y/2, 0, twopi);
G4LogicalVolume* detdeadcone_log
= new G4LogicalVolume(detdeadcone_cons, GeLi, "detdeadcone_log", 0, 0, 0);
detdeadcone_log -> SetVisAttributes(GeLicolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., crystal_l/2-d_dead-y/2),
detdeadcone_log, "detdeadcone", crystal_log, false, 0);
// ---------------------------------------------------------------------------------------
// --- The samples
// ---------------------------------------------------------------------------------------
// --- a plastic cap between detector and sample (part of the standard box)
/*
G4Tubs* detplasticcap_tubs = new G4Tubs("detplasticcap_tubs",
0.*cm, (3.81+0.2)*cm, 0.05*cm, 0, twopi);
// die 0.2 muessen wieder weg spaeter!!! Nur fuer asymmetrische NPL-Probe!
G4LogicalVolume* detplasticcap_log
= new G4LogicalVolume(detplasticcap_tubs, Acrylic, "detplasticcap_log", 0, 0, 0);
detinnercapbottom_log -> SetVisAttributes(Acryliccolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 10.85*cm),
detplasticcap_log,
"detplasticcap", Nfilledchamber_log, false, 0);
*/
G4VSolid* sample_shape=0;
if (G4Material::GetMaterial(sample_mat_name))
sample_mat = G4Material::GetMaterial(sample_mat_name);
else
{
MGLog(error) << "The material called " << sample_mat_name << " does not exist" << endlog;
MGLog(error) << "Please check and run MaGe again!" << endlog;
G4Exception("MGEPVDetector::ConstructDetector()","err001",FatalException,"Exit MaGe");
}
/*
G4VSolid* marinelli_body = new G4Box("marinelli_body", marinellibox_width/2, marinellibox_height/2, marinellibox_thickness/2);
G4VSolid* marinelli_hole = new G4Tubs("marinelli_hole", 0, marinellitube_outrad, marinellitube_length/2, 0, 2*M_PI);
G4VSolid* marinelli_sample = new G4SubtractionSolid("marinelli_sample", marinelli_body, marinelli_hole, 0, G4ThreeVector (0.,0.,-3.2*cm));
G4VSolid* marinelli_body = new G4Box("marinelli_body", 92.5, 88, 87.5);
G4VSolid* marinelli_hole = new G4Tubs("marinelli_hole", 0, 49.5, 55.5, 0, 2*M_PI);
G4VSolid* marinelli_sample = new G4SubtractionSolid("marinelli_sample", marinelli_body, marinelli_hole, 0, G4ThreeVector (0.,0.,-3.2*cm));
G4VSolid* marinelli_body = new G4Box("marinelli_body", 97.5, 97.5, 119);
G4VSolid* marinelli_hole = new G4Tubs("marinelli_hole", 0, 45, 59.5, 0, 2*M_PI);
G4VSolid* marinelli_sample = new G4SubtractionSolid("marinelli_sample", marinelli_body, marinelli_hole, 0, G4ThreeVector (0.,0.,-5.951*cm));
*/
// GAMA small Marinelli (141G and 138G)
if (sample_geom=="smarinelli")
{
// ----------------------------------------- Cu block in nitrogen filled chamber
G4Box* Cublock_box
= new G4Box("Cublock_box", 8.0*cm, 8.0*cm, 3.15*cm);
// ----------------------------------------- drilled hole in Cu block in nitrogen filled chamber
G4Tubs* drhole_tubs
= new G4Tubs("drhole_tubs", 0., 5.0*cm, 3.15*cm, 0, twopi);
// ----------------------------------------- combined geometry for Cu shield in nitrogen filled chamber
G4VSolid* Cu_shield=0;
Cu_shield = new G4SubtractionSolid("Cu_shield", Cublock_box,
drhole_tubs, 0, G4ThreeVector (0.,0.,0.));
G4LogicalVolume* Cu_shield_log
= new G4LogicalVolume(Cu_shield, Cu, "Cu_shield_log", 0, 0, 0);
Cu_shield_log -> SetVisAttributes(Cucolour);
new G4PVPlacement(0, G4ThreeVector(0, 0, -8.45*cm), Cu_shield_log,
"Cu_shield", Nfilledchamber_log, false, 0);
// bottom part with hole
G4VSolid* marinellis1l_tubs = new G4Tubs("marinellis1l_tubs", 4.95*cm, 7.35*cm, 0.1*cm, 0, twopi);
G4LogicalVolume* marinellis1l_tubs_log
= new G4LogicalVolume(marinellis1l_tubs, PE, "marinellis1l_tubs_log", 0, 0, 0);
marinellis1l_tubs_log -> SetVisAttributes(PEcolour);
new G4PVPlacement(0, G4ThreeVector(0, 0, -5.2*cm), marinellis1l_tubs_log, "marinellis1l_tubs",
Nfilledchamber_log, false, 0);
// lower part outside
G4VSolid* marinellis1o_tubs = new G4Tubs("marinellis1o_tubs", 7.25*cm, 7.35*cm, 3.9*cm, 0, twopi);
G4LogicalVolume* marinellis1o_tubs_log
= new G4LogicalVolume(marinellis1o_tubs, Polypropylene, "marinellis1o_tubs_log", 0, 0, 0);
marinellis1o_tubs_log -> SetVisAttributes(PEcolour);
new G4PVPlacement(0, G4ThreeVector(0, 0, -1.2*cm), marinellis1o_tubs_log, "marinellis1o_tubs",
Nfilledchamber_log, false, 0);
// upper part outside
G4VSolid* marinellis2o_cons = new G4Cons("marinellis2o_cons",
7.3*cm, 7.4*cm, 7.675*cm, 7.775*cm, 4.3*cm, 0, twopi);
G4LogicalVolume* marinellis2o_cons_log
= new G4LogicalVolume(marinellis2o_cons, Polypropylene, "marinellis2o_cons_log", 0, 0, 0);
marinellis2o_cons_log -> SetVisAttributes(PEcolour);
new G4PVPlacement(0, G4ThreeVector(0, 0, 7.0*cm), marinellis2o_cons_log, "marinellis2o_cons",
Nfilledchamber_log, false, 0);
// inner cone
G4VSolid* marinellis1i_cons = new G4Cons("marinellis1i_cons",
4.95*cm, 5.05*cm, 4.8*cm, 4.9*cm, 5.075*cm, 0, twopi);
G4LogicalVolume* marinellis1i_cons_log
= new G4LogicalVolume(marinellis1i_cons, Polypropylene, "marinellis1i_cons_log", 0, 0, 0);
marinellis1i_cons_log -> SetVisAttributes(PEcolour);
new G4PVPlacement(0, G4ThreeVector(0, 0, -0.025*cm), marinellis1i_cons_log, "marinellis1i_cons",
Nfilledchamber_log, false, 0);
// lower volume 630.28 cm^3 (straight outside walls with Ri 7.15 cm),
// middle volume (conical inside AND outside) 215.49 cm^3, and upper volume (conical outside) 1097.14 cm^3.
// top lid of inner cone
G4VSolid* marinellis2l_tubs = new G4Tubs("marinellis2l_tubs", 0*cm, 4.8*cm, 0.075*cm, 0, twopi);
G4LogicalVolume* marinellis2l_tubs_log
= new G4LogicalVolume(marinellis2l_tubs, PE, "marinellis2l_tubs_log", 0, 0, 0);
marinellis2l_tubs_log -> SetVisAttributes(PEcolour);
new G4PVPlacement(0, G4ThreeVector(0, 0, 4.975*cm), marinellis2l_tubs_log, "marinellis2l_tubs",
Nfilledchamber_log, false, 0);
// top lid of whole Marinelli
G4VSolid* marinellis3l_tubs = new G4Tubs("marinellis3l_tubs", 0*cm, 7.9*cm, 0.1*cm, 0, twopi);
G4LogicalVolume* marinellis3l_tubs_log
= new G4LogicalVolume(marinellis3l_tubs, PE, "marinellis3l_tubs_log", 0, 0, 0);
marinellis3l_tubs_log -> SetVisAttributes(PEcolour);
new G4PVPlacement(0, G4ThreeVector(0, 0, 11.4*cm), marinellis3l_tubs_log, "marinellis3l_tubs",
Nfilledchamber_log, false, 0);
// total (real): 1.94291 l
// sample inside GAMA small Marinelli
G4double fillingheight = 0;
G4double sampleheight = 0;
sample_shape = 0;
fillingheight = 16.4*cm-smarinelli_height;
if ((fillingheight>=0*cm) && (fillingheight<=7.8*cm))
{
sample_shape = new G4Cons("sample_shape",
5.05*cm,
7.25*cm,
5.05*cm-(0.15/10.15*fillingheight),
7.25*cm,
fillingheight/2., 0, twopi);
sampleheight=fillingheight/2.;
G4LogicalVolume* sample_log
= new G4LogicalVolume(sample_shape, sample_mat, "sample_log", 0, 0, 0);
sample_log -> SetVisAttributes(Sample_Colour);
new G4PVPlacement(0, G4ThreeVector(0, 0, (-5.1*cm+sampleheight)), sample_log,
"sample", Nfilledchamber_log, false, 0);
}
else if ((fillingheight>7.8*cm) && (fillingheight<=10.15*cm))
{
G4VSolid* smarinellisample1_cons = new G4Cons("smarinellisample1_cons",
5.05*cm,
7.25*cm,
5.05*cm-(0.15/10.15*7.8*cm),
7.25*cm,
3.9*cm, 0, twopi);
G4VSolid* smarinellisample2_cons = new G4Cons("smarinellisample2_cons",
5.05*cm-(0.15/10.15*7.8*cm),
7.3*cm,
5.05*cm-(0.15/10.15*fillingheight),
7.3*cm+((0.375/8.6)*(fillingheight-7.8*cm)),
fillingheight/2.-3.9*cm, 0, twopi);
sample_shape = new G4UnionSolid("sample_shape", smarinellisample1_cons,
smarinellisample2_cons, 0, G4ThreeVector (0.,0.,fillingheight/2.));
sampleheight=fillingheight/2.;
G4LogicalVolume* sample_log
= new G4LogicalVolume(sample_shape, sample_mat, "sample_log", 0, 0, 0);
sample_log -> SetVisAttributes(Sample_Colour);
new G4PVPlacement(0, G4ThreeVector(0, 0, -1.2*cm), sample_log,
"sample", Nfilledchamber_log, false, 0);
}
else if ((fillingheight>10.15*cm) && (fillingheight<=16.4*cm))
{
G4VSolid* smarinellisample1_cons = new G4Cons("smarinellisample1_cons",
5.05*cm,
7.25*cm,
5.05*cm-(0.15/10.15*7.8*cm),
7.25*cm,
3.9*cm, 0, twopi);
G4VSolid* smarinellisample2_cons = new G4Cons("smarinellisample2_cons",
5.05*cm-(0.15/10.15*7.8*cm),
7.3*cm,
4.9*cm,
7.3*cm+((0.375/8.6)*2.35*cm),
1.175*cm, 0, twopi);
G4VSolid* temp_shape=0;
temp_shape = new G4UnionSolid("temp_shape", smarinellisample1_cons,
smarinellisample2_cons, 0, G4ThreeVector (0.,0.,3.9*cm+1.175*cm));
G4VSolid* smarinellisample3_cons = new G4Cons("smarinellisample3_cons",
0*cm,
7.3*cm+((0.375/8.6)*2.35*cm),
0*cm,
7.3*cm+((0.375/8.6)*(fillingheight-7.8*cm)),
(fillingheight-10.15*cm)/2., 0, twopi);
sample_shape = new G4UnionSolid("sample_shape", temp_shape, smarinellisample3_cons,
0, G4ThreeVector (0.,0.,6.25*cm+(fillingheight-10.15*cm)/2.));
sampleheight=fillingheight/2.;
G4LogicalVolume* sample_log
= new G4LogicalVolume(sample_shape, sample_mat, "sample_log", 0, 0, 0);
sample_log -> SetVisAttributes(Sample_Colour);
new G4PVPlacement(0, G4ThreeVector(0, 0, -1.2*cm), sample_log,
"sample", Nfilledchamber_log, false, 0);
}
else
{
MGLog(error) << "The value for the empty space above the filling is not correct" << endlog;
G4Exception("MGEPVDetector::ConstructDetector()","err002",FatalException,"Check the value");
MGLog(fatal) << endlog;
}
}
else if (sample_geom=="box")
{
sample_shape = new G4Box("sample_box", box_width/2, box_height/2, box_thickness/2);
}
else if (sample_geom=="tube")
{
sample_shape = new G4Tubs("sample_tube",
tube_innrad, tube_outrad, tube_length/2, 0, tube_angle);
}
else if (sample_geom=="sphere")
{
//---- for point-like sources
sample_shape = new G4Sphere("sample_sphere",
0., sphere_rad, 0.,twopi, 0.,twopi);
}
else if (sample_geom=="sbox")
{
// ----------------------------------------- Cu block in nitrogen filled chamber
G4Box* Cublock_box = new G4Box("Cublock_box", 8.0*cm, 8.0*cm, 9.9*cm);
// ----------------------------------------- drilled hole in Cu block in nitrogen filled chamber
G4Tubs* drhole_tubs = new G4Tubs("drhole_tubs", 0., 5.0*cm, 9.9*cm, 0, twopi);
// ----------------------------------------- combined geometry for Cu shield in nitrogen filled chamber
G4VSolid* Cu_shield=0;
Cu_shield = new G4SubtractionSolid("Cu_shield", Cublock_box,
drhole_tubs, 0, G4ThreeVector (0.,0.,0.));
G4LogicalVolume* Cu_shield_log
= new G4LogicalVolume(Cu_shield, Cu, "Cu_shield_log", 0, 0, 0);
Cu_shield_log -> SetVisAttributes(Cucolour);
new G4PVPlacement(0, G4ThreeVector(0, 0, -1.7*cm), Cu_shield_log,
"Cu_shield", Nfilledchamber_log, false, 0);
// --- sample holder is 0.3 cm from detector window
G4Tubs* sample_holder_tubs = new G4Tubs("sample_holder_tubs", 4.75*cm, 5.0*cm, 2.5*cm, 0, twopi);
G4LogicalVolume* sample_holder_log
= new G4LogicalVolume(sample_holder_tubs, Cu, "sample_holder_log", 0, 0, 0);
sample_holder_log -> SetVisAttributes(Cucolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 5.7*cm), sample_holder_log,
"sample_holder",Nfilledchamber_log, false, 0);
// --- Cu support ring of sample holder 0.25 cm thickness
G4Tubs* sample_holder_ring_tubs = new G4Tubs("sample_holder_ring_tubs", 4.6*cm, 4.75*cm, 0.125*cm, 0, twopi);
G4LogicalVolume* sample_holder_ring_log
= new G4LogicalVolume(sample_holder_ring_tubs, Cu, "sample_holder_ring_log", 0, 0, 0);
sample_holder_ring_log -> SetVisAttributes(Cucolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 3.325*cm), sample_holder_ring_log,
"sample_holder_ring",Nfilledchamber_log, false, 0);
// --- support ring in sample holder is 0.55 cm from detector window
G4Tubs* supportring_tubs = new G4Tubs("supportring_tubs", di_sring/2., do_sring/2., h_sring/2., 0, twopi);
G4LogicalVolume* supportring_log
= new G4LogicalVolume(supportring_tubs, Acrylic, "supportring_log", 0, 0, 0);
supportring_log -> SetVisAttributes(Acryliccolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 3.45*cm+h_sring/2.), supportring_log,
"supportring",Nfilledchamber_log, false, 0);
// --- standard cylindrical box (bottom side facing detector)
G4Tubs* detplasticbottom_tubs = new G4Tubs("detplasticbottom_tubs", 0.*cm, do_sbox/2., 0.05*cm, 0, twopi);
G4LogicalVolume* detplasticbottom_log
= new G4LogicalVolume(detplasticbottom_tubs, PS, "detplasticbottom_log", 0, 0, 0);
detplasticbottom_log -> SetVisAttributes(PETcolour);
new G4PVPlacement(0, G4ThreeVector(s_x, s_y, s_z+3.45*cm+h_sring+0.05*cm),
detplasticbottom_log, "detplasticbottom",Nfilledchamber_log, false, 0);
G4Tubs* detplasticcap_tubs = new G4Tubs("detplasticcap_tubs", 0.*cm, do_sbox/2., 0.05*cm, 0, twopi);
G4LogicalVolume* detplasticcap_log
= new G4LogicalVolume(detplasticcap_tubs, PS, "detplasticcap_log", 0, 0, 0);
detplasticcap_log -> SetVisAttributes(PETcolour);
new G4PVPlacement(0, G4ThreeVector(s_x, s_y, s_z+3.45*cm+h_sring+h_sbox+0.15*cm),
detplasticcap_log, "detplasticcap", Nfilledchamber_log, false, 0);
G4Tubs* detplasticshell_tubs = new G4Tubs("detplasticshell_tubs", di_sbox/2., do_sbox/2., h_sbox, 0,twopi);
G4LogicalVolume* detplasticshell_log
= new G4LogicalVolume(detplasticshell_tubs, PS, "detplasticshell_log", 0, 0, 0);
detplasticshell_log -> SetVisAttributes(PETcolour);
new G4PVPlacement(0, G4ThreeVector(s_x, s_y, s_z+3.45*cm+h_sring+h_sbox/2.+0.1*cm),
detplasticshell_log, "detplasticshell", Nfilledchamber_log, false, 0);
// cylinder sample inside the standard box (default - filling the volume)
sample_shape = new G4Tubs("sample_tube",
tube_innrad, tube_outrad, tube_length/2, 0, tube_angle);
G4LogicalVolume* sample_log = new G4LogicalVolume(sample_shape, sample_mat, "sample_log", 0, 0, 0);
sample_log -> SetVisAttributes(Sample_Colour);
new G4PVPlacement(0, G4ThreeVector(s_x, s_y, 3.45*cm+h_sring+tube_length/2.+0.1*cm),
sample_log, "sample", Nfilledchamber_log, false, 0);
}
else if (sample_geom=="twobox")
{
//---for point like sources, two boxes away from detector (strong sources)
// --- standard cylindrical box (cover side facing detector) - 0.1cm from detector window
G4Tubs* detplasticcap_tubs = new G4Tubs("detplasticcap_tubs", 0.*cm, 3.81*cm, 0.05*cm, 0, twopi);
G4LogicalVolume* detplasticcap_log
= new G4LogicalVolume(detplasticcap_tubs, Acrylic, "detplasticcap_log", 0, 0, 0);
detplasticcap_log -> SetVisAttributes(Acryliccolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 10.95*cm), detplasticcap_log, "detplasticcap",
Nfilledchamber_log, false, 0);
G4Tubs* detplasticcap1_tubs = new G4Tubs("detplasticcap1_tubs", 0.*cm,
3.81*cm, 0.075*cm, 0, twopi);
G4LogicalVolume* detplasticcap1_log
= new G4LogicalVolume(detplasticcap1_tubs, Acrylic, "detplasticcap1_log", 0, 0, 0);
detplasticcap1_log -> SetVisAttributes(Acryliccolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 13.875*cm), detplasticcap1_log,
"detplasticcap1", Nfilledchamber_log, false, 0);
G4Tubs* detplasticshell_tubs = new G4Tubs("detplasticshell_tubs", 3.65*cm, 3.81*cm, 1.4*cm, 0, twopi);
G4LogicalVolume* detplasticshell_log
= new G4LogicalVolume(detplasticshell_tubs, Acrylic, "detplasticshell_log", 0, 0, 0);
detplasticshell_log -> SetVisAttributes(Acryliccolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 12.4*cm), detplasticshell_log,
"detplasticshell", Nfilledchamber_log, false, 0);
// --- one more standard box between detector and sample - for strong sources measurement
new G4PVPlacement(0, G4ThreeVector(0., 0., 14.05*cm),
detplasticcap_log, "detplasticcap2", Nfilledchamber_log, false, 0);
new G4PVPlacement(0, G4ThreeVector(0., 0., 15.5*cm),
detplasticshell_log, "detplasticshell2", Nfilledchamber_log, false, 0);
new G4PVPlacement(0, G4ThreeVector(0., 0., 16.975*cm),
detplasticcap1_log, "detplasticcap3", Nfilledchamber_log, false, 0);
sample_shape = new G4Sphere("sample_shpere", 0., sphere_rad, 0.,twopi, 0.,twopi);
}
else if (sample_geom=="liquid")
{
// --- standard cylindrical box (cover side facing detector) - 0.1cm from detector window _____with liquid sample_____
G4Tubs* detplasticcap_tubs = new G4Tubs("detplasticcap_tubs", 0.*cm, 3.81*cm, 0.05*cm, 0, twopi);
G4LogicalVolume* detplasticcap_log
= new G4LogicalVolume(detplasticcap_tubs, Acrylic, "detplasticcap_log", 0, 0, 0);
detplasticcap_log -> SetVisAttributes(Acryliccolour);
new G4PVPlacement(0, G4ThreeVector(s_x, s_y, s_z+9.35*cm), detplasticcap_log,
"detplasticcap", Nfilledchamber_log, false, 0);
G4Tubs* detplasticcap1_tubs = new G4Tubs("detplasticcap1_tubs", 0.*cm, 3.81*cm, 0.075*cm, 0, twopi);
G4LogicalVolume* detplasticcap1_log
= new G4LogicalVolume(detplasticcap1_tubs, Acrylic, "detplasticcap1_log", 0, 0, 0);
detplasticcap1_log -> SetVisAttributes(Acryliccolour);
new G4PVPlacement(0, G4ThreeVector(s_x, s_y, s_z+12.275*cm), detplasticcap1_log,
"detplasticcap1", Nfilledchamber_log, false, 0);
G4Tubs* detplasticshell_tubs = new G4Tubs("detplasticshell_tubs", 3.65*cm, 3.81*cm, 1.4*cm, 0,twopi);
G4LogicalVolume* detplasticshell_log
= new G4LogicalVolume(detplasticshell_tubs, Acrylic, "detplasticshell_log", 0, 0, 0);
detplasticshell_log -> SetVisAttributes(Acryliccolour);
new G4PVPlacement(0, G4ThreeVector(s_x, s_y, s_z+10.8*cm),
detplasticshell_log, "detplasticshell", Nfilledchamber_log, false, 0);
// liquid filling the standard box up to level of "boxheight"
G4VSolid* sample_body = new G4Tubs("sample_tube", tube_innrad, tube_outrad, tube_length/2, 0, tube_angle);
G4VSolid* sample_hole = new G4Box("sample_hole", tube_outrad+0.1*cm, tube_outrad, tube_length/2+0.1*cm);
sample_shape = new G4SubtractionSolid("sample_shape", sample_body, sample_hole, 0,
G4ThreeVector(0., 2*tube_outrad-box_height,0.));
}
else if (sample_geom=="marinelli")
{
// "marinelli" shape (box with a hole)
G4VSolid* sample_body = new G4Box("sample_box", box_width/2, box_height/2, box_thickness/2);
G4VSolid* sample_hole = new G4Tubs("sample_tube", 0., tube_outrad, tube_length, 0, tube_angle);
sample_shape = new G4SubtractionSolid("sample_shape", sample_body, sample_hole, 0,
G4ThreeVector(0., tube_innrad, -box_thickness/2));
}
else if (sample_geom=="custom")
{
// ----------------------------------------- Cu block in nitrogen filled chamber
G4Box* Cublock_box
= new G4Box("Cublock_box", 8.0*cm, 8.0*cm, 3.15*cm);
// ----------------------------------------- drilled hole in Cu block in nitrogen filled chamber
G4Tubs* drhole_tubs
= new G4Tubs("drhole_tubs", 0., 5.0*cm, 3.15*cm, 0, twopi);
// ----------------------------------------- combined geometry for Cu shield in nitrogen filled chamber
G4VSolid* Cu_shield=0;
Cu_shield = new G4SubtractionSolid("Cu_shield", Cublock_box,
drhole_tubs, 0, G4ThreeVector (0.,0.,0.));
G4LogicalVolume* Cu_shield_log
= new G4LogicalVolume(Cu_shield, Cu, "Cu_shield_log", 0, 0, 0);
Cu_shield_log -> SetVisAttributes(Cucolour);
new G4PVPlacement(0, G4ThreeVector(0, 0, -8.45*cm), Cu_shield_log,
"Cu_shield", Nfilledchamber_log, false, 0);
// here define any other sample geometry
/*
// -------- cylindric part of NALGENE bottle
G4Tubs* Nalgenebottle_tubs = new G4Tubs("Nalgenebottle_tubs", 2.4*cm, 2.5*cm, 3.4*cm, 0, twopi);
G4LogicalVolume* Nalgenebottle_log = new G4LogicalVolume(Nalgenebottle_tubs, Polypropylene,
"Nalgenebottle_log", 0, 0, 0);
Nalgenebottle_log -> SetVisAttributes(PEcolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 7.0*cm), Nalgenebottle_log,
"Nalgenebottle", Nfilledchamber_log, false, 0);
// -------- conical bottom side part of NALGENE bottle
G4Cons* Nalgenebottle_cons = new G4Cons("Nalgenebottle_cons", 2.1*cm, 2.2*cm, 2.4*cm, 2.5*cm, 0.3*cm, 0, twopi);
G4LogicalVolume* Nalgenebottleside_log = new G4LogicalVolume(Nalgenebottle_cons, Polypropylene,
"Nalgenebottleside_log", 0, 0, 0);
Nalgenebottleside_log -> SetVisAttributes(PEcolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 3.3*cm), Nalgenebottleside_log,
"Nalgenebottleside", Nfilledchamber_log, false, 0);
// -------- conical shell bottom part of NALGENE bottle
G4Cons* Nalgenebottleb_cons = new G4Cons("Nalgenebottleb_cons", 2.0*cm, 2.09*cm, 0.4*cm, 0.5*cm, 0.3*cm, 0, twopi);
G4LogicalVolume* Nalgenebottlebottom_log = new G4LogicalVolume(Nalgenebottleb_cons, Polypropylene,
"Nalgenebottlebottom_log", 0, 0, 0);
Nalgenebottlebottom_log -> SetVisAttributes(PEcolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 3.3*cm), Nalgenebottlebottom_log,
"Nalgenebottlebottom", Nfilledchamber_log, false, 0);
// -------- cylindrical shell bottom part of NALGENE bottle
G4Tubs* Nalgenebottleb_tubs = new G4Tubs("Nalgenebottleb_tubs", 0.*cm, 0.4*cm, 0.05*cm, 0, twopi);
G4LogicalVolume* Nalgenebottlebottomt_log = new G4LogicalVolume(Nalgenebottleb_tubs, Polypropylene,
"Nalgenebottlebottomt_log", 0, 0, 0);
Nalgenebottlebottomt_log -> SetVisAttributes(PEcolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 3.55*cm), Nalgenebottlebottomt_log,
"Nalgenebottlebottomt", Nfilledchamber_log, false, 0);
// cylinder sample inside the NALGENE bottle (default - IAEA sample CU200903, 68.8 mm)
G4Tubs* temp1_tubs = new G4Tubs("temp1_tubs", 0.*cm, 2.4*cm, 3.19*cm, 0, twopi);
G4Cons* temp2_cons = new G4Cons("temp2_cons", 2.09*cm, 2.1*cm, 0.5*cm, 2.4*cm, 0.25*cm, 0, twopi);
sample_shape = new G4UnionSolid("sample_shape", temp2_cons, temp1_tubs, 0, G4ThreeVector (0.,0., 3.44*cm));
G4LogicalVolume* sample_log = new G4LogicalVolume(sample_shape, sample_mat, "sample_log", 0, 0, 0);
sample_log -> SetVisAttributes(Sample_Colour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 3.35*cm),
sample_log, "sample", Nfilledchamber_log, false, 0);
*/
// --- Cu tap 0.3 cm from detector window
G4Tubs* cu_tap_tubs = new G4Tubs("cu_tap_tubs", 0*cm, 5.0*cm, 2.5*cm, 0, twopi);
G4LogicalVolume* cu_tap_log
= new G4LogicalVolume(cu_tap_tubs, Cu, "cu_tap_log", 0, 0, 0);
cu_tap_log -> SetVisAttributes(Cucolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., 5.7*cm), cu_tap_log, "cu_tap",Nfilledchamber_log, false, 0);
// ENEA, 3 Nb disks
/*
G4VSolid* temp1_shape = new G4Tubs("temp1_shape", 0., 0.9*cm, 0.05*cm, 0, twopi);
G4VSolid* temp2_shape = new G4Tubs("temp2_shape", 0., 0.9*cm, 0.05*cm, 0, twopi);
G4VSolid* temp3_shape=0;
temp3_shape = new G4UnionSolid("temp3_shape", temp1_shape,
temp2_shape, 0, G4ThreeVector (1.8*cm,0.,0.));
sample_shape = new G4UnionSolid("sample_shape", temp3_shape, temp1_shape,
0, G4ThreeVector (0.9*cm,1.56*cm,0.));
G4LogicalVolume* sample_log
= new G4LogicalVolume(sample_shape, sample_mat, "sample_log", 0, 0, 0);
sample_log -> SetVisAttributes(Sample_Colour);
new G4PVPlacement(0, G4ThreeVector(-0.9*cm, -0.52*cm, 2.95*cm), sample_log,
"sample", Nfilledchamber_log, false, 0);
*/
//ElectronicsJunctionSled
//ElectronicsJunctionSledPins
//ElectronicsJunctionSledSpacer
//ElectronicsJunctionSledCables
/* //The Iglidur sample on a Cu holder (6 sticks, only 1 simulated)
sample_shape = new G4Tubs("sample_tube", tube_innrad, tube_outrad, tube_length/2, 0, tube_angle);
G4Tubs* CuStick_tubs = new G4Tubs("CuStick_tubs", 0., 0.5*cm, tube_length/2, 0, 2*M_PI);
G4LogicalVolume* CuStick_log = new G4LogicalVolume(CuStick_tubs, Cu, "CuStick_log", 0, 0, 0);
CuStick_log -> SetVisAttributes(Cucolour);
new G4PVPlacement(0, G4ThreeVector(0., s_y+0.2*cm, s_z), CuStick_log, "CuStick", Nfilledchamber_log, false, 0);
*/
/*
// The NPL bottle with definable liquid level - tube_leght
G4Tubs* NPLbottle_tubs = new G4Tubs("NPLbottle_tubs", 0., 3.62*cm, 7*cm, 0, 2*M_PI);
G4LogicalVolume* NPLbottle_log = new G4LogicalVolume(NPLbottle_tubs, PE, "NPLbottle_log", 0, 0, 0);
NPLbottle_log -> SetVisAttributes(Acryliccolour);
//rotate 90deg up :
G4RotationMatrix* halfPi_x_rot = new G4RotationMatrix;
halfPi_x_rot -> rotateX(0.5*M_PI);
//position : touching detector window; on bottom of the Chamber
new G4PVPlacement(halfPi_x_rot, G4ThreeVector(0., -3.15*cm, 14.42*cm), NPLbottle_log, "NPLbottle", Nfilledchamber_log, false, 0);
//contents :
G4Tubs* NPLbottleN2_tubs = new G4Tubs("NPLbottleN2_tubs", 0., 3.5*cm, 6.6*cm-tube_length/2, 0, 2*M_PI); //filled with N2 of the chamber (?)
G4LogicalVolume* NPLbottleN2_log = new G4LogicalVolume(NPLbottleN2_tubs, N2Gas, "NPLbottleN2_log", 0, 0, 0);
NPLbottleN2_log -> SetVisAttributes(N2Gascolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., tube_length/2), NPLbottleN2_log, "NPLbottleN2", NPLbottle_log, false, 0);
//needs to change macro confine volume ---> NPLsample
G4Tubs* NPLsample_tubs = new G4Tubs("NPLsample_tubs", 0., 3.5*cm, tube_length/2, 0, twopi);
G4LogicalVolume* NPLsample_log = new G4LogicalVolume(NPLsample_tubs, sample_mat, "NPLsample_log", 0, 0, 0);
NPLsample_log -> SetVisAttributes(Sample_Colour);
new G4PVPlacement(0, G4ThreeVector(0., 0., tube_length/2-6.6*cm), NPLsample_log, "NPLsample", NPLbottle_log, false, 0);
//puts small ball away, to avoid errors
sample_shape = new G4Sphere("sample_shpere", 0., sphere_rad, 0.,twopi, 0.,twopi);
s_z = 15.*cm;
// The NPL small teflon bottle with definable height from chamber bottom - s_z and liquid level - tube_leght
G4Tubs* NPLbottle_tubs = new G4Tubs("NPLbottle_tubs", 0., 2.225*cm, 4.5*cm, 0, twopi);
G4LogicalVolume* NPLbottle_log = new G4LogicalVolume(NPLbottle_tubs, Teflon, "NPLbottle_log", 0, 0, 0);
NPLbottle_log -> SetVisAttributes(Acryliccolour);
//rotate 90deg up :
G4RotationMatrix* halfPi_x_rot = new G4RotationMatrix;
halfPi_x_rot -> rotateX(90*deg);
//position : touching detector window; on bottom of the Chamber
new G4PVPlacement(halfPi_x_rot, G4ThreeVector(0., s_y-5.65*cm, 13.025*cm), NPLbottle_log, "NPLbottle", Nfilledchamber_log, false, 0);
//contents :
G4Tubs* NPLbottleN2_tubs = new G4Tubs("NPLbottleN2_tubs", 0., 2.175*cm, 4.25*cm-tube_length/2, 0, twopi); //filled with N2 of the chamber (?)
G4LogicalVolume* NPLbottleN2_log = new G4LogicalVolume(NPLbottleN2_tubs, N2Gas, "NPLbottleN2_log", 0, 0, 0);
NPLbottleN2_log -> SetVisAttributes(N2Gascolour);
new G4PVPlacement(0, G4ThreeVector(0., 0., tube_length/2-0.05*cm), NPLbottleN2_log, "NPLbottleN2", NPLbottle_log, false, 0);
//needs to change macro confine volume ---> NPLsample
G4Tubs* NPLsample_tubs = new G4Tubs("NPLsample_tubs", 0., 2.175*cm, tube_length/2, 0, twopi);
G4LogicalVolume* NPLsample_log = new G4LogicalVolume(NPLsample_tubs, sample_mat, "NPLsample_log", 0, 0, 0);
NPLsample_log -> SetVisAttributes(Sample_Colour);
new G4PVPlacement(0, G4ThreeVector(0., 0., tube_length/2-4.3*cm), NPLsample_log, "NPLsample", NPLbottle_log, false, 0);
//puts small ball away, to avoid errors
sample_shape = new G4Sphere("sample_sphere", 0., sphere_rad, 0.,twopi, 0.,twopi);
s_z = 15.*cm; */
}
else
{
// MGLog(error) << "The shape " << sample_geom << " is not defined " << endlog;
// MGLog(error) sample_geom << endlog;
G4cout << "\n\n" << sample_geom << "\n\n";
MGLog(fatal) << endlog;
G4Exception("MGEPVDetector::ConstructDetector()","err003",FatalException,"Check the shape type");
}
//G4LogicalVolume* sample_log = new G4LogicalVolume(marinelli_sample, sample_mat, "sample_log", 0, 0, 0);
/* G4LogicalVolume* sample_log = new G4LogicalVolume(sample_shape, sample_mat, "sample_log", 0, 0, 0);
sample_log -> SetVisAttributes(Sample_Colour);
new G4PVPlacement(0, G4ThreeVector(s_x, s_y, s_z+10.8*cm),
sample_log, "sample", Nfilledchamber_log, false, 0);
*/
//------------------------------------------------
// Sensitive detectors
//------------------------------------------------
G4SDManager* SDman = G4SDManager::GetSDMpointer();
G4String CrystalSDname = "/mydet/gerda/gecrystal";
GEGeometrySD* CrystalSD = new GEGeometrySD(CrystalSDname);
SDman->AddNewDetector(CrystalSD);
crystal_log-> SetSensitiveDetector(CrystalSD);
SetDetectorLogical(experimentalHall_log);
SetDetectorName("ExperimentalHall");
}
//---------------------------------------------------------------------------//
//---------------------------------------------------------------------------//
| [
"mgold@unm.edu"
] | mgold@unm.edu |
bb8b105fbb4a092b7365cf9dd81eb33f77d21f24 | 16dff48f417a54d27c15ed0ae82838a0a551108c | /src/Light.cpp | 88910e5812f3676acc933d0251c410efe109e6cf | [] | no_license | DanyFids/CG-Midterm | ccc69ec84c57857af51cb4e8df18013ad9ec3f52 | a55faebb0b176d2da991995071b06a996104ef45 | refs/heads/master | 2020-08-27T10:11:55.561803 | 2019-11-01T16:29:27 | 2019-11-01T16:29:27 | 217,328,445 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 8,344 | cpp | #include "Light.h"
#include "Shader.h"
#include<glad/glad.h>
#include<GLFW/glfw3.h>
#include<GLM/gtc/matrix_transform.hpp>
#include "Constants.h"
// Build the resources used to render this light's shadow (occlusion) map:
// a view matrix looking from the light toward the world origin, a 2D depth
// texture, and a framebuffer whose only attachment is that depth texture.
void Light::SetupOccRender()
{
	// Light looks at the world origin; world +Y is up.
	lightView = glm::lookAt(position, glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f));

	GLenum err; // only polled below — handy when stepping through in a debugger

	glGenFramebuffers(1, &depthMapFBO);

	// Depth texture the lighting pass will sample.
	glGenTextures(1, &depthMap);
	glBindTexture(GL_TEXTURE_2D, depthMap);
	glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT,
		SHADOW_WIDTH, SHADOW_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
	// Border depth of 1.0 (the far plane) so samples outside the map
	// compare as "not in shadow" — standard shadow-map practice.
	float borderColor[] = { 1.0f, 1.0f, 1.0f, 1.0f };
	glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, borderColor);
	err = glGetError();

	// Depth-only framebuffer: no color buffer is drawn to or read from.
	glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
	err = glGetError();
	glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthMap, 0);
	err = glGetError();
	glDrawBuffer(GL_NONE);
	glReadBuffer(GL_NONE);
	glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
// Base light. Picks an orthographic or perspective shadow projection
// (per the caller-supplied `orth` flag), stores position and the three
// lighting terms, then builds the shadow depth-map resources.
Light::Light(glm::vec3 pos, bool orth)
{
	far_plane = orth ? 100.0f : 25.0f;
	lightProjection = orth
		? glm::ortho(-10.0f, 10.0f, -10.0f, 10.0f, 0.2f, far_plane)
		: glm::perspective(glm::radians(90.0f), ((float)SHADOW_WIDTH / (float)SHADOW_HEIGHT), 0.2f, far_plane);

	position = pos;

	// Defaults: white light with standard ambient/diffuse/specular weights.
	ambient = glm::vec3(1.0, 1.0, 1.0);
	diffuse = ambient;
	specular = diffuse;
	ambient_strength = 0.2f;
	diffuse_strength = 0.5f;
	specular_strength = 1.0f;

	SetupOccRender();
}

// Single colour applied to all three lighting terms; strengths supplied
// by the caller.
Light::Light(glm::vec3 pos, glm::vec3 color, float a, float d, float s, bool orth)
{
	far_plane = orth ? 100.0f : 25.0f;
	lightProjection = orth
		? glm::ortho(-10.0f, 10.0f, -10.0f, 10.0f, 0.2f, far_plane)
		: glm::perspective(glm::radians(90.0f), ((float)SHADOW_WIDTH / (float)SHADOW_HEIGHT), 0.2f, far_plane);

	position = pos;

	ambient = color;
	diffuse = color;
	specular = color;
	ambient_strength = a;
	diffuse_strength = d;
	specular_strength = s;

	SetupOccRender();
}

// Fully-specified constructor: an independent colour per lighting term.
Light::Light(glm::vec3 pos, glm::vec3 ambi, glm::vec3 diff, glm::vec3 spec, float a, float d, float s, bool orth)
{
	far_plane = orth ? 100.0f : 25.0f;
	lightProjection = orth
		? glm::ortho(-10.0f, 10.0f, -10.0f, 10.0f, 0.2f, far_plane)
		: glm::perspective(glm::radians(90.0f), ((float)SHADOW_WIDTH / (float)SHADOW_HEIGHT), 0.2f, far_plane);

	position = pos;

	ambient = ambi;
	diffuse = diff;
	specular = spec;
	ambient_strength = a;
	diffuse_strength = d;
	specular_strength = s;

	SetupOccRender();
}
// Assign the same colour to the ambient, diffuse and specular terms.
void Light::SetUniformColor(glm::vec3 color)
{
	ambient = color;
	diffuse = color;
	specular = color;
}

// Replace the diffuse colour and its strength multiplier.
void Light::SetDiffuse(glm::vec3 color, float str)
{
	diffuse = color;
	diffuse_strength = str;
}

// Replace the specular colour and its strength multiplier.
void Light::SetSpecular(glm::vec3 color, float str)
{
	specular = color;
	specular_strength = str;
}

// Update all three strength multipliers at once.
void Light::SetStr(float a, float d, float s)
{
	ambient_strength = a;
	diffuse_strength = d;
	specular_strength = s;
}

// Replace the ambient colour and its strength multiplier.
void Light::SetAmbient(glm::vec3 color, float str)
{
	ambient = color;
	ambient_strength = str;
}
// Upload this light's parameters into the shader's "light" uniform struct.
// Each colour is pre-scaled by its strength multiplier.
void Light::SetupLight(Shader* shader)
{
	const glm::vec3 amb = ambient * ambient_strength;
	const glm::vec3 dif = diffuse * diffuse_strength;
	const glm::vec3 spc = specular * specular_strength;

	shader->SetVec3("light.position", position);
	shader->SetVec3("light.ambient", amb);
	shader->SetVec3("light.diffuse", dif);
	shader->SetVec3("light.specular", spc);
}

// Upload the light-space transform consumed by the shadow depth pass.
void Light::SetupDepthShader(Shader* shader)
{
	const glm::mat4 lightSpace = lightProjection * lightView;
	shader->SetMat4("lightSpaceMatrix", lightSpace);
}
// Directional light constructors simply forward to the matching Light
// constructor (no extra state of their own is initialized here).
DirectionalLight::DirectionalLight(glm::vec3 pos)
	: Light(pos) {}

DirectionalLight::DirectionalLight(glm::vec3 pos, glm::vec3 color, float a, float d, float s)
	: Light(pos, color, a, d, s) {}

DirectionalLight::DirectionalLight(glm::vec3 pos, glm::vec3 ambi, glm::vec3 diff, glm::vec3 spec, float a, float d, float s)
	: Light(pos, ambi, diff, spec, a, d, s) {}

// Upload this light's parameters into the shader's "sun" uniform struct,
// the shadow-map sampler slot (texture unit 3) and the sun's light-space
// matrix used for shadow lookups.
void DirectionalLight::SetupLight(Shader* shader)
{
	const glm::mat4 lightSpace = lightProjection * lightView;

	shader->SetVec3("sun.pos", position);
	shader->SetVec3("sun.ambient", ambient * ambient_strength);
	shader->SetVec3("sun.diffuse", diffuse * diffuse_strength);
	shader->SetVec3("sun.specular", specular * specular_strength);
	shader->SetI("sun.depthMap", 3);
	shader->SetMat4("sunLSM", lightSpace);
}
// Replace the 2D shadow-map resources created by Light's constructor
// (which runs SetupOccRender()) with a cube-map depth texture + FBO for
// omnidirectional shadow mapping, and cache one view matrix per face.
void PointLight::SetupCubeMap()
{
	// Drop the 2D depth texture inherited from SetupOccRender().
	glDeleteTextures(1, &depthMap);

	glGenTextures(1, &depthMap);
	glBindTexture(GL_TEXTURE_CUBE_MAP, depthMap);
	// One depth image per cube face.
	for (unsigned int i = 0; i < 6; ++i)
		glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_DEPTH_COMPONENT,
			SHADOW_WIDTH, SHADOW_HEIGHT, 0, GL_DEPTH_COMPONENT, GL_FLOAT, NULL);
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
	glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_EDGE);

	// Rebuild the framebuffer as depth-only (no color reads/writes); the
	// whole cube map is attached at once via glFramebufferTexture.
	glDeleteFramebuffers(1, &depthMapFBO);
	glGenFramebuffers(1, &depthMapFBO);
	glBindFramebuffer(GL_FRAMEBUFFER, depthMapFBO);
	glFramebufferTexture(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, depthMap, 0);
	glDrawBuffer(GL_NONE);
	glReadBuffer(GL_NONE);
	glBindFramebuffer(GL_FRAMEBUFFER, 0);

	// View matrices for the six cube faces: +X, -X, +Y, -Y, +Z, -Z.
	lightTransforms.push_back(glm::lookAt(position, position + glm::vec3(1.0f, 0.0f, 0.0f), glm::vec3(0.0f, -1.0f, 0.0f)));
	lightTransforms.push_back(glm::lookAt(position, position + glm::vec3(-1.0f, 0.0f, 0.0f), glm::vec3(0.0f, -1.0f, 0.0f)));
	lightTransforms.push_back(glm::lookAt(position, position + glm::vec3(0.0f, 1.0f, 0.0f), glm::vec3(0.0f, 0.0f, 1.0f)));
	lightTransforms.push_back(glm::lookAt(position, position + glm::vec3(0.0f, -1.0f, 0.0f), glm::vec3(0.0f, 0.0f, -1.0f)));
	lightTransforms.push_back(glm::lookAt(position, position + glm::vec3(0.0f, 0.0f, 1.0f), glm::vec3(0.0f, -1.0f, 0.0f)));
	lightTransforms.push_back(glm::lookAt(position, position + glm::vec3(0.0f, 0.0f, -1.0f), glm::vec3(0.0f, -1.0f, 0.0f)));
}
// Point light: always uses the perspective shadow projection (orth=false)
// and adds distance attenuation (linear/quadratic coefficients).
// Default 0.7/1.8 are commonly used attenuation constants for a short-range light.
PointLight::PointLight(glm::vec3 pos):Light(pos, false)
{
	linear = 0.7f;
	quadratic = 1.8f;

	// Swap the base class's 2D shadow map for a cube map.
	SetupCubeMap();
}

// Uniform colour + caller-supplied strengths and attenuation.
PointLight::PointLight(glm::vec3 pos, glm::vec3 color, float a, float d, float s, float l, float q): Light(pos, color, a, d,s, false)
{
	linear = l;
	quadratic = q;

	SetupCubeMap();
}

// Fully-specified colours per term + attenuation coefficients.
PointLight::PointLight(glm::vec3 pos, glm::vec3 ambi, glm::vec3 diff, glm::vec3 spec, float a, float d, float s, float l, float q):Light(pos, ambi, diff, spec, a, d, s, false)
{
	linear = l;
	quadratic = q;

	SetupCubeMap();
}
// Upload this light's parameters into slot 0 of the shader's light array.
void PointLight::SetupLight(Shader* shader)
{
	shader->SetVec3("lights[0].position", position);
	shader->SetVec3("lights[0].ambient", ambient * ambient_strength);
	shader->SetVec3("lights[0].diffuse", diffuse * diffuse_strength);
	shader->SetVec3("lights[0].specular", specular * specular_strength);
	shader->SetF("lights[0].linear", linear);
	shader->SetF("lights[0].quadratic", quadratic);
}

// Upload this light's parameters into slot `num` of the shader's light
// array, along with its light-space matrix and cube-map sampler slot.
void PointLight::SetupLight(Shader* shader, int num)
{
	const std::string base = "lights[" + std::to_string(num) + "]";

	shader->SetMat4("lightSpaceMatrix[" + std::to_string(num) + "]", lightProjection * lightView);

	shader->SetVec3((base + ".position").c_str(), position);
	shader->SetVec3((base + ".ambient").c_str(), ambient * ambient_strength);
	shader->SetVec3((base + ".diffuse").c_str(), diffuse * diffuse_strength);
	shader->SetVec3((base + ".specular").c_str(), specular * specular_strength);
	shader->SetF((base + ".linear").c_str(), linear);
	shader->SetF((base + ".quadratic").c_str(), quadratic);
	// Point-light shadow cube maps occupy texture units 4 and up
	// (unit 3 is the sun's depth map, see DirectionalLight::SetupLight).
	shader->SetI((base + ".depthMap").c_str(), 4 + num);
}

// Upload the six cube-face view matrices plus the values the depth shader
// needs to linearise distances (light position and far plane).
void PointLight::SetupDepthShader(Shader* shader)
{
	for (std::size_t face = 0; face < lightTransforms.size(); ++face) {
		const std::string locName = "shadowMatrix[" + std::to_string(face) + "]";
		shader->SetMat4(locName.c_str(), lightTransforms[face]);
	}
	shader->SetVec3("lightPos", position);
	shader->SetF("farPlane", far_plane);
}

// Replace the distance-attenuation coefficients.
void PointLight::SetIntensity(float l, float q)
{
	linear = l;
	quadratic = q;
}
| [
"danyfids@gmail.com"
] | danyfids@gmail.com |
c46b7a0edc548395be7ed244261a1051c6055635 | 2964f257a3b15a6b75954babad4e2d3acbb14d19 | /.pio/libdeps/genericSTM32F103C8/avdweb_VirtualDelay/Test_6_delays/Test_6_delays.ino | e189701aa439c6efb512cefadac12f45e94acbc5 | [] | no_license | lunakk-PL/MESH_ST32_SSPI | ae7c11a7fadc168fc62e79b27041d17e16b53802 | c905d194e0783d7d8dd60aba3a81d34752d29e23 | refs/heads/master | 2022-12-24T08:42:24.861202 | 2020-09-29T15:44:36 | 2020-09-29T15:44:36 | 298,672,055 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,124 | ino | #include <Arduino.h>
#include <Streaming.h>
#include "avdweb_VirtualDelay.h"
void setup()
{ Serial.begin(9600);
}
void loop()
{ static VirtualDelay delay1, delay2, delay3, delay4, delay5, delay6;
DO_ONCE
( Serial << "\nDO_ONCE 1";
delay1.start(200); // start sequence delay1 delay2 delay3
delay4.start(550); // start one-shot delay4
delay5.start(1250); // start one-shot delay5
)
if(delay4.elapsed()) Serial << "\nONE-SHOT 550ms " << millis();
if(delay5.elapsed()) Serial << "\nONE-SHOT 1250ms " << millis();
if(millis()>2250) DO_ONCE(Serial << "\nDO_ONCE 2 2250ms " << millis()) // test a second DO_ONCE
delay6.start(750);
if(delay6.elapsed()) Serial << "\n Repeat delay6 750ms " << millis();
if(delay1.elapsed()) // sequence with deadlock
{ Serial << "\nsequence delay1 200ms " << millis();
delay2.start(100);
}
if(delay2.elapsed())
{ Serial << "\nsequence delay2 100ms " << millis();
delay3.start(400);
}
if(delay3.elapsed())
{ Serial << "\nsequence delay3 400ms " << millis();
delay1.start(200);
}
}
| [
"lunakk@gmail.com"
] | lunakk@gmail.com |
c0d2aef06d4dec302a1d4c5816f035389a5612a4 | a8c107459049e3a817002602b9d83372076f9aac | /src/parser/cpp/source/formatter.cpp | 52b933696d3fb6a8c4fb501e33613164733b0bf1 | [
"MIT"
] | permissive | cgabriel5/nodecliac | ee3c09370ddbf4852d8588942fc9308c47518437 | a8f49955c70f5463b82f74369ea44c16ea0ff28e | refs/heads/master | 2023-07-24T11:14:53.680757 | 2023-01-31T13:48:31 | 2023-01-31T13:48:31 | 167,310,837 | 11 | 0 | MIT | 2023-07-18T20:34:55 | 2019-01-24T05:48:38 | Nim | UTF-8 | C++ | false | false | 9,357 | cpp | #include "../headers/templates.hpp"
#include "../headers/structs.hpp"
#include "../headers/str.hpp"
#include "../headers/parsetools.hpp"
#include <tuple>
#include <string>
#include <vector>
#include <map>
#include <set>
#include <regex>
#include <iostream>
using namespace std;
// Build the leading whitespace for a token of the given kind: the
// indentation level times `iamount` copies of `ichar`. An explicit
// non-zero `count` wins; otherwise the kind's default multiplier from
// MXP is used (operator[] default-inserts 0 for unknown kinds).
//
// BUG FIX: this previously computed ((count || MXP[type_]) * iamount).
// In C++ `||` yields a bool (0/1), so every indentation level deeper
// than one collapsed to a single indent step. The other nodecliac
// parser ports rely on the *value* of `count or MXP[type_]` (e.g.
// Python's `or` returns its operand), i.e. the multiplier itself.
string indent(const string &type_, const int &count,
              const char &ichar, const int &iamount,
              map<string, int> &MXP) {
    const int level = (count != 0) ? count : MXP[type_];
    // [https://stackoverflow.com/a/167810]
    return std::string(level * iamount, ichar);
}
// Walk backwards from `tid` and return the id of the first token whose
// kind is not in `skip`; -1 when no such token precedes it.
int prevtoken(StateParse &S, const int &tid, set<string> &skip/*={"tkNL"}*/) {
    int ttid = tid;
    while (--ttid > -1) {
        if (!contains(skip, S.LexerData.tokens[ttid].kind)) return ttid;
    }
    return -1;
}
// Pretty-printer ("format" action): walks the lexer's token branches and
// re-emits the source with normalized spacing, blank lines and
// indentation. Only the sixth tuple element (the formatted text) is
// meaningful; the rest are left empty to keep the return shape identical
// to the acdef generator ("maintain parity with acdef.py", below).
// NOTE(review): cchains/flags/settings — and the locals igc, ttypes,
// ttids, dtids — are bound but unused here, presumably for signature
// parity with the other backends; confirm before removing.
tuple <string, string, string, string, string, string, map<string, string>, string>
formatter(StateParse &S,
    vector<vector<Token>> &branches,
    vector<vector<vector<int>>> &cchains,
    map<int, vector<Flag>> &flags,
    vector<vector<int>> &settings) {

    tabdata fmt = S.args.fmt;
    bool igc = S.args.igc;

    // vector<string> output;
    // regex r("^[ \\t]+");

    vector<Token> &tokens = S.LexerData.tokens;
    map<int, string> &ttypes = S.LexerData.ttypes;
    vector<int> &ttids = S.LexerData.ttids;
    map<int, int> &dtids = S.LexerData.dtids;

    // Indentation level multipliers.
    map<string, int> MXP {
        {"tkCMT", 0},
        {"tkCMD", 0},
        {"tkFLG", 1},
        {"tkFOPT", 2},
        {"tkBRC", 0},
        {"tkNL", 0},
        {"tkSTN", 0},
        {"tkVAR", 0},
        {"tkBRC_RP", 1},
        {"tkBRC_LP", 2}
    };

    // Token-kind sets used as skip/membership filters below.
    set<string> NO_NL_CMT {"tkNL", "tkCMT"};
    set<string> ft_tkTYPES_NONE;
    set<string> ft_tkTYPES_0 {"tkNL"};
    set<string> ft_tkTYPES_1 {"tkSTN", "tkVAR"};
    set<string> ft_tkTYPES_2 {"tkASG", "tkSTR", "tkAVAL"};
    set<string> ft_tkTYPES_3 {"tkFVAL", "tkSTR", "tkDLS", "tkTBD"};
    set<string> ft_tkTYPES_4 {"tkDLS", "tkASG"};

    // Indentation character and per-level repeat count (from CLI args).
    const char ichar = fmt.ichar;
    const int iamount = fmt.iamount;

    // Output fragments; joined into the final string at the end.
    vector<string> cleaned;
    for (auto const &branch : branches) {
        // The first token of a branch decides how the branch is formatted.
        string parentkind = branch[0].kind;

        bool first_assignment = false;
        int level = 0;           // current indentation level (0/1/2)

        int brc_lp_count = 0;    // open-parenthesis nesting depth
        bool group_open = false; // inside a { ... } command group

        int j = 0;               // index of `leaf` within `branch`
        for (auto const &leaf : branch) {
            int tid = leaf.tid;
            string kind = leaf.kind;
            int line = leaf.line;

            //// Settings / Variables

            if (contains(ft_tkTYPES_1, parentkind)) {
                if (kind == "tkTRM") continue;

                if (tid != 0) {
                    // dline: how many source lines separate this token
                    // from the previous non-newline token.
                    Token& ptk = S.LexerData.tokens[prevtoken(S, tid, ft_tkTYPES_0)];
                    int dline = line - ptk.line;

                    if (contains(ft_tkTYPES_2, kind)) {
                        if (ptk.kind == "tkCMT") {
                            cleaned.push_back("\n");
                            if (dline > 1) cleaned.push_back("\n");
                        }
                        cleaned.push_back(" ");
                    } else {
                        // Preserve 0/1/2+ line gaps as space/newline/blank line.
                        if (dline == 0) cleaned.push_back(" ");
                        else if (dline == 1) cleaned.push_back("\n");
                        else cleaned.push_back("\n\n");
                    }
                }

                cleaned.push_back(tkstr(S, leaf.tid));

            //// Command chains

            } else if (parentkind == "tkCMD") {
                if (tid != 0) {
                    Token& ptk = tokens[prevtoken(S, tid, ft_tkTYPES_0)];
                    int dline = line - ptk.line;

                    if (dline == 1) {
                        cleaned.push_back("\n");
                    } else if (dline > 1) {
                        if (!group_open) {
                            cleaned.push_back("\n");
                            cleaned.push_back("\n");

                            // [TODO] Add format settings to customize formatting.
                            // For example, collapse newlines in flag scopes?
                            // if level > 0: cleaned.pop()
                        }
                    }
                }

                // When inside an indentation level or inside parenthesis,
                // append a space before every token to space things out.
                // However, because this is being done lazily, some token
                // conditions must be skipped. The skippable cases are when
                // a '$' precedes a string (""), i.e. a '$"command"'. Or
                // when an eq-sign precedes a '$', i.e. '=$("cmd")',
                if ((level || brc_lp_count == 1) &&
                    contains(ft_tkTYPES_3, kind)) {
                    Token& ptk = tokens[prevtoken(S, tid, NO_NL_CMT)];
                    string pkind = ptk.kind;

                    if (pkind != "tkBRC_LP" && cleaned.back() != " " && not
                        ((kind == "tkSTR" && pkind == "tkDLS") ||
                         (kind == "tkDLS" && pkind == "tkASG"))) {
                        cleaned.push_back(" ");
                    }
                }

                // Dispatch on the token kind.
                if (kind == "tkBRC_LC") {          // '{' — open command group
                    group_open = true;
                    cleaned.push_back(tkstr(S, leaf.tid));

                } else if (kind == "tkBRC_RC") {   // '}' — close command group
                    group_open = false;
                    cleaned.push_back(tkstr(S, leaf.tid));

                } else if (kind == "tkDCMA" && !first_assignment) {
                    cleaned.push_back(tkstr(S, leaf.tid));
                    // Append newline after group is cloased.
                    // if (!group_open) cleaned.push_back("\n");

                } else if (kind == "tkASG" && !first_assignment) {
                    // First '=' of the chain: surround with single spaces.
                    first_assignment = true;
                    cleaned.push_back(" ");
                    cleaned.push_back(tkstr(S, leaf.tid));
                    cleaned.push_back(" ");

                } else if (kind == "tkBRC_LB") {   // '[' — enter flag scope
                    cleaned.push_back(tkstr(S, leaf.tid));
                    level = 1;

                } else if (kind == "tkBRC_RB") {   // ']' — leave flag scope
                    level = 0;
                    first_assignment = false;
                    cleaned.push_back(tkstr(S, leaf.tid));

                } else if (kind == "tkFLG") {
                    if (level) cleaned.push_back(indent(kind, level, ichar, iamount, MXP));
                    cleaned.push_back(tkstr(S, leaf.tid));

                } else if (kind == "tkKYW") {
                    if (level) cleaned.push_back(indent(kind, level, ichar, iamount, MXP));
                    cleaned.push_back(tkstr(S, leaf.tid));
                    cleaned.push_back(" ");

                } else if (kind == "tkFOPT") {     // flag option — deepest level
                    level = 2;
                    cleaned.push_back(indent(kind, level, ichar, iamount, MXP));
                    cleaned.push_back(tkstr(S, leaf.tid));

                } else if (kind == "tkBRC_LP") {   // '('
                    brc_lp_count += 1;
                    Token& ptk = tokens[prevtoken(S, tid, ft_tkTYPES_0)];
                    string pkind = ptk.kind;

                    if (!contains(ft_tkTYPES_4, pkind)) {
                        // Comment directly before '(' shifts it one level deeper.
                        int scope_offset = int(pkind == "tkCMT");
                        cleaned.push_back(indent(kind, level + scope_offset, ichar, iamount, MXP));
                    }
                    cleaned.push_back(tkstr(S, leaf.tid));

                } else if (kind == "tkBRC_RP") {   // ')'
                    brc_lp_count -= 1;

                    // A ')' that closes a multi-line option list dedents.
                    if (level == 2 && !brc_lp_count &&
                        branch[j - 1].kind != "tkBRC_LP") {
                        cleaned.push_back(indent(kind, level - 1, ichar, iamount, MXP));
                        level = 1;
                    }
                    cleaned.push_back(tkstr(S, leaf.tid));

                } else if (kind == "tkCMT") {
                    string ptk = tokens[prevtoken(S, leaf.tid, ft_tkTYPES_NONE)].kind;
                    string atk = tokens[prevtoken(S, tid, ft_tkTYPES_0)].kind;
                    if (ptk == "tkNL") {
                        // Own-line comment: indent it; trailing comment: one space.
                        int scope_offset = 0;
                        if (atk == "tkASG") scope_offset = 1;
                        cleaned.push_back(indent(kind, level + scope_offset, ichar, iamount, MXP));
                    } else cleaned.push_back(" ");
                    cleaned.push_back(tkstr(S, leaf.tid));

                } else {
                    // Any other token: emit verbatim.
                    cleaned.push_back(tkstr(S, leaf.tid));
                }

            //// Comments

            } else if (parentkind == "tkCMT") {
                if (tid != 0) {
                    Token& ptk = tokens[prevtoken(S, tid, ft_tkTYPES_0)];
                    int dline = line - ptk.line;
                    if (dline == 1) {
                        cleaned.push_back("\n");
                    } else {
                        cleaned.push_back("\n");
                        cleaned.push_back("\n");
                    }
                }
                cleaned.push_back(tkstr(S, leaf.tid));

            } else {
                // Anything else passes through, minus terminator tokens.
                if (kind != "tkTRM") {
                    cleaned.push_back(tkstr(S, leaf.tid));
                }
            }

            j++;
        }
    }

    // Return empty values to maintain parity with acdef.py.
    string formatted = join(cleaned, "") + "\n";
    tuple <string, string, string, string, string, string, map<string, string>, string> data;
    map<string, string> placeholders;
    data = make_tuple("", "", "", "", "", formatted, placeholders, "");
    return data;
}
| [
"cgabriel5@users.noreply.github.com"
] | cgabriel5@users.noreply.github.com |
3328338c65a3f9cf96c7b1a4289debdae06635b5 | bb4a910dc6da617d2f6d526b8faab708ab7bf6aa | /src/msg.cpp | 35d95c00240fa93e12012290e2a1ed74e73ee40b | [] | no_license | TonioElbuey/Morse_cpp | b1cff2767c2c42d02a52a4c671b73788d7586752 | 9b913fb6d011c23adfb85e1ad896444541febdd1 | refs/heads/main | 2023-02-17T14:55:29.709867 | 2021-01-20T21:43:24 | 2021-01-20T21:43:24 | 323,300,432 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 4,394 | cpp | #include "../include/msg.h"
#include <iostream>
// Fill both conversion tables: character -> Morse code, and the exact
// inverse, Morse code -> character. A space maps to a single space, so
// (combined with the one-space symbol separator used when encoding) a
// word boundary appears as a three-space run in the Morse text.
msg::msg() {
	tabFrToMorse['a'] = ".-";
	tabFrToMorse['b'] = "-...";
	tabFrToMorse['c'] = "-.-.";
	tabFrToMorse['d'] = "-..";
	tabFrToMorse['e'] = ".";
	tabFrToMorse['f'] = "..-.";
	tabFrToMorse['g'] = "--.";
	tabFrToMorse['h'] = "....";
	tabFrToMorse['i'] = "..";
	tabFrToMorse['j'] = ".---";
	tabFrToMorse['k'] = "-.-";
	tabFrToMorse['l'] = ".-..";
	tabFrToMorse['m'] = "--";
	tabFrToMorse['n'] = "-.";
	tabFrToMorse['o'] = "---";
	tabFrToMorse['p'] = ".--.";
	tabFrToMorse['q'] = "--.-";
	tabFrToMorse['r'] = ".-.";
	tabFrToMorse['s'] = "...";
	tabFrToMorse['t'] = "-";
	tabFrToMorse['u'] = "..-";
	tabFrToMorse['v'] = "...-";
	tabFrToMorse['w'] = ".--";
	tabFrToMorse['x'] = "-..-";
	tabFrToMorse['y'] = "-.--";
	tabFrToMorse['z'] = "--..";
	tabFrToMorse['1'] = ".----";
	tabFrToMorse['2'] = "..---";
	tabFrToMorse['3'] = "...--";
	tabFrToMorse['4'] = "....-";
	tabFrToMorse['5'] = ".....";
	tabFrToMorse['6'] = "-....";
	tabFrToMorse['7'] = "--...";
	tabFrToMorse['8'] = "---..";
	tabFrToMorse['9'] = "----.";
	tabFrToMorse['0'] = "-----";
	tabFrToMorse[' '] = " ";

	tabMorseToFr[".-"] = 'a';
	tabMorseToFr["-..."] = 'b';
	tabMorseToFr["-.-."] = 'c';
	tabMorseToFr["-.."] = 'd';
	tabMorseToFr["."] = 'e';
	tabMorseToFr["..-."] = 'f';
	tabMorseToFr["--."] = 'g';
	tabMorseToFr["...."] = 'h';
	tabMorseToFr[".."] = 'i';
	tabMorseToFr[".---"] = 'j';
	tabMorseToFr["-.-"] = 'k';
	tabMorseToFr[".-.."] = 'l';
	tabMorseToFr["--"] = 'm';
	tabMorseToFr["-."] = 'n';
	tabMorseToFr["---"] = 'o';
	tabMorseToFr[".--."] = 'p';
	tabMorseToFr["--.-"] = 'q';
	tabMorseToFr[".-."] = 'r';
	tabMorseToFr["..."] = 's';
	tabMorseToFr["-"] = 't';
	tabMorseToFr["..-"] = 'u';
	tabMorseToFr["...-"] = 'v';
	tabMorseToFr[".--"] = 'w';
	tabMorseToFr["-..-"] = 'x';
	tabMorseToFr["-.--"] = 'y';
	tabMorseToFr["--.."] = 'z';
	tabMorseToFr[".----"] = '1';
	// BUG FIX: '2' was previously keyed on ".----" as well, which both
	// overwrote the entry for '1' and left "..---" (the code for '2',
	// per tabFrToMorse above) undecodable.
	tabMorseToFr["..---"] = '2';
	tabMorseToFr["...--"] = '3';
	tabMorseToFr["....-"] = '4';
	tabMorseToFr["....."] = '5';
	tabMorseToFr["-...."] = '6';
	tabMorseToFr["--..."] = '7';
	tabMorseToFr["---.."] = '8';
	tabMorseToFr["----."] = '9';
	tabMorseToFr["-----"] = '0';
}
// Overwrite the stored Morse-encoded message.
void msg::set_msg_morse(std::string code) { msg_morse = code; }

// Overwrite the stored plain-text message.
void msg::set_msg_fr(std::string phrase) { msg_fr = phrase; }

// Read back the plain-text message.
std::string msg::get_msg_fr() { return msg_fr; }

// Read back the Morse-encoded message.
std::string msg::get_msg_morse() { return msg_morse; }
void msg::frToMorse() {
int len = msg_fr.size();
for (int i=0; i<len; i++) { // Codage caractère par caractère
char carac = msg_fr[i];
std::string equivMorse = msg::tabFrToMorse[carac]; // Recherche de l'équivalent avec dictionnaire
msg_morse += equivMorse;
if (i != len-1) { // Pas de séparation en fin de message
msg_morse += ' '; // Séparation inter-caractères
}
/*
Si on tombe sur un espace pour le message en français (séparation inter-mots français) l'équivalent
dictionnaire plus l'ajout sur la ligne de code juste ci-dessus va bien ajouter un double espace
(séparation inter-mots morse).
*/
}
}
// Decode msg_morse into msg_fr, one Morse symbol at a time. Symbols are
// separated by a single space; a run of three spaces marks a word
// boundary (matching the encoding produced by frToMorse()).
// NOTE(review): appends to msg_fr without clearing it first.
void msg::morseToFr() {
	int len = msg_morse.size();
	int i = 0; // scan position
	while (i<len) { // decode one Morse symbol per iteration
		int j = i; // iterator used to extract the complete Morse symbol
		std::string carMorse;
		carMorse += msg_morse[i]; // first signal of the symbol ('.' or '-')
		j++;
		while ( (j<len) and (msg_morse[j] != ' ') ) { // stop at end of symbol
			carMorse += msg_morse[j]; // extract
			j++;
		}
		char equivFr = msg::tabMorseToFr[carMorse]; // table lookup (decode)
		msg_fr += equivFr;
		// Detect the three-space inter-word separator.
		if ( (j<len-2) and (msg_morse[j] == ' ') and (msg_morse[j+1] == ' ') and (msg_morse[j+2] == ' ') ) {
			msg_fr += ' ';
			j += 2; // skip the two extra spaces of the word separator
		}
		i = j+1; // move past the separator to the next symbol
	}
}
"antoine.leboeuf@orange.fr"
] | antoine.leboeuf@orange.fr |
93502dab64eecbb26b8a32983d739531afe6b803 | 7ebc12c326dd918bc96c08167f0457ed2f8f93de | /CodeJam/2013/Round 1/C/hax.cpp | eaccfd4e38eb4e95ee57ecadb53c068d2e92ec13 | [] | no_license | qwaker00/Olymp | 635b61da0e80d1599edfe1bc9244b95f015b3007 | c3ab2c559fa09f080a3f02c84739609e1e85075d | refs/heads/master | 2021-01-18T16:35:58.452451 | 2015-08-06T16:45:58 | 2015-08-06T16:46:25 | 5,674,825 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,615 | cpp | #ifdef DEBUG
#define _GLIBCXX_DEBUG
#endif
#include <iostream>
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <memory.h>
#include <math.h>
#include <string>
#include <string.h>
#include <queue>
#include <vector>
#include <set>
#include <deque>
#include <map>
#include <functional>
#include <numeric>
#include <sstream>
typedef long double LD;
typedef long long LL;
typedef unsigned long long ULL;
typedef unsigned int uint;
#define PI 3.1415926535897932384626433832795
#define sqr(x) ((x)*(x))
using namespace std;
#define BN 2222222 // max number of trie nodes

char s[4444];              // input line buffer (dictionary words / queries)
int b[BN][26], bn;         // trie: b[node][letter] -> child id (0 = none); bn = node count
bool eb[BN], p[BN], d[BN]; // eb[x]: a word ends at node x; p/d appear only in the commented-out Aho-Corasick code below
int f[4444][6];            // DP table: f[pos][state] = min corrections (states 0..4 used; see rec())
// Match dictionary words (stored in the global trie b/eb) against the
// suffix `s` of length `l` starting at absolute position `p`, updating
// the DP table f. `err` counts consecutive unchanged letters since the
// last corrected one, capped at 4; a new correction is only permitted
// once err has reached 4 (apparently enforcing that corrected letters
// are spaced apart). `erc` is the number of corrections spent so far.
void rec(int x, int p, char* s, int l, int err, int erc) {
	if (eb[x]) {
		// A word ends here: position p is reachable with erc corrections
		// while in match-state err. (A no-op debug stub on erc was removed.)
		f[p][err] = min(f[p][err], erc);
	}
	if (l <= 0) return;
	for (int c = 0; c < 26; ++c) {
		int nxt = b[x][c];
		if (!nxt) continue; // no dictionary word continues with this letter
		if (*s == c + 'a') {
			// Exact match: extend, bumping the clean-run counter (cap 4).
			rec(nxt, p + 1, s + 1, l - 1, min(err + 1, 4), erc);
		} else if (err == 4) {
			// Treat the input letter as garbled: pay one correction, reset run.
			rec(nxt, p + 1, s + 1, l - 1, 0, erc + 1);
		}
	}
}
// Driver: minimum number of single-letter corrections needed so the input
// string splits entirely into dictionary words (per-case DP over rec()).
int main() {
	// Pass 1: read the dictionary and build a trie over it.
	freopen("dict.txt", "r", stdin);

	int maxl = 0;
	while (gets(s)) {
		int l = strlen(s);
		if (l > maxl) maxl = l;
		int cur = 0;
		for (int i = 0; i < l; ++i) {
			if (b[cur][s[i] - 'a'] == 0) b[cur][s[i] - 'a'] = ++bn;
			cur = b[cur][s[i] - 'a'];
		}
		eb[cur] = 1; // mark end-of-word
	} /*
	queue<int> q;
	for (int i = 0; i < 26; ++i) if (b[0][i]) {
		q.push(b[0][i]);
		p[b[0][i]] = 0;
		d[b[0][i]] = 1;
	}
	while (!q.empty()) {
		int x = q.front(); q.pop();
		for (int i = 0; i < 26; ++i)
			if (b[x][i]) {
				q.push(b[x][i]);
				p[b[x][i]] = b[p[x]][i];
				d[b[x][i]] = d[x] + 1;
			} else b[x][i] = b[p[x]][i];
	}
	*/

	cerr << bn << " " << maxl << " " << clock() << endl; // trie stats for debugging

	// Pass 2: solve the test cases from .in, writing answers to .out.
	freopen(".in", "r", stdin);
	freopen(".out", "w", stdout);

	int T;
	cin >> T;
	gets(s); // consume the remainder of the first line
	for (int __it = 1; __it <= T; ++__it) {
		gets(s);
		int l = strlen(s);
		// f[i][j]: min corrections to decode the first i characters while
		// in clean-run state j (see rec()); memset(63) acts as +infinity.
		memset(f, 63, sizeof(f));
		f[0][4] = 0; // start state: a correction is allowed immediately
		for (int i = 0; i < l; ++i) {
			for (int j = 0; j <= 4; ++j) {
				if (f[i][j] < 1e9)
					rec(0, i, s + i, l - i, j, f[i][j]);
			}
		}
		// Best over all end states once the whole string is consumed.
		int ans = *min_element(f[l], f[l] + 5);
		cout << "Case #" << __it << ": " << ans << endl;
	}
	return 0;
}
| [
"qwaker.00@gmail.com"
] | qwaker.00@gmail.com |
c2d3ac994d3de2d9efddf1513744d4c0d80c9ae3 | c029229195cc5e8b049d11763d31708a90064752 | /code/1062.cpp | 6cca6944ad483b5e4471b2f1f5272c2a6f6886b8 | [
"MIT"
] | permissive | Tomspiano/PAT-Advanced-Level-Practice | 83abe3a1255429846f66977a946fa77d7e00af62 | e7f543a23f852bcbad52170897a42b41622191ed | refs/heads/master | 2022-11-28T15:28:01.791633 | 2020-08-18T18:29:19 | 2020-08-18T18:29:19 | 282,151,437 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,957 | cpp | #include<bits/stdc++.h>
#define N 100005
using namespace std;
#define rep(i,a,b) for(int i=(a); i<(b); ++i)
int n, low, high;
struct Node {
char id[10];
int vir, tal;
} tmp, a[N];
bool is_sage(int i) {
return a[i].vir>=high && a[i].tal>=high;
}
bool is_noble(int i) {
return a[i].vir>=high && a[i].tal<high;
}
bool is_fool(int i) {
return a[i].vir<high && a[i].tal<high && a[i].vir>=a[i].tal;
}
struct cmp {
bool operator() (const int &p, const int &q) const {
if(is_sage(p)) {
if(is_sage(q)) {
if(a[p].vir+a[p].tal != a[q].vir+a[q].tal)
return a[p].vir+a[p].tal > a[q].vir+a[q].tal;
if(a[p].vir != a[q].vir)
return a[p].vir > a[q].vir;
return strcmp(a[p].id, a[q].id) < 0;
}
return 1;
}
if(is_noble(p)) {
if(is_sage(q))
return 0;
if(is_noble(q)) {
if(a[p].vir+a[p].tal != a[q].vir+a[q].tal)
return a[p].vir+a[p].tal > a[q].vir+a[q].tal;
if(a[p].vir != a[q].vir)
return a[p].vir > a[q].vir;
return strcmp(a[p].id, a[q].id) < 0;
}
return 1;
}
if(is_fool(p)) {
if(is_sage(q) || is_noble(q))
return 0;
if(is_fool(q)) {
if(a[p].vir+a[p].tal != a[q].vir+a[q].tal)
return a[p].vir+a[p].tal > a[q].vir+a[q].tal;
if(a[p].vir != a[q].vir)
return a[p].vir > a[q].vir;
return strcmp(a[p].id, a[q].id) < 0;
}
return 1;
}
if(is_sage(q) || is_noble(q) || is_fool(q))
return 0;
if(a[p].vir+a[p].tal != a[q].vir+a[q].tal)
return a[p].vir+a[p].tal > a[q].vir+a[q].tal;
if(a[p].vir != a[q].vir)
return a[p].vir > a[q].vir;
return strcmp(a[p].id, a[q].id) < 0;
}
};
int t[N];
// PAT 1062 "Talent and Virtue": read N candidates with virtue/talent
// grades, keep those reaching the low line L on both, sort them by rank
// (see struct cmp) and print the admitted list.
int main() {
    scanf("%d%d%d", &n, &low, &high);
    int cnt = 0;  // number of qualified candidates stored in a[]
    while(n--) {
        scanf("%s%d%d", tmp.id, &tmp.vir, &tmp.tal);
        // Only candidates reaching the low line on both grades qualify.
        if(tmp.vir>=low && tmp.tal>=low) {
            a[cnt] = tmp;
            t[cnt] = cnt;  // t[] holds indices into a[]; only indices are sorted
            ++cnt;
        }
    }
    sort(t, t+cnt, cmp());
    printf("%d\n", cnt);
    rep(i,0,cnt) {
        printf("%s %d %d\n", a[t[i]].id, a[t[i]].vir, a[t[i]].tal);
    }
    return 0;
}
| [
"helloworldbrandnew@163.com"
] | helloworldbrandnew@163.com |
6a19802a7a44de70d1d3b821196cfabd9beafe05 | 79a5d466c94f59431f120b47bb1a359f16a0d506 | /libc/bionic/__stack_chk_fail.cpp | cb039cfcd5a7767194a9de1f96db0a0950c580ee | [
"BSD-4-Clause",
"SMLNJ",
"LicenseRef-scancode-ibm-dhcp",
"BSD-3-Clause",
"BSD-4-Clause-UC",
"LicenseRef-scancode-warranty-disclaimer",
"Martin-Birgmeier",
"SunPro",
"MIT",
"ISC",
"LicenseRef-scancode-bsd-unchanged",
"LicenseRef-scancode-bsd-unmodified",
"HPND",
"LicenseRef-scancode-red-hat... | permissive | GZOSP/bionic | 9558f14c66c426fa16cbcaf4cb6c80748eba22d6 | 7e39f4997edbb8616c47c768f8d6986c6ed3776f | refs/heads/8.0 | 2021-01-19T17:41:10.350260 | 2017-06-19T17:39:02 | 2017-11-20T08:29:44 | 101,078,424 | 2 | 45 | null | 2017-09-29T05:50:02 | 2017-08-22T15:28:04 | C | UTF-8 | C++ | false | false | 1,580 | cpp | /*
* Copyright (C) 2008 The Android Open Source Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
* OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <stdlib.h>
#include "private/bionic_ssp.h"
#include "private/libc_logging.h"
// Invoked by compiler-emitted stack-protector epilogues (-fstack-protector)
// when the stack canary has been overwritten.  Per the SSP contract this
// function must not return; __libc_fatal is expected to terminate the
// process after logging the message.
void __stack_chk_fail() {
  __libc_fatal("stack corruption detected (-fstack-protector)");
}
| [
"enh@google.com"
] | enh@google.com |
a5ad5456aaba27820107b010ad30775946a1c3f5 | ca57a8d2d129e5a2f156ff10201a959e55b47075 | /Projet/Modules/DHT11.ino | 6918c4a5a7e04e9b584355228efe73563665331d | [] | no_license | LesDeuxM/Projet-Maison-Connectee | cf51c9c22fbe7b7e0f8e0ff694cc022613d91c01 | b3b999d80e751d3c6d49d01b250e6e054496e560 | refs/heads/master | 2020-04-11T13:04:25.337377 | 2019-03-20T13:08:06 | 2019-03-20T13:08:06 | 161,803,159 | 0 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 572 | ino | #include <dht.h>
#define dataPin 2
dht DHT; // Creats a DHT object
// One-time initialization: open the serial port at 9600 baud so sensor
// readings can be printed to the serial monitor.
void setup() {
  Serial.begin(9600);
}
// Samples the DHT11 once per cycle and prints temperature (degrees C) and
// relative humidity (%) to the serial monitor, then waits 4 seconds.
void loop() {
  int readData = DHT.read11(dataPin); // Reads the data from the sensor
  // NOTE(review): readData is unused; it presumably holds the dht library's
  // status code (OK / checksum / timeout) -- TODO confirm and check it.
  float t = DHT.temperature; // Gets the values of the temperature
  float h = DHT.humidity; // Gets the values of the humidity
  // Printing the results on the serial monitor
  Serial.print("Temperature = ");
  Serial.print(t);
  Serial.print(" *C ");
  Serial.print(" Humidity = ");
  Serial.print(h);
  Serial.println(" % ");
  delay(4000);  // sample period: 4 s (DHT11 needs >= 1 s between reads)
}
| [
"noreply@github.com"
] | noreply@github.com |
81ed9ea96f52d55636a3392149143d6b24c839a8 | da8d1b8255feb551e9dc36853cd680da113791a4 | /src/AvbApi_Factory.cpp | 52b29eb530e3e4c5d3931fcedf494169191c2f98 | [
"BSD-2-Clause"
] | permissive | jdkoftinoff/jdksavbapi | a577285fde064d7be0de8d78bddcf2ac18a89128 | 431ee094ed22d05fc5ec7bc0b91f216e33466aae | refs/heads/master | 2020-05-18T17:23:21.335287 | 2014-10-07T20:57:39 | 2014-10-07T20:57:39 | 24,826,923 | 0 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 1,408 | cpp | /*
Copyright (c) 2014, Jeff Koftinoff
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "AvbApi_world.hpp"
#include "AvbApi_Factory.hpp"
const char *avbapi_factory_file = __FILE__;
| [
"jeffk@jdkoftinoff.com"
] | jeffk@jdkoftinoff.com |
38cfe106808e7d94b811e049dc27533cf6dc5e33 | eac7ae395c4832ac394087054ab014d1d6a9f6a6 | /opt-truss-decomp/pkt_serial/pkt_serial.cpp | 88b95c3907acf4d92c8800e179ac5038deff30d7 | [
"MIT"
] | permissive | mexuaz/AccTrussDecomposition | 21be22007e1c50ca4b7df6fbbad1dfbf4c2fffae | 15a9e8fd2f123f5acace5f3b40b94f1a74eb17d4 | refs/heads/master | 2022-12-14T03:41:05.133564 | 2020-09-03T00:35:33 | 2020-09-03T00:35:33 | 291,565,779 | 0 | 0 | MIT | 2020-08-30T22:57:55 | 2020-08-30T22:57:55 | null | UTF-8 | C++ | false | false | 5,560 | cpp | #include <cassert>
#include "util/graph/graph.h"
#include "../util/util.h"
#include "../util/timer.h"
#include "util/log/log.h"
#include "opt_pkt/parallel_all_edge_cnc.h"
#include "util/intersection/set_utils.h"
// Scans every edge's support value and appends each edge whose support
// equals `level` to the `curr` frontier, advancing *currTail past the
// appended entries.
void PKT_scan_serial(long numEdges, int *EdgeSupport, int level, eid_t *curr, long *currTail) {
    long tail = *currTail;
    for (long e = 0; e < numEdges; e++) {
        if (EdgeSupport[e] == level) {
            curr[tail] = e;
            tail++;
        }
    }
    *currTail = tail;
}
// Serially process one sub-level of the truss-decomposition peeling using
// adjacency-list intersection.  For every frontier edge e1=<u,v> in curr,
// every common neighbour w of u and v closes a triangle (e1,e2,e3); the
// supports of the surviving edges e2,e3 are decremented, and any edge whose
// support drops to `level` is appended to `next` (nextTail advanced).
// Frontier edges are marked in processed[] so later triangles ignore them.
void PKT_processSubLevel_serial_intersection(graph_t *g, eid_t *curr, long currTail, int *EdgeSupport,
                                             int level, eid_t *next, long *nextTail, bool *processed,
                                             Edge *edgeIdtoEdge) {
    vector<pair<eid_t, eid_t >> intersection_res;
    // Decrement an edge's support; if it hits `level`, push it on `next`.
    auto serial_sup_updater = [EdgeSupport, &next, level, &nextTail](eid_t edge) {
        EdgeSupport[edge] = EdgeSupport[edge] - 1;
        if (EdgeSupport[edge] == level) {
            next[(*nextTail)] = edge;
            (*nextTail) = (*nextTail) + 1;
        }
    };
    for (long i = 0; i < currTail; i++) {
        //process edge <u,v>
        eid_t e1 = curr[i];
        Edge edge = edgeIdtoEdge[e1];
        vid_t u = edge.u;
        vid_t v = edge.v;
        // Neighbour ranges of u and v in the CSR arrays.
        eid_t uStart = g->num_edges[u], uEnd = g->num_edges[u + 1];
        eid_t vStart = g->num_edges[v], vEnd = g->num_edges[v + 1];
        eid_t off_nei_u = uStart, off_nei_v = vStart;
        intersection_res.clear();
        if (uEnd - uStart > 0 && vEnd - vStart > 0) {
            // Collect pairs of offsets (into v's and u's lists) of each
            // common neighbour w.
            SetIntersection(g, uStart, uEnd, vStart, vEnd, intersection_res);
            for (auto pair: intersection_res) {
                std::tie(off_nei_v, off_nei_u) = pair;
                eid_t e2 = g->eid[off_nei_v]; //<v,w>
                eid_t e3 = g->eid[off_nei_u]; //<u,w>
                //If e1, e2, e3 forms a triangle
                if ((!processed[e2]) && (!processed[e3])) {
                    //Decrease support of both e2 and e3
                    if (EdgeSupport[e2] > level && EdgeSupport[e3] > level) {
                        //Process e2
                        serial_sup_updater(e2);
                        //Process e3
                        serial_sup_updater(e3);
                    } else if (EdgeSupport[e2] > level) {
                        //process e2
                        serial_sup_updater(e2);
                    } else if (EdgeSupport[e3] > level) {
                        //process e3
                        serial_sup_updater(e3);
                    }
                }
            }
        }
        processed[e1] = true;
    }
}
/** Serial PKT_intersection Algorithm ***
 * Full truss-decomposition driver: first computes each edge's support
 * (triangle count, done in parallel with OpenMP), then peels edges level
 * by level -- scan edges whose support equals the current level into a
 * frontier and process frontiers until the level is exhausted.  On return
 * EdgeSupport[] holds each edge's final (trussness) support level.
 */
void PKT_serial_intersection(graph_t *g, int *EdgeSupport, Edge *edgeIdToEdge) {
    size_t tc_cnt = 0;
    long numEdges = g->m / 2;  // g->m counts directed edges; lists are symmetric
    long n = g->n;
    //An array to mark processed array
    bool *processed = (bool *) malloc(numEdges * sizeof(bool));
    assert(processed != nullptr);
    long currTail = 0;
    long nextTail = 0;
    // Double-buffered frontiers, swapped after each sub-level.
    auto *curr = (eid_t *) malloc(numEdges * sizeof(eid_t));
    assert(curr != nullptr);
    auto *next = (eid_t *) malloc(numEdges * sizeof(eid_t));
    assert(next != nullptr);
    auto *startEdge = (eid_t *) malloc(n * sizeof(eid_t));
    assert(startEdge != nullptr);
    //Initialize the arrays
    for (eid_t e = 0; e < numEdges; e++) {
        processed[e] = false;
    }
    //Find the startEdge for each vertex
    // (first offset in i's adjacency list whose neighbour id exceeds i)
    for (vid_t i = 0; i < n; i++) {
        eid_t j = g->num_edges[i];
        eid_t endIndex = g->num_edges[i + 1];
        while (j < endIndex) {
            if (g->adj[j] > i)
                break;
            j++;
        }
        startEdge[i] = j;
    }
#if TIME_RESULTS
    double triTime = 0;
    double scanTime = 0;
    double procTime = 0;
    double startTime = timer();
#endif
    Timer iter_timer;
    // Parallel support (triangle) counting; tc_cnt triple-counts triangles.
#pragma omp parallel for schedule(dynamic, 6000) reduction(+:tc_cnt)
    for (auto i = 0u; i < g->m; i++)
        ComputeSupport(g, EdgeSupport, tc_cnt, i);
#pragma omp single
    log_trace("TC Cnt: %'zu", tc_cnt / 3);
#if TIME_RESULTS
    triTime = timer() - startTime;
    startTime = timer();
#endif
    //Support computation is done
    //Computing support now
    int level = 0;
    long todo = numEdges;  // edges still to be peeled
    log_trace("Before Level-Processing: %.9lfs", iter_timer.elapsed());
    iter_timer.reset();
    while (todo > 0) {
        log_trace("Current Level: %d, Elapsed Time: %.9lfs", level, iter_timer.elapsed());
        iter_timer.reset();
#if TIME_RESULTS
        startTime = timer();
#endif
        // Gather all edges whose support equals the current level.
        PKT_scan_serial(numEdges, EdgeSupport, level, curr, &currTail);
#if TIME_RESULTS
        scanTime += timer() - startTime;
        startTime = timer();
#endif
        // Peeling at this level may push further edges down to it; iterate
        // sub-levels until the frontier empties.
        while (currTail > 0) {
            todo -= currTail;
            PKT_processSubLevel_serial_intersection(g, curr, currTail, EdgeSupport, level, next, &nextTail, processed,
                                                    edgeIdToEdge);
            swap(curr, next);
            currTail = nextTail;
            nextTail = 0;
        }
#if TIME_RESULTS
        procTime += timer() - startTime;
#endif
        level = level + 1;
    }
#if TIME_RESULTS
    log_info("Tri time: %9.3lf Scan Time: %9.3lf Proc Time: %9.3lf", triTime, scanTime, procTime);
    log_info("PKT-serial-Time-Intersection: %9.3lf", triTime + scanTime + procTime);
#endif
    //Free memory
    free(next);
    free(curr);
    free(processed);
    free(startEdge);
}
| [
"yche@cse.ust.hk"
] | yche@cse.ust.hk |
a04a4a35610f2dd531051357e2035059385bab0b | 98132454bb05bff417826080fc7908701f2c643f | /LunarLander/Vector2.h | a8aa52bcfc2e32bf9d6e2561e5d5ef0a0337e860 | [] | no_license | CGTGPY3G1/LunarLander | a10fb31b3c588e51edeaab6d3b4c7f3d716e3f42 | 08be89aa17ddb9b911f5ff976d5e3ed2949a1f1f | refs/heads/master | 2021-03-22T03:35:59.311636 | 2018-07-19T18:38:05 | 2018-07-19T18:38:05 | 89,891,950 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,333 | h | #pragma once
class Vector2{
public:
Vector2();
Vector2(const float & x, const float & y);
~Vector2();
float GetX();
void SetX(const float & x);
float GetY();
void SetY(const float & y);
Vector2 Set(const float & x, const float & y);
Vector2 operator = (Vector2 toSet);
Vector2 Add(const float & x, const float & y);
Vector2 operator + (Vector2 toAdd);
const Vector2 operator + (const Vector2 & toAdd) const;
Vector2 operator += (Vector2 toAdd);
Vector2 Subtract(const float & x, const float & y);
Vector2 operator - (Vector2 toSubtract);
const Vector2 operator - (const Vector2 & toSubtract) const;
Vector2 operator -= (Vector2 toSubtract);
Vector2 Scale(const float & scalar);
Vector2 operator * (const float & scalar);
Vector2 operator / (const float & scalar);
float Dot(Vector2 other);
float Dot(const float & x, const float & y);
Vector2 Normalize();
Vector2 Normalized();
float SquareMagnitude();
float Magnitude();
float AngleInRadians();
Vector2 RotateInRadians(const float & angle);
Vector2 RotatedInRadians(const float & angle);
/// <summary>
/// rotates the object 90 degrees in the specified direction.
/// </summary>
/// <param name="clockwise">the direction (true if clockwise).</param>
/// <returns>this</returns>
Vector2 Flip90(const bool & clockwise);
private:
float x, y;
};
| [
"b00289996@studentmail.uws.ac.uk"
] | b00289996@studentmail.uws.ac.uk |
8b032038aef565083ed64a933d67f9950bd9dede | b0c34dedca1af4710483379cfb8a8309cb48629d | /ReadFile/ReadFile/main.cpp | aeb996c30c403c54e7a30ebe2ddd99c1bc1c8e44 | [] | no_license | EliasRosendahl/FileProccesing | 59bf892108db249343036cfb4ad7b738f7e15d30 | 83c9e724c294d6eed23cae9bd1efe5c591d6d399 | refs/heads/master | 2020-03-21T09:41:37.517319 | 2018-06-25T07:03:41 | 2018-06-25T07:03:41 | 138,412,780 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 502 | cpp | #include <cstdlib>
#include <string>
#include <iostream>
#include <fstream>
#include <iomanip>
// Reads whitespace-separated (name, ID) pairs from users.txt and prints
// them as a left-aligned two-column table.  Exits with a failure status
// if the file cannot be opened.
int main()
{
    std::ifstream input("users.txt", std::ios::in);
    if (!input)
    {
        std::cerr << "Could not read file" << std::endl;
        exit(EXIT_FAILURE);
    }
    // Table header.
    std::cout << std::left << std::setw(10) << "Name" << std::setw(14) << "ID" << std::endl;
    std::string userName;
    int userId;
    // Stream extraction stops at the first malformed pair or at EOF.
    while (input >> userName >> userId)
    {
        std::cout << std::left << std::setw(10) << userName << std::setw(14) << userId << std::endl;
    }
}
| [
"jensen.eliasrosendahl@gmail.com"
] | jensen.eliasrosendahl@gmail.com |
3ade205a7fe211ad0bac6d41d96b7291777b268b | c76f82e0ffd084c31186eeb7f5baa00a5ee91e43 | /OOP/list5/Rectangle.h | d367526f5bfccbc6adccd1d6dfd556ba48e9e6c7 | [] | no_license | ComputerScienceWUST2017/Pawel-Wis | c5cdbe69e40079db5d0ddf948b22663228fd9e20 | 1cb73b3a40b5681cf30ca3b59021a30020128ce9 | refs/heads/master | 2021-05-09T20:06:03.823163 | 2019-02-21T13:52:59 | 2019-02-21T13:52:59 | 118,676,765 | 1 | 0 | null | 2019-02-21T12:30:40 | 2018-01-23T22:10:13 | C++ | UTF-8 | C++ | false | false | 550 | h | //
// Created by pawel on 14.11.18.
//
#ifndef EXE1_RECTANGLE_H
#define EXE1_RECTANGLE_H
#include <cmath>
// Rectangle with side lengths a and b, exposing area() and diagonal().
// Declarations only; the protected two-argument area overload is
// presumably a helper for derived classes -- TODO confirm in the .cpp.
class Rectangle {
protected:
    float a,b;  // side lengths
    virtual float area(float,float);  // helper overload (see note above)
public:
    Rectangle();
    Rectangle(float,float);
    virtual float area();
    virtual float diagonal();
    virtual ~Rectangle();  // virtual: class is used polymorphically
};
// Cuboid extends Rectangle with a third side length c and overrides
// area() (surface area) and diagonal() (space diagonal) -- exact
// semantics defined in the implementation file.
class Cuboid: public Rectangle{
protected:
    float c;  // third side length
public:
    Cuboid();
    Cuboid(float,float,float);
    float area() override;
    float diagonal() override;
    ~Cuboid() override;
};
#endif //EXE1_RECTANGLE_H
| [
"wispawelwis38@gmail.com"
] | wispawelwis38@gmail.com |
5c04b60c4607df34dd217842fa4bb7caf6958eef | f2ba8df61507b9f95f49522de2d709a7d40f915f | /USACO/combo.cpp | 3c8636b756a4c332bd89f6cf6ed8fc917724047e | [] | no_license | YuzhenYe/WET | 8aacd69f523a21009b647bc2bf2fc49c403d62c3 | 986c4ec527af122c400f26e7373243ee587f2034 | refs/heads/master | 2021-06-05T09:43:48.715307 | 2020-08-04T21:27:56 | 2020-08-04T21:27:56 | 130,075,372 | 0 | 2 | null | null | null | null | UTF-8 | C++ | false | false | 1,253 | cpp | /*
TASK: combo
ID: yuzheny2
LANG: C++
*/
#include <iostream>
#include <fstream>
using namespace::std;
//t: try, n: answer, dial: total
// Returns true when dial position t is accepted for a combination whose
// true value is n, on a circular dial numbered 1..dial: the lock tolerates
// a deviation of at most 2 positions in either direction, including
// wrap-around across the 1/dial boundary.
bool isgood(int t, int n, int dial)
{
	const int tolerance = 2;
	int lower = (t < n) ? t : n;
	int upper = (t > n) ? t : n;
	int direct = upper - lower;      // distance without wrapping
	int wrapped = dial - direct;     // distance going the other way round
	return direct <= tolerance || wrapped <= tolerance;
}
// USACO "combo": count the dial combinations (n1,n2,n3) that open either
// the farmer's lock (f1,f2,f3) or the master lock (m1,m2,m3), where each
// dial matches within +/-2 positions with wrap-around (see isgood).  A
// combination that opens both locks is counted only once.
int main()
{
	int dial, f1, f2, f3, m1, m2, m3;
	ifstream fin("combo.in");
	ofstream fout("combo.out");
	fin >> dial >> f1 >> f2 >> f3 >> m1 >> m2 >> m3;
	int solution = 0;
	int n1, n2, n3;
	for(n1 = 1; n1 <= dial; n1 ++) {
		//compare with farmer's lock
		// Prune: skip if the first dial matches neither lock.
		if(!(isgood(n1, f1, dial) || isgood(n1, m1, dial))) continue;
		for(n2 = 1; n2 <= dial; n2 ++) {
			if(!(isgood(n2, f2, dial) || isgood(n2, m2, dial))) continue;
			for(n3 = 1; n3 <= dial; n3 ++) {
				if(!(isgood(n3, f3, dial) || isgood(n3, m3, dial))) continue;
				// Count once: farmer's lock first, master lock otherwise.
				if(isgood(n1, f1, dial) && isgood(n2, f2, dial) && isgood(n3, f3, dial)) {
					//cout<<n1<<","<<n2<<","<<n3<<endl;
					solution += 1;
				}
				else if(isgood(n1, m1, dial) && isgood(n2, m2, dial) && isgood(n3, m3, dial)) {
					//cout<<n1<<","<<n2<<","<<n3<<endl;
					solution += 1;
				}
			}
		}
	}
	fout<<solution<<endl;
}
| [
"yye@yuzhen-yes-mbp.attlocal.net"
] | yye@yuzhen-yes-mbp.attlocal.net |
4cf0be699bcfaacb850198e5e98fc54a9229920c | 5443bf5b4d2d8da95f84fd9c3fafe388d83a46b5 | /src/decode.h | 5c88264372d13a32e90b8f6e890dafa346986f8c | [
"Apache-2.0"
] | permissive | google/zuckerli | c5b4fc6eee55b7b3bf76e885d9f3e4158129da89 | 874ac40705d1e67d2ee177865af4a41b5bc2b250 | refs/heads/master | 2023-07-05T10:38:40.295265 | 2022-09-28T13:56:59 | 2022-09-28T13:56:59 | 292,382,698 | 30 | 9 | Apache-2.0 | 2022-09-28T13:57:01 | 2020-09-02T20:02:36 | C++ | UTF-8 | C++ | false | false | 10,481 | h | #ifndef ZUCKERLI_DECODE_H
#define ZUCKERLI_DECODE_H
#include <chrono>
#include <limits>
#include <vector>
#include "ans.h"
#include "bit_reader.h"
#include "checksum.h"
#include "common.h"
#include "context_model.h"
#include "huffman.h"
#include "integer_coder.h"
namespace zuckerli {
namespace detail {
// Core decoder: reads the adjacency lists of an N-node graph from `br`,
// using `reader` (HuffmanReader or ANSReader) to decode entropy-coded
// symbols, and invokes cb(node, neighbour) for every decoded edge in
// increasing neighbour order.  If `node_start_indices` is non-null, the
// bit offset at which each node's list starts is appended to it.  Each
// list is coded as a delta-coded degree, an optional back-reference to a
// recent list plus copy/skip block lengths, and delta-coded residual
// edges (with RLE of zero deltas in random-access mode).  Returns false
// on malformed input.
template <typename Reader, typename CB>
bool DecodeGraphImpl(size_t N, bool allow_random_access, Reader* reader,
                     BitReader* br, const CB& cb,
                     std::vector<size_t>* node_start_indices) {
  using IntegerCoder = zuckerli::IntegerCoder;
  // Storage for the previous up-to-MaxNodesBackwards() lists to be used as a
  // reference.
  std::vector<std::vector<uint32_t>> prev_lists(
      std::min(MaxNodesBackwards(), N));
  std::vector<uint32_t> residuals;
  std::vector<uint32_t> block_lengths;
  for (size_t i = 0; i < prev_lists.size(); i++) prev_lists[i].clear();
  // Zero-run coding is only used in random-access mode; an unreachable
  // threshold disables it otherwise.
  size_t rle_min =
      allow_random_access ? kRleMin : std::numeric_limits<size_t>::max();
  // The three quantities below get reset to after kDegreeReferenceChunkSize
  // adjacency lists if in random-access mode.
  //
  // Reference degree for degree delta coding.
  size_t last_degree = 0;
  // Last degree delta for context modeling.
  size_t last_degree_delta = 0;
  // Last reference offset for context modeling.
  size_t last_reference_offset = 0;
  for (size_t current_node = 0; current_node < N; current_node++) {
    size_t i_mod = current_node % MaxNodesBackwards();
    prev_lists[i_mod].clear();
    block_lengths.clear();
    size_t degree;
    if (node_start_indices) node_start_indices->push_back(br->NumBitsRead());
    if ((allow_random_access &&
         current_node % kDegreeReferenceChunkSize == 0) ||
        current_node == 0) {
      degree = IntegerCoder::Read(kFirstDegreeContext, br, reader);
      last_degree_delta =
          degree;  // special case: we assume a node -1 with degree 0
      last_reference_offset = 0;
    } else {
      size_t ctx = DegreeContext(last_degree_delta);
      last_degree_delta = IntegerCoder::Read(ctx, br, reader);
      degree =
          last_degree +
          UnpackSigned(
              last_degree_delta);  // this can be negative, hence calling this
    }
    last_degree = degree;
    if (degree > N) return ZKR_FAILURE("Invalid degree");
    if (degree == 0) continue;
    // If this is not the first node, read the offset of the list to be used as
    // a reference.
    size_t reference_offset = 0;
    if (current_node != 0) {
      reference_offset = IntegerCoder::Read(
          ReferenceContext(last_reference_offset), br, reader);
      last_reference_offset = reference_offset;
    }
    if (reference_offset > current_node)
      return ZKR_FAILURE("Invalid reference_offset");
    // If a reference_offset is used, read the list of blocks of (alternating)
    // copied and skipped edges.
    size_t num_to_copy = 0;
    if (reference_offset != 0) {
      size_t block_count = IntegerCoder::Read(kBlockCountContext, br, reader);
      size_t block_end = 0;  // end of current block
      for (size_t j = 0; j < block_count; j++) {
        size_t ctx = j == 0
                         ? kBlockContext
                         : (j % 2 == 0 ? kBlockContextEven : kBlockContextOdd);
        size_t block_len;
        if (j == 0) {
          block_len = IntegerCoder::Read(ctx, br, reader);
        } else {
          block_len = IntegerCoder::Read(ctx, br, reader) + 1;
        }
        block_end += block_len;
        block_lengths.push_back(block_len);
      }
      if (prev_lists[(current_node - reference_offset) % MaxNodesBackwards()]
              .size() < block_end) {
        return ZKR_FAILURE("Invalid block copy pattern");
      }
      // Last block is implicit and goes to the end of the reference list.
      block_lengths.push_back(
          prev_lists[(current_node - reference_offset) % MaxNodesBackwards()]
              .size() -
          block_end);
      // Blocks in even positions are to be copied.
      for (size_t i = 0; i < block_lengths.size(); i += 2) {
        num_to_copy += block_lengths[i];
      }
    }
    // Read all the edges that are not copied.
    // reference_offset node for delta-coding of neighbours.
    size_t last_dest_plus_one = 0;  // will not be used
    // Number of edges to read.
    size_t num_residuals = degree - num_to_copy;
    // Last delta for the residual edges, used for context modeling.
    size_t last_residual_delta = 0;
    // Current position in the reference list (because we are making a sorted
    // merged list).
    size_t ref_pos = 0;
    // Number of nodes of the current block that should still be copied.
    size_t num_to_copy_from_current_block =
        block_lengths.empty() ? 0 : block_lengths[0];
    // Index of the next block.
    size_t next_block = 1;
    // If we don't need to copy anything from the first block, and we have at
    // least another even-positioned block, advance the position in the
    // reference_offset list accordingly.
    if (num_to_copy_from_current_block == 0 && block_lengths.size() > 2) {
      ref_pos = block_lengths[1];
      num_to_copy_from_current_block = block_lengths[2];
      next_block = 3;
    }
    // ID of reference list.
    size_t ref_id = (current_node - reference_offset) % MaxNodesBackwards();
    // Number of consecutive zeros that have been decoded last.
    // Delta encoding with -1.
    size_t contiguous_zeroes_len = 0;
    // Number of further zeros that should not be read from the bitstream.
    size_t num_zeros_to_skip = 0;
    // Validates a neighbour id, stores it and reports the edge upstream.
    const auto append = [&](size_t x) {
      if (x >= N) return ZKR_FAILURE("Invalid residual");
      prev_lists[i_mod].push_back(x);
      cb(current_node, x);
      return true;
    };
    for (size_t j = 0; j < num_residuals; j++) {
      size_t destination_node;
      if (j == 0) {
        last_residual_delta =
            IntegerCoder::Read(FirstResidualContext(num_residuals), br, reader);
        destination_node = current_node + UnpackSigned(last_residual_delta);
      } else if (num_zeros_to_skip >
                 0) {  // If in a zero run, don't read anything.
        last_residual_delta = 0;
        destination_node = last_dest_plus_one;
      } else {
        last_residual_delta = IntegerCoder::Read(
            ResidualContext(last_residual_delta), br, reader);
        destination_node = last_dest_plus_one + last_residual_delta;
      }
      // Compute run of zeros if we read a zero and we are not already in one.
      if (last_residual_delta == 0 && num_zeros_to_skip == 0) {
        contiguous_zeroes_len++;
      } else {
        contiguous_zeroes_len = 0;
      }
      // If we are in a run of zeros, decrease its length.
      if (num_zeros_to_skip > 0) {
        num_zeros_to_skip--;
      }
      // Merge the edges copied from the reference_offset list with the ones
      // read from the bitstream.
      while (num_to_copy_from_current_block > 0 &&
             prev_lists[ref_id][ref_pos] <= destination_node) {
        num_to_copy_from_current_block--;
        ZKR_RETURN_IF_ERROR(append(prev_lists[ref_id][ref_pos]));
        // If our delta coding would produce an edge to destination_node, but y
        // with y<=destination_node is copied from the reference_offset list, we
        // increase destination_node. In other words, it's delta coding with
        // respect to both lists (prev_lists and residuals).
        if (j != 0 && prev_lists[ref_id][ref_pos] >= last_dest_plus_one) {
          destination_node++;
        }
        ref_pos++;
        if (num_to_copy_from_current_block == 0 &&
            next_block + 1 < block_lengths.size()) {
          ref_pos += block_lengths[next_block];
          num_to_copy_from_current_block = block_lengths[next_block + 1];
          next_block += 2;
        }
      }
      // If the current run of zeros is large enough, read how many further
      // zeros to decode from the bitstream.
      if (contiguous_zeroes_len >= rle_min) {
        num_zeros_to_skip = IntegerCoder::Read(kRleContext, br, reader);
        contiguous_zeroes_len = 0;
      }
      ZKR_RETURN_IF_ERROR(append(destination_node));
      last_dest_plus_one = destination_node + 1;
    }
    ZKR_ASSERT(ref_pos + num_to_copy_from_current_block <=
               prev_lists[ref_id].size());
    // Process the rest of the block-copy list.
    while (num_to_copy_from_current_block > 0) {
      num_to_copy_from_current_block--;
      ZKR_RETURN_IF_ERROR(append(prev_lists[ref_id][ref_pos]));
      ref_pos++;
      if (num_to_copy_from_current_block == 0 &&
          next_block + 1 < block_lengths.size()) {
        ref_pos += block_lengths[next_block];
        num_to_copy_from_current_block = block_lengths[next_block + 1];
        next_block += 2;
      }
    }
  }
  if (!reader->CheckFinalState()) {
    return ZKR_FAILURE("Invalid stream");
  }
  return true;
}
} // namespace detail
// Decodes a zuckerli-compressed graph: reads the 48-bit node count and the
// random-access flag from the header, then dispatches to DecodeGraphImpl
// with a Huffman reader (random-access streams) or an ANS reader.  Edges
// are only counted and folded into a checksum (written to *checksum if
// provided); per-node bit offsets go to *node_start_indices if provided.
// Prints throughput statistics to stderr.  Returns false on invalid input.
bool DecodeGraph(const std::vector<uint8_t>& compressed,
                 size_t* checksum = nullptr,
                 std::vector<size_t>* node_start_indices = nullptr) {
  if (compressed.empty()) return ZKR_FAILURE("Empty file");
  auto start = std::chrono::high_resolution_clock::now();
  BitReader reader(compressed.data(), compressed.size());
  size_t N = reader.ReadBits(48);
  bool allow_random_access = reader.ReadBits(1);
  size_t edges = 0, chksum = 0;
  // Callback invoked once per decoded edge (a, b).
  auto edge_callback = [&](size_t a, size_t b) {
    edges++;
    chksum = Checksum(chksum, a, b);
  };
  if (allow_random_access) {
    HuffmanReader huff_reader;
    huff_reader.Init(kNumContexts, &reader);
    ZKR_RETURN_IF_ERROR(
        detail::DecodeGraphImpl(N, allow_random_access, &huff_reader, &reader,
                                edge_callback, node_start_indices));
  } else {
    ANSReader ans_reader;
    ans_reader.Init(kNumContexts, &reader);
    ZKR_RETURN_IF_ERROR(
        detail::DecodeGraphImpl(N, allow_random_access, &ans_reader, &reader,
                                edge_callback, node_start_indices));
  }
  auto stop = std::chrono::high_resolution_clock::now();
  float elapsed =
      std::chrono::duration_cast<std::chrono::microseconds>(stop - start)
          .count();
  fprintf(stderr, "Decompressed %.2f ME/s (%zu) from %.2f BPE. Checksum: %lx\n",
          edges / elapsed, edges, 8.0 * compressed.size() / edges, chksum);
  if (checksum) *checksum = chksum;
  return true;
}
} // namespace zuckerli
#endif // ZUCKERLI_DECODE_H
| [
"veluca@google.com"
] | veluca@google.com |
63219c59aa489079cfbd95cfc074091500808bcc | 29a1cc308f70b661ba721237f19fef0cab881bf1 | /src/XJS-functions/XGraphics.cpp | 20a1c5b35e274f93eb52e4e509b51926ffb53e17 | [
"MIT"
] | permissive | joboet/XEcmaScript | 8911ed202e2e878586ef48aa2f45fa7f02250d22 | d5fc8020a3c186a10c46608d10ebd3baf5f36b41 | refs/heads/master | 2021-09-28T02:51:12.102834 | 2018-09-05T17:33:27 | 2018-09-05T17:33:27 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,630 | cpp | /*#include "XGraphics.hpp"
#include <vector>
#include <functional>
class openglWrapper {
private:
//static std::vector<std::function<void(...)>> panelFunctions;
//static std::vector<std::function<void(...)>> screenFunctions;
static uint16_t matrixLevel;
static void translate(double x, double y, double z) {
glTranslated(x, y, z);
}
static void rotate(double x, double y, double z) {
glRotated(x, 1, 0, 0);
glRotated(y, 0, 1, 0);
glRotated(z, 0, 0, 1);
}
static void scale(double x, double y, double z) {
glScaled(x, y, z);
}
static void reset() {
glLoadIdentity();
}
static void push() {
glPushMatrix();
matrixLevel++;
}
static void pop() {
if (matrixLevel) {
glPopMatrix();
matrixLevel--;
}
}
static void executeFunctions(std::vector<std::function<void(...)>> &functions) {
for (uint16_t i = 0; i < functions.size(); i++) {
functions[i];
}
}
public:
static void panel() {
}
static void screen() {
}
};
uint16_t openglWrapper::matrixLevel = 0;
JSClass XGraphicsClass = {
"XGraphics",
JSCLASS_HAS_PRIVATE,
};
*/
// Intentionally empty: the OpenGL wrapper and XGraphics bindings in this
// file are commented out, so there is currently nothing to initialize.
void initOpenGL() {

}
void initXGraphics(JSContext * context, JS::HandleObject global) {
JS::PersistentRootedObject * XGraphics = new JS::PersistentRootedObject(context, JS_NewObject(context, &XGraphicsClass));
JS_DefineProperty(context, global, "XGraphics", *XGraphics, JSPROP_PERMANENT | JSPROP_READONLY);
}*/
| [
"noreply@github.com"
] | noreply@github.com |
881b832b5b562ad5943ea11ebfc39aae1b9320d2 | e15942415003af06d094871f19dd6f7cdc5f67e0 | /Common/vtkSignedCharArray.h | bbb63118e5bb40bdb550b397653fa4a458e7991d | [
"BSD-3-Clause"
] | permissive | SCS-B3C/VTK5.6 | e5b48b6b3c7a09d4d6b5c6c468b1053af0c39d37 | d4afb224f638c1f7e847b0cd3195ea8a977bb602 | refs/heads/master | 2021-01-13T02:37:03.081012 | 2011-04-08T12:06:58 | 2011-04-08T12:06:58 | 1,587,076 | 2 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 5,070 | h | /*=========================================================================
Program: Visualization Toolkit
Module: vtkSignedCharArray.h
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================*/
// .NAME vtkSignedCharArray - dynamic, self-adjusting array of signed char
// .SECTION Description
// vtkSignedCharArray is an array of values of type signed char.
// It provides methods for insertion and retrieval of values and will
// automatically resize itself to hold new data.
#ifndef __vtkSignedCharArray_h
#define __vtkSignedCharArray_h
// Tell the template header how to give our superclass a DLL interface.
#if !defined(__vtkSignedCharArray_cxx)
# define VTK_DATA_ARRAY_TEMPLATE_TYPE signed char
#endif
#include "vtkDataArray.h"
#include "vtkDataArrayTemplate.h" // Real Superclass
// Fake the superclass for the wrappers.
#define vtkDataArray vtkDataArrayTemplate<signed char>
class VTK_COMMON_EXPORT vtkSignedCharArray : public vtkDataArray
#undef vtkDataArray
{
public:
static vtkSignedCharArray* New();
vtkTypeMacro(vtkSignedCharArray,vtkDataArray);
void PrintSelf(ostream& os, vtkIndent indent);
// Description:
// Get the data type.
int GetDataType()
{ return VTK_SIGNED_CHAR; }
// Description:
// Copy the tuple value into a user-provided array.
void GetTupleValue(vtkIdType i, signed char* tuple)
{ this->RealSuperclass::GetTupleValue(i, tuple); }
// Description:
// Set the tuple value at the ith location in the array.
void SetTupleValue(vtkIdType i, const signed char* tuple)
{ this->RealSuperclass::SetTupleValue(i, tuple); }
// Description:
// Insert (memory allocation performed) the tuple into the ith location
// in the array.
void InsertTupleValue(vtkIdType i, const signed char* tuple)
{ this->RealSuperclass::InsertTupleValue(i, tuple); }
// Description:
// Insert (memory allocation performed) the tuple onto the end of the array.
vtkIdType InsertNextTupleValue(const signed char* tuple)
{ return this->RealSuperclass::InsertNextTupleValue(tuple); }
// Description:
// Get the data at a particular index.
signed char GetValue(vtkIdType id)
{ return this->RealSuperclass::GetValue(id); }
// Description:
// Set the data at a particular index. Does not do range checking. Make sure
// you use the method SetNumberOfValues() before inserting data.
void SetValue(vtkIdType id, signed char value)
{ this->RealSuperclass::SetValue(id, value); }
// Description:
// Specify the number of values for this object to hold. Does an
// allocation as well as setting the MaxId ivar. Used in conjunction with
// SetValue() method for fast insertion.
void SetNumberOfValues(vtkIdType number)
{ this->RealSuperclass::SetNumberOfValues(number); }
// Description:
// Insert data at a specified position in the array.
void InsertValue(vtkIdType id, signed char f)
{ this->RealSuperclass::InsertValue(id, f); }
// Description:
// Insert data at the end of the array. Return its location in the array.
vtkIdType InsertNextValue(signed char f)
{ return this->RealSuperclass::InsertNextValue(f); }
// Description:
// Get the address of a particular data index. Make sure data is allocated
// for the number of items requested. Set MaxId according to the number of
// data values requested.
signed char* WritePointer(vtkIdType id, vtkIdType number)
{ return this->RealSuperclass::WritePointer(id, number); }
// Description:
// Get the address of a particular data index. Performs no checks
// to verify that the memory has been allocated etc.
signed char* GetPointer(vtkIdType id)
{ return this->RealSuperclass::GetPointer(id); }
// Description:
// This method lets the user specify data to be held by the array. The
// array argument is a pointer to the data. size is the size of
// the array supplied by the user. Set save to 1 to keep the class
// from deleting the array when it cleans up or reallocates memory.
// The class uses the actual array provided; it does not copy the data
// from the suppled array.
void SetArray(signed char* array, vtkIdType size, int save)
{ this->RealSuperclass::SetArray(array, size, save); }
void SetArray(signed char* array, vtkIdType size, int save, int deleteMethod)
{ this->RealSuperclass::SetArray(array, size, save, deleteMethod); }
protected:
vtkSignedCharArray(vtkIdType numComp=1);
~vtkSignedCharArray();
private:
//BTX
typedef vtkDataArrayTemplate<signed char> RealSuperclass;
//ETX
vtkSignedCharArray(const vtkSignedCharArray&); // Not implemented.
void operator=(const vtkSignedCharArray&); // Not implemented.
};
#endif
| [
"d.giunchi@scsitaly.com"
] | d.giunchi@scsitaly.com |
bdcb0a162ff1ee0eaa865b31ab7bb6d98bab9a90 | 4d9e5b590eca7ae4099881d42112e811446cdb46 | /14_aggregate_aw/output/application._aggregate_aw/BuildConfig/src/operator/Custom_3.h | 98a71cebff74b2cbcabae7479cc3ec3f302bc701 | [] | no_license | cly1213/2019_NCTU_IBM_Streams_Course | c31b68515fab064f8252384a3dfee4e327dc000f | 342d895f28b8f5d2c44aeadbf1989a50d0e80d1d | refs/heads/master | 2022-01-23T08:59:19.474948 | 2019-07-24T11:50:28 | 2019-07-24T11:50:28 | 198,313,604 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 3,755 | h | // eJytkE1rwkAQhplrf4VIDwolm0QLdlsKqbVFMUGbeJYl2cbVuLvsB2vy6xtT7aFnGRh43xlmnhktK88aVjFTYzy12ogjBOC3EUDf85CpJUVvdOFUnB6CJH5MVLSfL_0t9kqbNooknKlkX9ffXKNuMP_0Pm3Tn3FM3rqXRZWiUZOy2y9SyarDZZ5Hu7fjcZ0E4cKaKFDUPkhDpoSXKKgvGWlKWiJTF0SxwqiCHABae_1ND4wKZS59yFdLTG_0ERMwUB0Uxt3YD8tzwwTXGG_0ub5GKcZOaNpdLPrg0alltc6LNS6eU7soPvVuy9V4xPq8YXA4fDp_1vAMZwJKeVkLYiZ1CwLdwoPJtxVNI_1yfg_1KWeFzdvXFlfP_1wGmjaOb
#ifndef SPL_OPER_INSTANCE_CUSTOM_3_H_
#define SPL_OPER_INSTANCE_CUSTOM_3_H_
#include <SPL/Runtime/Serialization/NetworkByteBuffer.h>
#include <SPL/Runtime/Operator/Operator.h>
#include <SPL/Runtime/Operator/ParameterValue.h>
#include <SPL/Runtime/Operator/OperatorContext.h>
#include <SPL/Runtime/Operator/OperatorMetrics.h>
#include <SPL/Runtime/Operator/Port/AutoPortMutex.h>
#include <SPL/Runtime/Operator/State/StateHandler.h>
#include <SPL/Runtime/ProcessingElement/PE.h>
#include <SPL/Runtime/Type/SPLType.h>
#include <SPL/Runtime/Utility/CV.h>
using namespace UTILS_NAMESPACE;
#include "../type/BeJwrMSk1NM5NrAjILyjNSSzJzM8rNQdyfR3TU4GMzDwww9AIyCpwTSlNTixJTQEA8PUTA0.h"
#define MY_OPERATOR Custom_3$OP
#define MY_BASE_OPERATOR Custom_3_Base
#define MY_OPERATOR_SCOPE SPL::_Operator
namespace SPL {
namespace _Operator {
class MY_BASE_OPERATOR : public Operator
{
public:
typedef SPL::BeJwrMSk1NM5NrAjILyjNSSzJzM8rNQdyfR3TU4GMzDwww9AIyCpwTSlNTixJTQEA8PUTA0 IPort0Type;
MY_BASE_OPERATOR();
~MY_BASE_OPERATOR();
inline void tupleLogic(Tuple const & tuple, uint32_t port);
void processRaw(Tuple const & tuple, uint32_t port);
inline void punctLogic(Punctuation const & punct, uint32_t port);
void processRaw(Punctuation const & punct, uint32_t port);
void punctPermitProcessRaw(Punctuation const & punct, uint32_t port);
void punctNoPermitProcessRaw(Punctuation const & punct, uint32_t port);
inline void submit(Tuple & tuple, uint32_t port)
{
Operator::submit(tuple, port);
}
inline void submit(Tuple const & tuple, uint32_t port)
{
Operator::submit(tuple, port);
}
inline void submit(Punctuation const & punct, uint32_t port)
{
Operator::submit(punct, port);
}
protected:
Mutex $svMutex;
void checkpointStateVariables(NetworkByteBuffer & opstate) const;
void restoreStateVariables(NetworkByteBuffer & opstate);
void checkpointStateVariables(Checkpoint & ckpt);
void resetStateVariables(Checkpoint & ckpt);
void resetStateVariablesToInitialState();
bool hasStateVariables() const;
void resetToInitialStateRaw();
void checkpointRaw(Checkpoint & ckpt);
void resetRaw(Checkpoint & ckpt);
private:
static bool globalInit_;
static bool globalIniter();
ParameterMapType paramValues_;
ParameterMapType& getParameters() { return paramValues_;}
void addParameterValue(std::string const & param, ConstValueHandle const& value)
{
ParameterMapType::iterator it = paramValues_.find(param);
if (it == paramValues_.end())
it = paramValues_.insert (std::make_pair (param, ParameterValueListType())).first;
it->second.push_back(&ParameterValue::create(value));
}
void addParameterValue(std::string const & param)
{
ParameterMapType::iterator it = paramValues_.find(param);
if (it == paramValues_.end())
it = paramValues_.insert (std::make_pair (param, ParameterValueListType())).first;
it->second.push_back(&ParameterValue::create());
}
};
class MY_OPERATOR : public MY_BASE_OPERATOR
{
public:
};
} // namespace _Operator
} // namespace SPL
#undef MY_OPERATOR_SCOPE
#undef MY_BASE_OPERATOR
#undef MY_OPERATOR
#endif // SPL_OPER_INSTANCE_CUSTOM_3_H_
| [
"leo@MacBook-Air.Home"
] | leo@MacBook-Air.Home |
5ab965945791396c65be4686ea742b9e8d073eee | d4ba636068fc2ffbb45f313012103afb9080c4e3 | /NuGenDimension/NuGenDimension/plugins/TD/TextParamsDlg.cpp | 93a7094358848d498aefd13c960dbb6fd27fb8e6 | [] | no_license | SHAREVIEW/GenXSource | 785ae187531e757860748a2e49d9b6a175c97402 | 5e5fe1d5816560ac41a117210fd40a314536f7a4 | refs/heads/master | 2020-07-20T22:05:24.794801 | 2019-09-06T05:00:39 | 2019-09-06T05:00:39 | 206,716,265 | 0 | 0 | null | 2019-09-06T05:00:12 | 2019-09-06T05:00:12 | null | UTF-8 | C++ | false | false | 6,552 | cpp | // TextParamsDlg.cpp : implementation file
//
#include "stdafx.h"
#include "TextParamsDlg.h"
#include ".\textparamsdlg.h"
// CTextParamsDlg dialog
IMPLEMENT_DYNAMIC(CTextParamsDlg, CDialog)
CTextParamsDlg::CTextParamsDlg(SG_TEXT_STYLE* txt_stl,
IApplicationInterface* appI,
CWnd* pParent /*=NULL*/)
: CDialog(CTextParamsDlg::IDD, pParent)
{
ASSERT(txt_stl);
ASSERT(appI);
m_app = appI;
m_cur_text_style = txt_stl;
m_inFocus = false;
}
CTextParamsDlg::~CTextParamsDlg()
{
}
void CTextParamsDlg::DoDataExchange(CDataExchange* pDX)
{
CDialog::DoDataExchange(pDX);
DDX_Control(pDX, IDC_SYM_HEIGHT_EDIT, m_sym_height);
DDX_Control(pDX, IDC_SYM_ANGLE_EDIT, m_angle);
DDX_Control(pDX, IDC_LINES_SPACE_EDIT, m_lines_space);
DDX_Control(pDX, IDC_SYMB_SPACE_EDIT, m_symb_space);
DDX_Control(pDX, IDC_SYMB_PROPOPT_EDIT, m_s_prop);
}
BEGIN_MESSAGE_MAP(CTextParamsDlg, CDialog)
ON_WM_CHAR()
ON_EN_CHANGE(IDC_SYM_ANGLE_EDIT, OnEnChangeSymAngleEdit)
ON_EN_CHANGE(IDC_SYM_HEIGHT_EDIT, OnEnChangeSymHeightEdit)
ON_EN_CHANGE(IDC_SYMB_SPACE_EDIT, OnEnChangeSymbSpaceEdit)
ON_EN_CHANGE(IDC_LINES_SPACE_EDIT, OnEnChangeLinesSpaceEdit)
ON_EN_CHANGE(IDC_SYMB_PROPOPT_EDIT, OnEnChangeSymbPropoptEdit)
ON_WM_SIZE()
ON_WM_ERASEBKGND()
ON_WM_CTLCOLOR()
ON_EN_SETFOCUS(IDC_SYM_ANGLE_EDIT, OnEnSetfocusTextEdit)
ON_EN_KILLFOCUS(IDC_SYM_ANGLE_EDIT, OnEnKillfocusTextEdit)
ON_EN_SETFOCUS(IDC_SYM_HEIGHT_EDIT, OnEnSetfocusTextEdit)
ON_EN_KILLFOCUS(IDC_SYM_HEIGHT_EDIT, OnEnKillfocusTextEdit)
ON_EN_SETFOCUS(IDC_SYMB_SPACE_EDIT, OnEnSetfocusTextEdit)
ON_EN_KILLFOCUS(IDC_SYMB_SPACE_EDIT, OnEnKillfocusTextEdit)
ON_EN_SETFOCUS(IDC_LINES_SPACE_EDIT, OnEnSetfocusTextEdit)
ON_EN_KILLFOCUS(IDC_LINES_SPACE_EDIT, OnEnKillfocusTextEdit)
ON_EN_SETFOCUS(IDC_SYMB_PROPOPT_EDIT, OnEnSetfocusTextEdit)
ON_EN_KILLFOCUS(IDC_SYMB_PROPOPT_EDIT, OnEnKillfocusTextEdit)
END_MESSAGE_MAP()
// CTextParamsDlg message handlers
static DWORD GetTextExtent(HDC hDC, LPCSTR s, int len)
{
SIZE dim;
DWORD dw;
GetTextExtentPoint32(hDC, s, len, &dim);
dw = ((dim.cy << 16) & 0xFFFF0000)| dim.cx;
return dw;
}
BOOL CTextParamsDlg::OnInitDialog()
{
CDialog::OnInitDialog();
/*m_Previw_wnd.SetTextStylePointer(&m_cur_text_style);
m_Previw_wnd.SetCurFont(m_cur_font);
CRect prR;
GetDlgItem(IDC_PREVIEW_FRAME)->GetWindowRect(prR);
ScreenToClient(prR);
InflateRect(&prR,-3,-3);
m_Previw_wnd.Create(NULL,NULL,WS_CHILD|WS_VISIBLE,prR,this,101);*/
if (!sgFontManager::GetFont(sgFontManager::GetCurrentFont()))
return false;
m_sym_height.SetValue((float)m_cur_text_style->height);
m_angle.SetValue((float)m_cur_text_style->angle);
m_lines_space.SetValue((float)m_cur_text_style->vert_space_proportion);
m_symb_space.SetValue((float)m_cur_text_style->horiz_space_proportion);
m_s_prop.SetValue((float)m_cur_text_style->proportions);
return TRUE; // return TRUE unless you set the focus to a control
// EXCEPTION: OCX Property Pages should return FALSE
}
void CTextParamsDlg::OnOK()
{
}
void CTextParamsDlg::OnCancel()
{
}
static void setlitext_1(DWORD_PTR i, char *buf) // #NOT USED
{
buf[0] = buf[1] = '%';
buf[2] = buf[3] = buf[4] = buf[5] = 0;
if(i < 100) buf[2] = '0';
if(i < 10) buf[3] = '0';
//_itoa(i, &buf[lstrlen(buf)], 10);#OBSOLETE RISK
_itoa_s(i,&buf[lstrlen(buf)],1024,10);
}
void CTextParamsDlg::OnChar(UINT nChar, UINT nRepCnt, UINT nFlags)
{
CDialog::OnChar(nChar, nRepCnt, nFlags);
}
void CTextParamsDlg::OnEnChangeSymAngleEdit()
{
if (::IsWindow(m_angle.m_hWnd))
{
m_cur_text_style->angle = m_angle.GetValue();
if (m_cur_text_style->angle>90.0)
m_cur_text_style->angle=90.0;
if (m_cur_text_style->angle<0.0)
m_cur_text_style->angle=0.0;
//m_angle.SetValue((float)m_cur_text_style->angle);
}
if (m_app)
m_app->GetViewPort()->InvalidateViewPort();
}
void CTextParamsDlg::OnEnChangeSymHeightEdit()
{
//UpdateData();
m_cur_text_style->height = m_sym_height.GetValue();
//m_Previw_wnd.ChangeStyle();
if (m_app)
m_app->GetViewPort()->InvalidateViewPort();
}
void CTextParamsDlg::OnEnChangeSymbSpaceEdit()
{
//UpdateData();
m_cur_text_style->horiz_space_proportion = m_symb_space.GetValue();
if (m_app)
m_app->GetViewPort()->InvalidateViewPort();
}
void CTextParamsDlg::OnEnChangeLinesSpaceEdit()
{
//UpdateData();
m_cur_text_style->vert_space_proportion = m_lines_space.GetValue();
if (m_app)
m_app->GetViewPort()->InvalidateViewPort();
}
void CTextParamsDlg::OnEnChangeSymbPropoptEdit()
{
//UpdateData();
m_cur_text_style->proportions = m_s_prop.GetValue();
if (m_app)
m_app->GetViewPort()->InvalidateViewPort();
}
void CTextParamsDlg::OnSize(UINT nType, int cx, int cy)
{
__super::OnSize(nType, cx, cy);
if (::IsWindow(m_sym_height.m_hWnd))
{
CRect rrr;
m_sym_height.GetWindowRect(rrr);
ScreenToClient(rrr);
m_sym_height.MoveWindow(rrr.left,rrr.top,cx-2*rrr.left,rrr.Height());
m_angle.GetWindowRect(rrr);
ScreenToClient(rrr);
m_angle.MoveWindow(rrr.left,rrr.top,cx-2*rrr.left,rrr.Height());
m_lines_space.GetWindowRect(rrr);
ScreenToClient(rrr);
m_lines_space.MoveWindow(rrr.left,rrr.top,cx-2*rrr.left,rrr.Height());
m_symb_space.GetWindowRect(rrr);
ScreenToClient(rrr);
m_symb_space.MoveWindow(rrr.left,rrr.top,cx-2*rrr.left,rrr.Height());
m_s_prop.GetWindowRect(rrr);
ScreenToClient(rrr);
m_s_prop.MoveWindow(rrr.left,rrr.top,cx-2*rrr.left,rrr.Height());
}
}
BOOL CTextParamsDlg::OnEraseBkgnd(CDC* pDC)
{
// TODO: Add your message handler code here and/or call default
return TRUE;//__super::OnEraseBkgnd(pDC);
}
HBRUSH CTextParamsDlg::OnCtlColor(CDC* pDC, CWnd* pWnd, UINT nCtlColor)
{
// Call the base class implementation first! Otherwise, it may
// undo what we are trying to accomplish here.
HBRUSH hbr = CDialog::OnCtlColor(pDC, pWnd, nCtlColor);
if (m_angle.m_hWnd==pWnd->m_hWnd ||
m_s_prop.m_hWnd==pWnd->m_hWnd ||
m_symb_space.m_hWnd==pWnd->m_hWnd)
return hbr;
if (nCtlColor==CTLCOLOR_STATIC && (pWnd!=&m_sym_height))
{
pDC->SetTextColor(0);
//pDC->SetBkColor(RGB(255,255,255));
pDC->SetBkMode(TRANSPARENT);
hbr = (HBRUSH)GetStockObject(HOLLOW_BRUSH);
}
return hbr;
}
void CTextParamsDlg::OnEnSetfocusTextEdit()
{
m_inFocus = true;
}
void CTextParamsDlg::OnEnKillfocusTextEdit()
{
m_inFocus = false;
}
| [
"nystrom.anthony@gmail.com"
] | nystrom.anthony@gmail.com |
6941383e7aba27dc4368be071693b1fcf9a5f685 | 600df3590cce1fe49b9a96e9ca5b5242884a2a70 | /device/bluetooth/dbus/dbus_thread_manager_linux.cc | adffe9b746a97bc3da48503b256e0bf45cb3941a | [
"BSD-3-Clause"
] | permissive | metux/chromium-suckless | efd087ba4f4070a6caac5bfbfb0f7a4e2f3c438a | 72a05af97787001756bae2511b7985e61498c965 | refs/heads/orig | 2022-12-04T23:53:58.681218 | 2017-04-30T10:59:06 | 2017-04-30T23:35:58 | 89,884,931 | 5 | 3 | BSD-3-Clause | 2022-11-23T20:52:53 | 2017-05-01T00:09:08 | null | UTF-8 | C++ | false | false | 2,232 | cc | // Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "device/bluetooth/dbus/dbus_thread_manager_linux.h"
#include "base/threading/thread.h"
#include "dbus/bus.h"
namespace bluez {
static DBusThreadManagerLinux* g_linux_dbus_manager = NULL;
DBusThreadManagerLinux::DBusThreadManagerLinux() {
base::Thread::Options thread_options;
thread_options.message_loop_type = base::MessageLoop::TYPE_IO;
dbus_thread_.reset(new base::Thread("D-Bus thread"));
dbus_thread_->StartWithOptions(thread_options);
// Create the connection to the system bus.
dbus::Bus::Options system_bus_options;
system_bus_options.bus_type = dbus::Bus::SYSTEM;
system_bus_options.connection_type = dbus::Bus::PRIVATE;
system_bus_options.dbus_task_runner = dbus_thread_->task_runner();
system_bus_ = new dbus::Bus(system_bus_options);
}
DBusThreadManagerLinux::~DBusThreadManagerLinux() {
// Shut down the bus. During the browser shutdown, it's ok to shut down
// the bus synchronously.
if (system_bus_.get())
system_bus_->ShutdownOnDBusThreadAndBlock();
// Stop the D-Bus thread.
if (dbus_thread_)
dbus_thread_->Stop();
if (!g_linux_dbus_manager)
return; // Called form Shutdown() or local test instance.
// There should never be both a global instance and a local instance.
CHECK(this == g_linux_dbus_manager);
}
dbus::Bus* DBusThreadManagerLinux::GetSystemBus() {
return system_bus_.get();
}
// static
void DBusThreadManagerLinux::Initialize() {
CHECK(!g_linux_dbus_manager);
g_linux_dbus_manager = new DBusThreadManagerLinux();
}
// static
void DBusThreadManagerLinux::Shutdown() {
// Ensure that we only shutdown LinuxDBusManager once.
CHECK(g_linux_dbus_manager);
DBusThreadManagerLinux* dbus_thread_manager = g_linux_dbus_manager;
g_linux_dbus_manager = NULL;
delete dbus_thread_manager;
VLOG(1) << "LinuxDBusManager Shutdown completed";
}
// static
DBusThreadManagerLinux* DBusThreadManagerLinux::Get() {
CHECK(g_linux_dbus_manager)
<< "LinuxDBusManager::Get() called before Initialize()";
return g_linux_dbus_manager;
}
} // namespace bluez
| [
"enrico.weigelt@gr13.net"
] | enrico.weigelt@gr13.net |
a0a7ca2985dba9c05d01fb489c02d7970e9a34f2 | d065c3b1b4bd11802a7c7e816a3eab631a5c9b20 | /zamjenarijeciustringu_19022020.cpp | e6a64afeace30b5ff735db7c6ccfe5ca693af5bf | [] | no_license | benjaminbarbaric7/Racunalstvo | 242ff532b9272786d3a25b5c3a85a3ad9b6905a3 | c22729cd57339d84aa9770bf46c4b56779ddf022 | refs/heads/master | 2020-07-28T03:22:05.674246 | 2020-03-18T13:17:34 | 2020-03-18T13:17:34 | 209,292,099 | 2 | 2 | null | null | null | null | UTF-8 | C++ | false | false | 498 | cpp | #include <iostream>
#include <string.h>
/* run this program using the console pauser or add your own getch, system("pause") or input loop */
using namespace std;
int main() {
string velikaVijest=
"Mislim da sam vidio Elvise u UFO. "
"Danas sam izmjenio program. ";
string zamjena= "CIA";
string nadjiMe= "UFO";
int i= velikaVijest.find(nadjiMe, 0);
if (i != string::npos)
velikaVijest.replace(i,zamjena.size(),zamjena);
cout<<velikaVijest<<endl;
return 0;
}
| [
"noreply@github.com"
] | noreply@github.com |
75f1132f3fcd9ca928af012fea9454bd518f885e | 1438ed7be4f193b784d47af1b8536bb7eee938db | /Utilities/Configurations.hpp | d098eb9e65b87a943d108fab81af4ff0a8bc595a | [] | no_license | happyfeetx/Oxygen | 266301c854c0119f4405d1990f23e512f9635903 | 5d1583d563f7de531e7b19050b1ead56506f44d9 | refs/heads/master | 2020-05-16T21:29:45.053864 | 2019-04-24T21:20:50 | 2019-04-24T21:20:50 | 183,308,471 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,489 | hpp | #include <cstdio>
#include <cstdlib>
#include <string>
#include <exception>
#include "oxygen.hpp"
namespace Discord::Utilities {
enum TokenType {
Bot = 0,
User = 1,
Bearer = 2
};
struct ClientConfiguration {
public:
std::string Token; // Login token of the bot. Used to identify and authenticate with Discord servers.
TokenType TokenType; // Token type of the bot
int Timeout; // Time disconnected before the bot will quit.
int MessageCache; // The amount of user messages that the bot will hold in memory. Max 10,000 messages.
bool AutomaticRetry; // Automatically attempts to reconnect the client to Discord servers.
bool UseDefaultHelpResponse; // Use the default Help dialogue response. Enabled by default.
bool EnableDMs; // Enable the bot to receive and respond to DMs from users. Disabled by default.
ClientConfiguration() {
Token = botToken;
TokenType = TokenType::Bot;
Timeout = 2500;
MessageCache = 5000;
AutomaticRetry = false;
UseDefaultHelpResponse = true;
EnableDMs = false;
}
private:
std::string botToken;
};
struct CommandsConfiguration {
public:
int CooldownPerUser;
bool UseDefaultCommandHandler;
CommandsConfiguration() {
CooldownPerUser = 1;
UseDefaultCommandHandler = true;
}
};
} | [
"scientificmartian@gmail.com"
] | scientificmartian@gmail.com |
a4bad136caa30e76a14dfde9a87b71f5272a2f55 | e973c673187de92e1753b5d1756363529182f51d | /Q20.cpp | 87031703fed483242bb5839f3b03e967a32fd332 | [] | no_license | rishikesh-2828/git-tut | 21cebcf896f236f326aba75ed1c16a4ca9e633a7 | 2cb21f1cdd06d38320c9e41a0e6060c1da1e5c63 | refs/heads/master | 2023-04-13T11:52:30.196292 | 2021-05-01T19:44:26 | 2021-05-01T19:44:26 | 363,500,438 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 891 | cpp | #include<bits/stdc++.h>
using namespace std;
int checkMax(vector<int> v,int n);
vector<int> flip(vector<int> v,int k);
vector<int> pancakeSort(vector<int> v)
{
for(int i=v.size()-1;i>=0;i--)
{
int mi=checkMax(v,i);
if(mi!=i)
{
v=flip(v,mi);
v=flip(v,i);
}
}
return v;
}
vector<int> flip(vector<int> v,int k)
{
for(int i=0,j=k ; i<j ; i++,j--)
{
swap(v[i],v[j]);
}
return v;
}
int checkMax(vector<int> v,int n)
{
int maxi=INT_MIN,mi;
for(int i=0;i<=n;i++)
{
if(v[i]>maxi)
{
maxi=v[i];
mi=i;
}
}
return mi;
}
int main()
{
vector<int> v;
int n=5,p;
for(int i=0;i<5;i++)
{
cin>>p;
v.push_back(p);
}
v= pancakeSort(v);
for(int i=0;i<5;i++)
{
cout<<v[i]<<" ";
}
} | [
"Rishi@kesh.com"
] | Rishi@kesh.com |
d4636c7e7ad1415c68607440d2845846671c040b | f81fc050989f1eb9df5d8ef91e469c8f6d4d7f27 | /11233.cpp | a0778e32ae24dce5ed9736d465d61b8b1e4f9638 | [] | no_license | a-pramanik37/Solved-problems-of-UVA | 9102e583b7e5c091992c648efed238199e64567c | 71e5869dc47b4ac84d96896676e1cd31f7e4245a | refs/heads/master | 2020-05-27T10:03:03.210301 | 2019-05-25T14:51:22 | 2019-05-25T14:51:22 | 188,575,705 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,406 | cpp | #include <iostream>
#include <cstdio>
#include <cstring>
#include <cmath>
#include <cstdlib>
#include <algorithm>
using namespace std;
char a[200][50], b[200][50];
int main()
{
int i, j,k, l, n, p, q, flg, x;
char c[200][50];
while(scanf("%d%d", &l, &n)==2){
for(i = 1; i<=l; i++){
scanf("%s%s", a[i], b[i]);
}
for(i = 1; i<=n; i++){
scanf("%s", c[i]);
}
for(x = 1; x<=n; x++){
p = strlen(c[x])-1;
flg = 0;
for(j = 1; j<=l; j++){
if(strcmp(c[x], a[j])==0){
printf("%s\n", b[j]);
flg = 1;
break;
}
}
if(flg==0){
if(c[x][p]=='y' && c[x][p-1]!='a' && c[x][p-1]!='e' && c[x][p-1]!='i'&&c[x][p-1]!='o' &&c[x][p-1]!='u'){
for(j = 0; j<strlen(c[x])-1; j++){
printf("%c", c[x][j]);
}
printf("ies\n");
}
else if(c[x][p]=='o' || c[x][p]=='s'||(c[x][p-1]=='c'&&c[x][p]=='h')||(c[x][p-1]=='s' && c[x][p]=='h') || c[x][p]=='x'){
printf("%s", c[x]);
printf("es\n");
}
else{
printf("%s", c[x]);
printf("s\n");
}
}
}
}
return 0;
}
| [
"bappycseju37@gmail.com"
] | bappycseju37@gmail.com |
36c4c232d799dd20ac33897c21fa47ea1f6ba9c8 | ceafc3832205dda86dbf0c2260f8ddb1a0c3bca0 | /examples/helloworld/helloworld.ino | fc813681c1c544808a4f9201263764a11d8473c9 | [] | no_license | nwjchung/arduino-lifegraph | 904c78976495078fea65904ba016519109e9db60 | 1c2be09e51f970bbea4b0dfae2cd3e4d67ed13ce | refs/heads/master | 2020-11-30T23:35:38.329282 | 2013-05-12T16:33:00 | 2013-05-12T16:33:00 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 1,564 | ino | /*
* Lifegraph Facebook Demo
* This sketch is released to the public domain.
*/
#include <SoftwareSerial.h>
#include <WiFlyHQ.h>
#include <Lifegraph.h>
SoftwareSerial wifiSerial(9, 10);
/**
* Configuration
*/
// Wifi configuration for a WPA network.
const char mySSID[] = "...";
const char myPassword[] = "...";
// Pin our LED is connected to.
int light = 13;
/**
* Setup
*/
// Store physical ID from RFID tag.
uint8_t physicalid[8] = { 0 };
void setup()
{
// Setup ports.
Serial.begin(9600);
wifiSerial.begin(9600);
pinMode(light, OUTPUT);
// Setup network connection.
Serial.println(F("Connecting to Wifi..."));
if (!connectWifi(&wifiSerial, mySSID, myPassword)) {
Serial.println(F("Failed to join network."));
while (true) {
// Hang forever.
}
} else {
Serial.println(F("Joined wifi network."));
}
}
void loop () {
// Make an HTTP request for graph.facebook.com/lifegraphlabs
Facebook.get ( NULL, "lifegraphlabs" );
int status_code = Facebook.request();
// The request is successful if it returns an HTTP status code
// of "200" (HTTP OK). Update the light accordingly.
digitalWrite(light, status_code == 200 ? HIGH : LOW);
// Notify terminal of our status.
Serial.print("HTTP Status Code: ");
Serial.println(status_code);
// If successful, stop making requests.
// (Wifly occasionally returns "0" instead of "200".)
while (status_code == 200) {
// Hang forever
}
// Otherwise, delay, and repeat until we make a successful HTTP request.
delay(3000);
} | [
"id@timryan.org"
] | id@timryan.org |
a3a0a8f1533e6470544b58bf559f5990c70943ee | 7bfdaa5be493d8e98ff6f97be9a297e8ee98285b | /10 - C++ Programming/CountingStars.cpp | d7a1f4a9a341317cfb57484b9f2d98c794c7548f | [] | no_license | GhulamMustafaGM/C-Programming | a21795ff9183462d41bb6916966342c4059cd2e2 | 4db74317e85ea883dbef173f02d937765ee2a6f5 | refs/heads/master | 2023-03-16T23:31:45.024962 | 2021-02-18T22:25:25 | 2021-02-18T22:25:25 | 194,243,282 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 770 | cpp | // Counting stars app
#include <iostream>
using namespace std;
int main()
{
int a, b, b1, resA, resA1, resA2;
cin >> a;
if (a >= 100 && a < 1000)
{
resA = a / 100; //the first number
resA1 = a % 10; //the last number
b = a - resA1;
b1 = b % 100;
resA2 = b1 / 10; //the second number
cout << "The first number is: " << resA << endl;
cout << "The second number is: " << resA2 << endl;
cout << "The last number is: " << resA1 << endl;
}
else if (a < 100)
{
resA = a / 10; //the first number
resA1 = a % 10; // the last number
cout << "The first number is: " << resA << endl;
cout << "The last number is: " << resA1 << endl;
}
return 0;
}
| [
"mustafaji@gmail.com"
] | mustafaji@gmail.com |
1edd9ccdaa7f89c96e7365c089d96569cb19d89a | fa342ea2fef279a2b76c89ea2a2a804a0d7e0468 | /Mandatory1/SIMPLE_KE_20_200_40/0.002/k | 9049e3bb3850583d347ee3c16baa66a67e33c0b4 | [] | no_license | JDTyvand/OF_MEK4420 | f433a8fbc056c2d7e2261c792e5241ed4c9e7079 | 0ab7dcf805afe972d809bb884be12d60bc1933be | refs/heads/master | 2016-08-06T18:35:38.441170 | 2015-10-01T19:13:42 | 2015-10-01T19:13:42 | 42,505,059 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 158,942 | /*--------------------------------*- C++ -*----------------------------------*\
| ========= | |
| \\ / F ield | OpenFOAM: The Open Source CFD Toolbox |
| \\ / O peration | Version: 2.4.0 |
| \\ / A nd | Web: www.OpenFOAM.org |
| \\/ M anipulation | |
\*---------------------------------------------------------------------------*/
FoamFile
{
version 2.0;
format ascii;
class volScalarField;
location "0.002";
object k;
}
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
dimensions [0 2 -2 0 0 0 0];
internalField nonuniform List<scalar>
17600
(
0.58903
0.787733
0.941489
1.04569
1.10669
1.13449
1.13897
1.1283
1.1086
1.08414
1.05779
1.0314
1.00611
0.982592
0.961162
0.941885
0.924586
0.908775
0.89352
0.87631
0.40944
0.408541
0.403039
0.397435
0.392441
0.388624
0.386546
0.386655
0.38927
0.394584
0.402677
0.413525
0.426995
0.442848
0.460737
0.480214
0.500753
0.521785
0.543113
0.564422
0.378978
0.37161
0.358824
0.344059
0.328946
0.314506
0.301446
0.290251
0.28123
0.274565
0.270349
0.268615
0.26936
0.272552
0.278142
0.28607
0.296287
0.308838
0.32422
0.345034
0.370717
0.363373
0.353456
0.341914
0.329538
0.316943
0.3046
0.292862
0.281992
0.272175
0.263543
0.256184
0.250156
0.245501
0.242255
0.240455
0.240173
0.241583
0.245192
0.252746
0.368689
0.361459
0.353051
0.343668
0.333631
0.323253
0.312806
0.302513
0.292554
0.283065
0.274151
0.26589
0.258337
0.251532
0.245506
0.240285
0.235908
0.23247
0.230208
0.22992
0.36811
0.360967
0.353339
0.345184
0.336605
0.327748
0.318763
0.309788
0.30094
0.292316
0.283995
0.276039
0.268493
0.261393
0.254765
0.248627
0.242999
0.237921
0.233481
0.230023
0.367938
0.360858
0.353646
0.346201
0.338525
0.33067
0.322711
0.314729
0.306797
0.298981
0.291338
0.283915
0.27675
0.26987
0.263299
0.257051
0.251137
0.245577
0.240397
0.235761
0.367886
0.360848
0.353863
0.346835
0.339724
0.332533
0.325287
0.318029
0.310801
0.303645
0.296601
0.289702
0.282976
0.276446
0.27013
0.26404
0.258184
0.25257
0.247203
0.242173
0.36787
0.360857
0.353997
0.347213
0.340455
0.3337
0.326949
0.320214
0.313515
0.306878
0.300326
0.293882
0.287566
0.281394
0.27538
0.269533
0.263857
0.258358
0.253032
0.247939
0.367864
0.360866
0.354073
0.347428
0.340886
0.334416
0.328001
0.321639
0.315334
0.309095
0.302936
0.296869
0.290906
0.28506
0.279339
0.273749
0.268292
0.262971
0.257782
0.252771
0.367862
0.360871
0.354112
0.347545
0.341132
0.334842
0.328653
0.322552
0.316532
0.310593
0.304739
0.298975
0.293307
0.287741
0.282282
0.276934
0.271696
0.26657
0.261553
0.256686
0.36786
0.360872
0.35413
0.347605
0.341268
0.335089
0.329046
0.323122
0.317304
0.311585
0.305963
0.300436
0.295006
0.289673
0.284439
0.279305
0.274269
0.269332
0.264493
0.259791
0.367859
0.360872
0.354138
0.347635
0.341339
0.335227
0.329276
0.323468
0.31779
0.312229
0.306778
0.301431
0.296187
0.291042
0.285994
0.281043
0.276185
0.271419
0.266748
0.26221
0.367858
0.360871
0.354141
0.347649
0.341376
0.335301
0.329406
0.323674
0.318087
0.312635
0.307307
0.302095
0.296992
0.291994
0.287096
0.282295
0.277586
0.27297
0.26845
0.264063
0.367857
0.36087
0.354141
0.347654
0.341393
0.33534
0.329478
0.323791
0.318265
0.312886
0.307644
0.302527
0.297529
0.292642
0.287861
0.28318
0.278594
0.274102
0.269711
0.265457
0.367856
0.360868
0.35414
0.347656
0.3414
0.335359
0.329516
0.323857
0.318368
0.313037
0.307852
0.302802
0.297879
0.293074
0.28838
0.283791
0.279302
0.274911
0.270627
0.266484
0.367856
0.360867
0.354139
0.347655
0.341403
0.335367
0.329535
0.323892
0.318427
0.313125
0.307977
0.302972
0.298101
0.293354
0.288723
0.284203
0.279787
0.275474
0.271275
0.267222
0.367856
0.360867
0.354138
0.347655
0.341404
0.335371
0.329544
0.323911
0.318458
0.313175
0.30805
0.303074
0.298236
0.293528
0.288941
0.284469
0.280105
0.27585
0.271714
0.267729
0.367855
0.360866
0.354137
0.347654
0.341403
0.335372
0.329548
0.32392
0.318474
0.313201
0.30809
0.30313
0.298312
0.293628
0.289069
0.284627
0.280298
0.276081
0.271989
0.268053
0.367855
0.360866
0.354137
0.347654
0.341403
0.335373
0.32955
0.323923
0.318481
0.313212
0.308107
0.303155
0.298347
0.293674
0.289128
0.284702
0.280392
0.276196
0.272129
0.268222
0.367855
0.360866
0.354137
0.347654
0.341403
0.335373
0.32955
0.323923
0.318481
0.313212
0.308107
0.303155
0.298348
0.293675
0.289131
0.284706
0.280399
0.276208
0.27215
0.268253
0.367855
0.360866
0.354137
0.347654
0.341404
0.335373
0.329549
0.32392
0.318474
0.313201
0.30809
0.303131
0.298315
0.293632
0.289075
0.284639
0.28032
0.276118
0.272051
0.268148
0.367856
0.360867
0.354138
0.347655
0.341404
0.335371
0.329544
0.323911
0.318458
0.313175
0.308051
0.303076
0.298239
0.293534
0.288952
0.284489
0.280142
0.275912
0.271819
0.267891
0.367856
0.360867
0.354139
0.347655
0.341403
0.335368
0.329535
0.323893
0.318427
0.313126
0.307979
0.302975
0.298106
0.293363
0.28874
0.284232
0.27984
0.275564
0.271426
0.267455
0.367856
0.360868
0.35414
0.347656
0.3414
0.335359
0.329516
0.323857
0.318369
0.313038
0.307854
0.302806
0.297885
0.293086
0.288402
0.28383
0.279372
0.27503
0.270828
0.266796
0.367857
0.36087
0.354141
0.347654
0.341393
0.33534
0.329478
0.323792
0.318266
0.312888
0.307646
0.302532
0.297537
0.292657
0.287888
0.283228
0.278681
0.274252
0.269966
0.265853
0.367858
0.360871
0.354141
0.347649
0.341376
0.335301
0.329407
0.323674
0.318088
0.312637
0.30731
0.3021
0.297002
0.292012
0.287129
0.282354
0.277692
0.273153
0.268761
0.26455
0.367859
0.360872
0.354138
0.347635
0.341339
0.335227
0.329276
0.323469
0.317791
0.31223
0.306781
0.301438
0.296199
0.291063
0.286033
0.281113
0.276309
0.271636
0.267118
0.262792
0.36786
0.360872
0.35413
0.347605
0.341268
0.335089
0.329046
0.323122
0.317305
0.311587
0.305967
0.300444
0.295019
0.289698
0.284484
0.279386
0.274413
0.269583
0.264921
0.260467
0.367862
0.360871
0.354112
0.347545
0.341132
0.334843
0.328654
0.322552
0.316533
0.310595
0.304743
0.298983
0.293322
0.287769
0.282334
0.277026
0.271859
0.266852
0.262034
0.257446
0.367864
0.360866
0.354073
0.347428
0.340886
0.334416
0.328002
0.32164
0.315335
0.309098
0.302941
0.296878
0.290924
0.285092
0.279396
0.273851
0.268471
0.263277
0.258304
0.25359
0.36787
0.360857
0.353997
0.347213
0.340455
0.3337
0.326949
0.320214
0.313517
0.306881
0.300332
0.293893
0.287585
0.281429
0.275443
0.269644
0.264049
0.258679
0.25357
0.248764
0.367886
0.360848
0.353863
0.346835
0.339725
0.332533
0.325288
0.31803
0.310803
0.303649
0.296607
0.289714
0.282997
0.276485
0.270198
0.264156
0.258378
0.252885
0.247703
0.24289
0.367938
0.360858
0.353646
0.346201
0.338525
0.33067
0.322712
0.31473
0.306799
0.298985
0.291345
0.283928
0.276773
0.26991
0.263367
0.257163
0.251316
0.245842
0.240753
0.236124
0.36811
0.360967
0.353339
0.345184
0.336605
0.327748
0.318764
0.309789
0.300942
0.292321
0.284003
0.276052
0.268516
0.261431
0.254825
0.248716
0.243116
0.238034
0.233459
0.229457
0.368689
0.361459
0.353051
0.343668
0.333631
0.323253
0.312806
0.302515
0.292556
0.283069
0.274158
0.265902
0.258355
0.251557
0.245533
0.240296
0.23585
0.232183
0.229251
0.227043
0.370717
0.363373
0.353456
0.341914
0.329538
0.316943
0.3046
0.292863
0.281993
0.272178
0.263547
0.256187
0.250153
0.245481
0.242184
0.24026
0.239685
0.2404
0.242286
0.245116
0.378978
0.37161
0.358824
0.344059
0.328947
0.314506
0.301446
0.290251
0.281229
0.274562
0.27034
0.268593
0.269309
0.27244
0.277904
0.285576
0.295275
0.306744
0.319646
0.333413
0.40944
0.408541
0.403039
0.397435
0.39244
0.388624
0.386544
0.386653
0.389264
0.394574
0.40266
0.413496
0.426951
0.442784
0.460654
0.480128
0.500711
0.521885
0.543139
0.564174
0.58903
0.787733
0.94149
1.04569
1.10669
1.13449
1.13898
1.12832
1.10863
1.0842
1.0579
1.03161
1.00649
0.983269
0.962372
0.944045
0.928437
0.9157
0.905934
0.899815
1.12674
1.42311
1.73032
2.01218
2.23268
2.37744
2.45369
2.47978
2.47523
2.45547
2.43091
2.40799
2.39063
2.38118
2.38097
2.39067
2.41036
2.43969
2.47793
2.52409
2.57696
2.63524
2.69755
2.76256
2.82898
2.89561
2.96136
3.02527
3.08651
3.14439
3.1983
3.24766
3.29175
3.32972
3.3597
3.38127
3.39397
3.39724
3.39044
3.37285
3.34364
3.3019
3.24664
3.17686
3.09157
2.98991
2.87127
2.73545
2.5828
2.41442
2.23234
2.03955
1.84007
1.63874
1.44094
1.25214
1.0774
0.920849
0.785349
0.672311
0.581746
0.512465
0.462378
0.428803
0.408753
0.399195
0.397277
0.400508
0.406862
0.414812
0.42328
0.431566
0.43926
0.446157
0.452193
0.457394
0.461841
0.465642
0.468922
0.47181
0.47444
0.47695
0.479488
0.482215
0.485308
0.48896
0.493376
0.498757
0.505275
0.513047
0.522102
0.532361
0.54363
0.555616
0.567959
0.580271
0.592179
0.603354
0.613531
0.622517
0.630187
0.636475
0.641361
0.644889
0.647111
0.64809
0.647908
0.646657
0.644435
0.641338
0.637463
0.632902
0.62774
0.622058
0.615929
0.609421
0.602595
0.595505
0.5882
0.580725
0.573116
0.56541
0.557635
0.549817
0.54198
0.534143
0.526323
0.518536
0.510793
0.503105
0.495482
0.487932
0.48046
0.473072
0.465773
0.458566
0.451454
0.44444
0.437526
0.430712
0.424001
0.417392
0.410886
0.404484
0.398185
0.39199
0.385897
0.379908
0.374021
0.368237
0.362554
0.356972
0.35149
0.346109
0.340827
0.335644
0.33056
0.325575
0.320688
0.315899
0.31121
0.306619
0.302129
0.29774
0.293454
0.289274
0.285202
0.281243
0.2774
0.273682
0.270096
0.266651
0.263361
0.260242
0.257312
0.254599
0.252132
0.249954
0.248116
0.246688
0.24576
0.245458
0.245956
0.247505
0.25048
0.255462
0.263384
0.275758
0.295041
0.325097
0.371633
0.442457
0.54772
0.701007
0.923107
1.24788
1.73396
2.42286
3.43315
2.0757
0.594126
0.652904
0.747089
0.882041
1.0561
1.25771
1.46878
1.67007
1.84654
1.99044
2.1012
2.18329
2.24349
2.28877
2.32518
2.35748
2.38916
2.42265
2.45948
2.50044
2.54576
2.5952
2.6482
2.70394
2.76152
2.81995
2.87823
2.93545
2.99074
3.04332
3.09252
3.13758
3.17769
3.21111
3.23643
3.25331
3.26112
3.25915
3.24662
3.2227
3.18647
3.13694
3.07313
2.99408
2.89894
2.78712
2.65838
2.51304
2.35209
2.1774
1.9918
1.79907
1.60387
1.41141
1.22706
1.05587
0.902028
0.768512
0.656875
0.567262
0.498604
0.448905
0.415559
0.39564
0.386165
0.384321
0.387642
0.394116
0.402214
0.410851
0.419314
0.427172
0.434205
0.440336
0.44558
0.45001
0.453729
0.456857
0.459516
0.461827
0.463913
0.465893
0.467895
0.470053
0.472517
0.475451
0.479033
0.483447
0.488863
0.495419
0.503189
0.512161
0.522222
0.53316
0.544684
0.556453
0.568111
0.579326
0.589808
0.599329
0.607722
0.614879
0.620741
0.625327
0.628649
0.630746
0.631681
0.631531
0.630381
0.628321
0.625442
0.621831
0.617575
0.612752
0.607436
0.601696
0.595595
0.589188
0.582527
0.575657
0.568619
0.561449
0.554179
0.546837
0.539448
0.532033
0.52461
0.517196
0.509806
0.50245
0.49514
0.487884
0.480689
0.473563
0.466511
0.459537
0.452645
0.445837
0.439118
0.432488
0.425949
0.419504
0.413152
0.406895
0.400733
0.394666
0.388696
0.382821
0.377042
0.371359
0.365772
0.360281
0.354885
0.349584
0.344378
0.339267
0.33425
0.329328
0.324501
0.319769
0.315132
0.310591
0.306147
0.301802
0.297556
0.293412
0.289374
0.285445
0.28163
0.277935
0.274368
0.270939
0.267661
0.264547
0.261619
0.258901
0.256422
0.254225
0.252358
0.250891
0.249911
0.24954
0.249946
0.251372
0.254181
0.258933
0.266531
0.278458
0.297166
0.326604
0.372778
0.444125
0.551753
0.710427
0.942099
1.28232
1.78931
2.51827
3.497
2.42549
0.354524
0.369209
0.39203
0.427553
0.481125
0.55768
0.660363
0.789095
0.939852
1.10514
1.27553
1.44177
1.59658
1.73566
1.85775
1.96393
2.05664
2.1389
2.21358
2.28315
2.34949
2.41393
2.47724
2.53977
2.60153
2.66227
2.72155
2.77884
2.83357
2.88514
2.93294
2.97624
3.01409
3.04378
3.06516
3.07762
3.08044
3.07288
3.0541
3.02321
2.97929
2.92139
2.84862
2.7602
2.6556
2.53461
2.39754
2.24535
2.07982
1.90362
1.72035
1.53443
1.35085
1.17478
1.01112
0.863942
0.736192
0.629417
0.543799
0.478328
0.431087
0.399563
0.380936
0.372339
0.371071
0.374771
0.381508
0.389811
0.398627
0.407253
0.415258
0.422413
0.428632
0.43392
0.438344
0.442003
0.445012
0.447491
0.449558
0.451324
0.452899
0.454387
0.455894
0.457534
0.45943
0.46172
0.464554
0.468094
0.472503
0.477925
0.484465
0.492165
0.500983
0.510788
0.521364
0.532427
0.543659
0.554737
0.565362
0.575277
0.584277
0.592216
0.598998
0.604604
0.609009
0.612226
0.614292
0.615267
0.615223
0.614238
0.612397
0.609783
0.606477
0.602559
0.598101
0.593174
0.587841
0.582159
0.576182
0.569957
0.563527
0.55693
0.5502
0.543366
0.536456
0.529493
0.522496
0.515484
0.508472
0.501473
0.4945
0.487562
0.480668
0.473826
0.467041
0.46032
0.453666
0.447084
0.440578
0.434149
0.4278
0.421534
0.415352
0.409255
0.403244
0.397321
0.391486
0.385739
0.380081
0.374513
0.369035
0.363646
0.358347
0.353138
0.34802
0.342992
0.338054
0.333207
0.328451
0.323787
0.319214
0.314734
0.310348
0.306057
0.301864
0.297769
0.293778
0.289892
0.286118
0.282461
0.278928
0.275531
0.27228
0.26919
0.266281
0.263576
0.261107
0.25891
0.257038
0.255554
0.254546
0.25413
0.254471
0.255802
0.25847
0.263013
0.270297
0.281759
0.299808
0.328416
0.373796
0.444915
0.553756
0.716119
0.954653
1.30526
1.8218
2.56354
3.45711
2.726
0.256197
0.26115
0.268021
0.277854
0.292275
0.313537
0.3445
0.388364
0.448156
0.526068
0.622824
0.737265
0.866345
1.00556
1.14969
1.29371
1.43349
1.56626
1.69059
1.80622
1.91362
2.01363
2.10719
2.19507
2.27787
2.35593
2.42939
2.49823
2.56226
2.62124
2.67476
2.72226
2.76244
2.79295
2.81386
2.82465
2.82471
2.81334
2.78979
2.75326
2.70294
2.63809
2.55806
2.46243
2.35111
2.2245
2.08358
1.9301
1.76662
1.59657
1.42413
1.25399
1.09101
0.939734
0.803988
0.686472
0.588589
0.510453
0.451062
0.408579
0.38062
0.364539
0.357667
0.35751
0.361895
0.369053
0.377629
0.386642
0.39543
0.403574
0.410847
0.417154
0.422494
0.426926
0.430544
0.433461
0.435794
0.437662
0.439173
0.44043
0.441531
0.442565
0.443623
0.444799
0.446193
0.447919
0.450104
0.452886
0.456412
0.460824
0.466243
0.472747
0.480352
0.488996
0.498538
0.50876
0.519394
0.530144
0.540714
0.550834
0.560273
0.568846
0.576427
0.582956
0.588372
0.592657
0.595821
0.597903
0.598958
0.599053
0.598261
0.59666
0.594327
0.591339
0.587768
0.583683
0.579148
0.574222
0.56896
0.563412
0.55762
0.551626
0.545466
0.53917
0.532768
0.526284
0.51974
0.513157
0.506549
0.499934
0.493322
0.486727
0.480156
0.47362
0.467125
0.460678
0.454284
0.447948
0.441674
0.435465
0.429326
0.423257
0.417262
0.411342
0.4055
0.399736
0.394052
0.388448
0.382927
0.377488
0.372132
0.36686
0.361672
0.356569
0.351552
0.34662
0.341774
0.337015
0.332342
0.327758
0.323262
0.318856
0.314541
0.310318
0.306189
0.302157
0.298225
0.294396
0.290677
0.287072
0.283589
0.280238
0.27703
0.27398
0.271108
0.268435
0.265993
0.263818
0.26196
0.260482
0.259468
0.259033
0.259334
0.260595
0.26315
0.267512
0.274508
0.285516
0.302876
0.330526
0.374787
0.445029
0.553965
0.718215
0.960656
1.31631
1.8328
2.56573
3.38411
2.94987
0.230325
0.231306
0.232877
0.235269
0.238861
0.244229
0.252234
0.264086
0.281363
0.305954
0.339915
0.385234
0.44353
0.515758
0.601966
0.701186
0.811492
0.930227
1.05435
1.1808
1.30686
1.43034
1.54964
1.66368
1.77182
1.87367
1.96896
2.05751
2.1391
2.21348
2.28031
2.33909
2.38846
2.42642
2.45276
2.46715
2.46917
2.45832
2.43404
2.3958
2.34307
2.27546
2.19277
2.09513
1.98307
1.8577
1.7208
1.57489
1.42322
1.26968
1.11861
0.974384
0.84109
0.72207
0.619632
0.534897
0.467832
0.417427
0.381946
0.359205
0.34682
0.342421
0.343821
0.349133
0.35682
0.365699
0.374902
0.383829
0.392093
0.399471
0.405866
0.411266
0.415723
0.419322
0.422174
0.424393
0.426096
0.427395
0.428391
0.429177
0.429839
0.430456
0.431104
0.431861
0.43281
0.434043
0.435664
0.437789
0.440548
0.444071
0.448485
0.453887
0.460335
0.467826
0.476284
0.485562
0.495449
0.50569
0.51601
0.526139
0.535829
0.544869
0.553108
0.560443
0.566777
0.572059
0.576272
0.579428
0.581564
0.582731
0.582992
0.582415
0.581072
0.579034
0.576374
0.573157
0.569449
0.565309
0.560792
0.55595
0.550827
0.545467
0.539905
0.534177
0.528311
0.522336
0.516273
0.510145
0.503969
0.497762
0.491537
0.485308
0.479086
0.472879
0.466697
0.460546
0.454433
0.448364
0.442342
0.436374
0.430462
0.42461
0.41882
0.413096
0.407439
0.401851
0.396335
0.390891
0.385521
0.380226
0.375008
0.369868
0.364806
0.359822
0.354919
0.350097
0.345356
0.340698
0.336122
0.331631
0.327225
0.322906
0.318674
0.314531
0.310481
0.306524
0.302665
0.298908
0.295257
0.291719
0.2883
0.285011
0.281863
0.27887
0.276052
0.27343
0.271034
0.268902
0.267079
0.26563
0.264635
0.264205
0.264493
0.265714
0.268186
0.272398
0.279135
0.289709
0.306368
0.332961
0.375814
0.444546
0.552425
0.716666
0.959982
1.31558
1.82453
2.53325
3.29451
3.10267
0.22919
0.228608
0.228222
0.228098
0.228335
0.229074
0.230534
0.233048
0.237096
0.243338
0.252636
0.266053
0.284821
0.310275
0.343738
0.386398
0.439157
0.502495
0.576375
0.660184
0.752757
0.852456
0.957316
1.0652
1.17399
1.28168
1.3865
1.48693
1.5817
1.66973
1.75005
1.82171
1.8838
1.93355
1.97037
1.99377
2.00337
1.99881
1.97977
1.94602
1.89747
1.83421
1.75662
1.6655
1.56211
1.44832
1.32658
1.19997
1.07205
0.946654
0.827573
0.718233
0.621355
0.538728
0.471119
0.418331
0.379379
0.352709
0.33644
0.328564
0.327126
0.330346
0.336699
0.344941
0.354099
0.363449
0.372469
0.380813
0.388271
0.394742
0.400206
0.404702
0.408307
0.411122
0.413259
0.414833
0.415956
0.416731
0.417251
0.417602
0.417859
0.418091
0.418364
0.418742
0.419292
0.420089
0.421218
0.422775
0.424869
0.427618
0.43114
0.435546
0.440916
0.447291
0.454652
0.462918
0.471938
0.48151
0.491394
0.501333
0.51108
0.520404
0.529138
0.537141
0.54428
0.55047
0.555662
0.559845
0.563031
0.565253
0.56656
0.567011
0.566667
0.565597
0.563867
0.561542
0.558687
0.55536
0.551616
0.547509
0.543085
0.538387
0.533454
0.528322
0.523023
0.517583
0.51203
0.506384
0.500666
0.494893
0.489082
0.483245
0.477394
0.471541
0.465694
0.459863
0.454053
0.448272
0.442525
0.436817
0.431153
0.425536
0.419971
0.41446
0.409007
0.403613
0.398281
0.393013
0.387811
0.382677
0.377612
0.372617
0.367695
0.362846
0.358072
0.353373
0.348751
0.344207
0.339742
0.335357
0.331054
0.326834
0.322699
0.318651
0.314691
0.310824
0.307052
0.30338
0.299812
0.296355
0.293016
0.289805
0.286733
0.283815
0.281069
0.278517
0.276188
0.274118
0.272354
0.270957
0.270006
0.269609
0.269911
0.271122
0.273544
0.277641
0.284155
0.294324
0.310286
0.335751
0.376947
0.44358
0.549267
0.71159
0.95282
1.30374
1.79928
2.47402
3.19003
3.18761
0.234458
0.233292
0.232194
0.231185
0.230296
0.229566
0.229053
0.228848
0.229084
0.229951
0.231716
0.234732
0.239459
0.246466
0.25643
0.270119
0.28836
0.311993
0.341814
0.378502
0.422556
0.474214
0.533401
0.599682
0.67225
0.749939
0.83128
0.914575
0.997991
1.07965
1.15766
1.23018
1.29575
1.3527
1.39834
1.4315
1.45132
1.45724
1.44896
1.42643
1.38989
1.33989
1.27737
1.2037
1.12074
1.03084
0.936757
0.841583
0.748506
0.660579
0.580449
0.510131
0.450871
0.403112
0.366578
0.34042
0.3234
0.31407
0.310921
0.312509
0.31753
0.324863
0.333579
0.342932
0.352345
0.361385
0.369751
0.377249
0.383774
0.389297
0.393841
0.397471
0.400278
0.402366
0.403847
0.404831
0.405422
0.405716
0.405799
0.405748
0.40563
0.405506
0.405432
0.405462
0.405651
0.40606
0.406757
0.40782
0.409339
0.411416
0.414158
0.417675
0.422059
0.427381
0.433667
0.440888
0.448957
0.457728
0.467007
0.476567
0.486171
0.495581
0.504637
0.513149
0.520958
0.527944
0.53403
0.539173
0.543362
0.546609
0.548946
0.550417
0.551077
0.550983
0.550199
0.548786
0.546806
0.544316
0.541373
0.538029
0.534331
0.530325
0.52605
0.521543
0.516837
0.511963
0.506946
0.50181
0.496577
0.491266
0.485893
0.480473
0.47502
0.469544
0.464057
0.458568
0.453084
0.447613
0.442161
0.436735
0.431339
0.425978
0.420657
0.415378
0.410146
0.404964
0.399834
0.394759
0.389741
0.384784
0.379887
0.375055
0.370288
0.365588
0.360957
0.356396
0.351906
0.34749
0.343149
0.338884
0.334697
0.33059
0.326565
0.322624
0.31877
0.315005
0.311335
0.307761
0.304291
0.30093
0.297687
0.29457
0.291591
0.288764
0.286109
0.283646
0.281404
0.279419
0.277737
0.276417
0.275536
0.275199
0.275547
0.27678
0.279186
0.283205
0.289534
0.299336
0.31462
0.338916
0.378259
0.442268
0.544695
0.703264
0.939615
1.28182
1.75929
2.39498
3.07129
3.20909
0.2407
0.239325
0.237976
0.236659
0.235384
0.234165
0.233017
0.231967
0.231055
0.230336
0.229891
0.229826
0.230284
0.231451
0.233562
0.236904
0.241817
0.24869
0.257952
0.270055
0.285455
0.304583
0.327814
0.355427
0.387571
0.424216
0.465118
0.509789
0.557474
0.607148
0.657521
0.707067
0.75428
0.799245
0.839369
0.872459
0.896739
0.910911
0.914167
0.90619
0.88714
0.857643
0.818775
0.772046
0.719346
0.662865
0.604964
0.548012
0.494202
0.445378
0.402901
0.367587
0.339711
0.319083
0.305153
0.297133
0.294112
0.295146
0.299327
0.305821
0.313891
0.322896
0.332298
0.341657
0.350624
0.358938
0.366421
0.372968
0.378534
0.383129
0.3868
0.389624
0.391696
0.393121
0.394003
0.394449
0.394553
0.394406
0.394085
0.39366
0.393193
0.392736
0.392339
0.392047
0.391906
0.391965
0.392278
0.392908
0.393929
0.395424
0.397489
0.400223
0.403723
0.408074
0.413332
0.419513
0.426585
0.434458
0.442989
0.451995
0.461261
0.470565
0.479745
0.488589
0.49691
0.504561
0.511431
0.51745
0.522578
0.526804
0.53014
0.532616
0.534271
0.535157
0.535328
0.534841
0.533755
0.532124
0.530006
0.52745
0.524506
0.521218
0.517629
0.513775
0.509692
0.50541
0.500958
0.49636
0.491639
0.486816
0.481908
0.476931
0.4719
0.466827
0.461723
0.456599
0.451464
0.446326
0.441192
0.436069
0.430962
0.425877
0.420819
0.415792
0.410801
0.405848
0.400938
0.396073
0.391256
0.386491
0.38178
0.377125
0.372528
0.367991
0.363518
0.359108
0.354766
0.350492
0.346288
0.342156
0.338099
0.334118
0.330216
0.326396
0.32266
0.319012
0.315455
0.311995
0.308637
0.305388
0.302255
0.299248
0.296379
0.293662
0.291115
0.288761
0.286628
0.28475
0.283173
0.281955
0.281172
0.280924
0.281349
0.282637
0.285064
0.289045
0.295232
0.304709
0.319351
0.342469
0.379818
0.440769
0.538979
0.692101
0.921029
1.25108
1.7069
2.30157
2.93985
3.17577
0.246422
0.244989
0.243569
0.242163
0.240776
0.239409
0.238069
0.236761
0.2355
0.234302
0.233193
0.232208
0.231392
0.230803
0.230518
0.230629
0.23125
0.232514
0.234576
0.237606
0.241787
0.247308
0.254351
0.26308
0.273627
0.286071
0.300421
0.316594
0.334387
0.353463
0.373322
0.39329
0.412613
0.432257
0.451832
0.470068
0.485639
0.497308
0.504072
0.505256
0.500586
0.490209
0.474678
0.454899
0.432048
0.407464
0.382527
0.358538
0.336608
0.317591
0.302046
0.290243
0.282198
0.277731
0.276519
0.278153
0.282179
0.288129
0.295541
0.303973
0.31301
0.322278
0.331444
0.340228
0.348407
0.355814
0.362339
0.367927
0.372567
0.376288
0.379152
0.381237
0.38264
0.383461
0.383799
0.383753
0.383412
0.382858
0.382162
0.381389
0.380593
0.379823
0.379121
0.378527
0.378079
0.377817
0.377785
0.378032
0.378617
0.379609
0.381087
0.38314
0.385858
0.389331
0.393633
0.398811
0.404877
0.411792
0.419468
0.427768
0.436517
0.445521
0.454625
0.463607
0.472265
0.480425
0.48795
0.494737
0.500719
0.50586
0.510151
0.5136
0.516234
0.518093
0.51922
0.519667
0.519488
0.518735
0.517461
0.515717
0.513551
0.511008
0.508131
0.504958
0.501525
0.497864
0.494004
0.489971
0.48579
0.481481
0.477064
0.472556
0.467973
0.463327
0.458632
0.453898
0.449135
0.444353
0.439558
0.43476
0.429963
0.425175
0.4204
0.415644
0.410912
0.406207
0.401534
0.396897
0.392299
0.387742
0.383232
0.378769
0.374358
0.37
0.365698
0.361454
0.357272
0.353153
0.3491
0.345114
0.341199
0.337358
0.333592
0.329905
0.3263
0.322782
0.319353
0.31602
0.312788
0.309665
0.306657
0.303776
0.301033
0.298443
0.296024
0.293799
0.291794
0.290046
0.288598
0.287508
0.28685
0.286721
0.287255
0.288634
0.29112
0.295104
0.301195
0.310398
0.324444
0.346405
0.381682
0.439245
0.532435
0.678619
0.897885
1.21295
1.64441
2.19789
2.79799
3.09821
0.251266
0.249839
0.248424
0.247019
0.245626
0.244244
0.242875
0.241519
0.240179
0.238862
0.237576
0.236331
0.235145
0.234036
0.23303
0.23216
0.231462
0.230982
0.23077
0.230884
0.231382
0.232328
0.233781
0.235794
0.238407
0.241638
0.24548
0.249888
0.254769
0.259978
0.265307
0.270485
0.275216
0.280108
0.285345
0.290654
0.295678
0.300006
0.303227
0.304991
0.305058
0.303346
0.299945
0.295113
0.289245
0.282827
0.276381
0.270411
0.265357
0.261569
0.259291
0.25866
0.259718
0.262424
0.266667
0.272283
0.279068
0.286784
0.295175
0.303973
0.312914
0.321744
0.330236
0.338192
0.345456
0.351912
0.35749
0.362163
0.365938
0.368856
0.370983
0.372398
0.373195
0.373467
0.373311
0.372816
0.372065
0.371133
0.370087
0.368986
0.367878
0.366809
0.365817
0.364935
0.364198
0.363638
0.363289
0.363192
0.363393
0.363946
0.364917
0.366379
0.368414
0.371108
0.374541
0.378779
0.383864
0.389802
0.396552
0.40403
0.412104
0.420626
0.429454
0.438373
0.447176
0.455671
0.463696
0.471121
0.477852
0.483824
0.489004
0.493381
0.496964
0.499776
0.501852
0.503234
0.503968
0.504105
0.503692
0.502779
0.501414
0.49964
0.497501
0.495035
0.492279
0.489265
0.486024
0.482584
0.478969
0.475202
0.471303
0.46729
0.46318
0.458987
0.454725
0.450405
0.446038
0.441635
0.437203
0.43275
0.428285
0.423814
0.419343
0.414878
0.410424
0.405986
0.401568
0.397176
0.392812
0.388481
0.384187
0.379932
0.375721
0.371555
0.367439
0.363375
0.359367
0.355416
0.351526
0.347699
0.343939
0.340249
0.336631
0.33309
0.329629
0.326252
0.322964
0.31977
0.316678
0.313693
0.310826
0.308085
0.305484
0.303038
0.300764
0.298686
0.296831
0.295234
0.29394
0.293003
0.292497
0.292518
0.293193
0.294698
0.297282
0.301312
0.307357
0.316341
0.32985
0.350701
0.383888
0.43785
0.525397
0.663406
0.871116
1.16893
1.57405
2.08704
2.64835
2.98667
0.255218
0.253826
0.252447
0.251079
0.249723
0.248378
0.247044
0.245718
0.244401
0.243094
0.241799
0.240519
0.23926
0.238028
0.236833
0.235686
0.234601
0.233595
0.232686
0.231894
0.231239
0.230744
0.230427
0.230305
0.230388
0.230679
0.23117
0.231837
0.232643
0.233529
0.234419
0.235219
0.235838
0.236515
0.237314
0.23821
0.239153
0.240075
0.240897
0.241545
0.241957
0.242107
0.242006
0.241718
0.241348
0.24104
0.24096
0.241282
0.242172
0.243774
0.246203
0.249534
0.253801
0.258995
0.265065
0.271919
0.27943
0.287439
0.295765
0.304211
0.312579
0.320677
0.328327
0.335379
0.341714
0.347247
0.351932
0.355756
0.35874
0.360928
0.362387
0.363197
0.363445
0.363221
0.362613
0.361705
0.360575
0.359291
0.357916
0.356503
0.355098
0.353741
0.352469
0.351311
0.350296
0.349453
0.348809
0.348398
0.348254
0.348422
0.348951
0.349903
0.351347
0.35336
0.356018
0.359397
0.363557
0.368534
0.37433
0.380908
0.38818
0.39606
0.404425
0.41308
0.421825
0.430462
0.438812
0.446722
0.454071
0.460768
0.466754
0.471994
0.476477
0.480211
0.483216
0.485523
0.487171
0.488203
0.488663
0.488597
0.488051
0.487068
0.485689
0.483955
0.481901
0.479561
0.476967
0.474146
0.471124
0.467926
0.464571
0.46108
0.457469
0.453755
0.44995
0.446069
0.442122
0.438121
0.434074
0.42999
0.425878
0.421745
0.417598
0.413443
0.409286
0.405133
0.400988
0.396857
0.392745
0.388655
0.384592
0.38056
0.376563
0.372604
0.368687
0.364815
0.360993
0.357222
0.353507
0.349851
0.346257
0.342729
0.339271
0.335886
0.332579
0.329355
0.326219
0.323177
0.320236
0.317403
0.314688
0.312102
0.309657
0.307369
0.305258
0.303345
0.301658
0.300233
0.299114
0.298356
0.298029
0.298229
0.299077
0.300744
0.303465
0.307587
0.313635
0.322458
0.3355
0.355313
0.386449
0.436713
0.518196
0.647079
0.841719
1.12054
1.49798
1.9715
2.49359
2.85028
0.258369
0.257022
0.25569
0.254372
0.253069
0.251779
0.250501
0.249234
0.247976
0.246725
0.245482
0.244246
0.243019
0.241803
0.2406
0.239415
0.238252
0.237119
0.236023
0.234972
0.233975
0.23304
0.232176
0.231389
0.230685
0.230065
0.229527
0.229063
0.228662
0.228304
0.227966
0.227622
0.227252
0.226938
0.226696
0.226535
0.226461
0.226478
0.226587
0.226792
0.227099
0.227522
0.228086
0.228828
0.229801
0.231067
0.232698
0.234772
0.237363
0.240539
0.244353
0.248837
0.254002
0.259828
0.266265
0.273235
0.28063
0.288317
0.296145
0.303952
0.31157
0.31884
0.325617
0.331777
0.337224
0.341895
0.345756
0.348807
0.351072
0.352601
0.353458
0.35372
0.353471
0.352795
0.351774
0.350486
0.349004
0.34739
0.345703
0.343992
0.342301
0.340667
0.339122
0.337695
0.336412
0.335298
0.334376
0.333672
0.333215
0.333037
0.33318
0.33369
0.334623
0.336045
0.338025
0.340637
0.343947
0.348013
0.352866
0.358507
0.364889
0.372002
0.379737
0.387937
0.39642
0.404994
0.413476
0.421695
0.429508
0.436799
0.443482
0.449499
0.454817
0.459424
0.463324
0.466536
0.469087
0.471011
0.472349
0.473141
0.473429
0.473254
0.472657
0.471677
0.47035
0.468709
0.466787
0.464612
0.462211
0.459608
0.456825
0.453882
0.450797
0.447586
0.444265
0.440847
0.437344
0.433768
0.430129
0.426436
0.422699
0.418925
0.415121
0.411296
0.407455
0.403604
0.39975
0.395897
0.392052
0.388218
0.384401
0.380606
0.376836
0.373097
0.369391
0.365724
0.362098
0.358519
0.35499
0.351514
0.348096
0.34474
0.341451
0.338232
0.335089
0.332027
0.329053
0.326171
0.323391
0.32072
0.318169
0.315749
0.313473
0.311357
0.309422
0.307689
0.306189
0.304955
0.304032
0.303474
0.303353
0.303759
0.304812
0.306675
0.309573
0.313829
0.319932
0.328657
0.341307
0.360173
0.389349
0.435931
0.511132
0.630246
0.810698
1.06927
1.41822
1.85336
2.33622
2.69688
0.260836
0.259536
0.258252
0.256986
0.255738
0.254506
0.25329
0.252087
0.250895
0.249714
0.248541
0.247375
0.246215
0.245061
0.243913
0.242773
0.241641
0.24052
0.239413
0.238324
0.237258
0.236217
0.235208
0.234234
0.2333
0.232407
0.231558
0.230752
0.229989
0.229264
0.228575
0.227917
0.227292
0.226728
0.226235
0.225826
0.225517
0.225323
0.225262
0.225356
0.225626
0.2261
0.22681
0.227793
0.229088
0.230742
0.232803
0.235317
0.238329
0.241876
0.245984
0.250663
0.255905
0.26168
0.267933
0.274588
0.281542
0.288675
0.295852
0.302928
0.309757
0.3162
0.322134
0.327452
0.332076
0.335954
0.339066
0.341415
0.343032
0.343965
0.344279
0.344047
0.343346
0.342258
0.340858
0.339219
0.337408
0.335485
0.333502
0.331507
0.32954
0.327636
0.325827
0.324138
0.322594
0.321218
0.320029
0.319049
0.318302
0.317812
0.31761
0.317734
0.318226
0.319138
0.320531
0.322469
0.32502
0.328247
0.332201
0.336911
0.342371
0.348622
0.355591
0.363163
0.371188
0.379494
0.387901
0.396234
0.404333
0.412062
0.41931
0.425995
0.43206
0.437472
0.442217
0.446298
0.449729
0.452535
0.454746
0.456399
0.45753
0.458178
0.458381
0.458175
0.457597
0.45668
0.455456
0.453953
0.452199
0.450218
0.448033
0.445664
0.443131
0.44045
0.437638
0.434708
0.431674
0.428547
0.425338
0.422059
0.418717
0.415323
0.411883
0.408406
0.404898
0.401368
0.39782
0.394262
0.390698
0.387135
0.383578
0.380032
0.376503
0.372994
0.369511
0.366058
0.36264
0.359261
0.355926
0.352639
0.349406
0.34623
0.343117
0.340071
0.337099
0.334207
0.331401
0.328688
0.326076
0.323576
0.321196
0.318951
0.316853
0.31492
0.313172
0.311633
0.310333
0.309306
0.308597
0.30826
0.308367
0.309005
0.310291
0.312383
0.315493
0.319927
0.326135
0.334823
0.347163
0.365189
0.392537
0.435554
0.504454
0.613471
0.779027
1.01655
1.33664
1.73454
2.17855
2.5329
0.262734
0.261478
0.26024
0.259022
0.257825
0.256647
0.255488
0.254346
0.253218
0.252104
0.251001
0.249907
0.248821
0.247741
0.246666
0.245595
0.244529
0.243467
0.242411
0.241362
0.240322
0.239294
0.23828
0.237284
0.236309
0.235358
0.234435
0.233543
0.232684
0.231862
0.231079
0.230339
0.229652
0.229026
0.228473
0.228009
0.227648
0.227408
0.227312
0.227382
0.227645
0.22813
0.22887
0.2299
0.231256
0.232978
0.235103
0.237667
0.2407
0.244228
0.248261
0.252799
0.257824
0.263299
0.269167
0.275352
0.281755
0.288265
0.294756
0.301098
0.30716
0.312818
0.317961
0.322501
0.32637
0.329528
0.33196
0.333675
0.334707
0.335103
0.334926
0.334245
0.333136
0.331671
0.329923
0.327959
0.325843
0.323628
0.321366
0.3191
0.316869
0.314706
0.31264
0.310698
0.308901
0.307271
0.305825
0.304583
0.303564
0.302786
0.302274
0.302055
0.302162
0.302636
0.303523
0.304878
0.306763
0.309238
0.312362
0.316181
0.320727
0.326069
0.332179
0.338985
0.346378
0.354217
0.36234
0.370577
0.378765
0.386751
0.394405
0.401623
0.408323
0.41445
0.41997
0.424866
0.42914
0.432803
0.435876
0.438385
0.440362
0.441842
0.442857
0.443444
0.443635
0.443464
0.442962
0.442156
0.441075
0.439742
0.438182
0.436415
0.43446
0.432336
0.430058
0.427641
0.425099
0.422445
0.41969
0.416845
0.413921
0.410926
0.407869
0.404758
0.401602
0.398408
0.395183
0.391933
0.388666
0.385386
0.382101
0.378817
0.375537
0.372269
0.369018
0.365788
0.362585
0.359414
0.35628
0.353189
0.350145
0.347155
0.344223
0.341356
0.338559
0.335841
0.333207
0.330667
0.328229
0.325902
0.3237
0.321635
0.319721
0.317978
0.316425
0.315089
0.313998
0.31319
0.312709
0.31261
0.312962
0.313854
0.315399
0.317747
0.321104
0.325754
0.332115
0.340829
0.352939
0.370242
0.395925
0.435579
0.498341
0.597238
0.747595
0.963699
1.25497
1.61676
2.02266
2.36354
0.264168
0.262951
0.261753
0.260577
0.259424
0.258294
0.257184
0.256095
0.255024
0.253969
0.252928
0.2519
0.250881
0.249871
0.248867
0.247868
0.246873
0.24588
0.244891
0.243905
0.242923
0.241946
0.240976
0.240016
0.239068
0.238135
0.237221
0.23633
0.235467
0.234637
0.233844
0.233096
0.232403
0.23177
0.23121
0.230737
0.230367
0.230118
0.230012
0.230072
0.230324
0.230797
0.231521
0.232531
0.233859
0.235539
0.237603
0.240081
0.242995
0.246363
0.250188
0.254463
0.259165
0.264254
0.269672
0.275343
0.281173
0.287058
0.292881
0.298521
0.303859
0.308785
0.3132
0.317027
0.320208
0.322711
0.324527
0.325669
0.326172
0.326084
0.325463
0.324378
0.322896
0.321089
0.319022
0.31676
0.314361
0.311875
0.309352
0.306831
0.30435
0.301941
0.299631
0.297446
0.295407
0.293534
0.291843
0.290352
0.289076
0.288031
0.287235
0.286709
0.286477
0.286569
0.28702
0.287875
0.289182
0.290997
0.293378
0.296372
0.300044
0.304479
0.309682
0.315626
0.322246
0.329441
0.337079
0.34501
0.353074
0.361116
0.368993
0.376581
0.383777
0.390503
0.396704
0.402345
0.407406
0.411887
0.415794
0.419145
0.421964
0.424276
0.426113
0.427506
0.428484
0.429079
0.429321
0.429237
0.428854
0.428197
0.427288
0.426148
0.424798
0.423256
0.421538
0.419659
0.417634
0.415475
0.413196
0.410808
0.40832
0.405744
0.403088
0.400362
0.397574
0.394731
0.391843
0.388915
0.385956
0.382971
0.379969
0.376954
0.373934
0.370915
0.367902
0.364901
0.361919
0.358961
0.356033
0.35314
0.350289
0.347487
0.344738
0.342051
0.339432
0.336888
0.334428
0.332061
0.329796
0.327645
0.325621
0.323737
0.322009
0.320458
0.319104
0.317974
0.317099
0.316516
0.316271
0.31642
0.317031
0.318192
0.320014
0.322643
0.326274
0.331176
0.337733
0.346528
0.35849
0.37519
0.399393
0.43595
0.49289
0.581922
0.717172
0.91192
1.17476
1.50164
1.8704
2.19299
0.265229
0.264045
0.26288
0.26174
0.260623
0.259532
0.258463
0.257417
0.256392
0.255386
0.254398
0.253424
0.252463
0.251512
0.25057
0.249635
0.248704
0.247778
0.246853
0.245931
0.245011
0.244093
0.243179
0.24227
0.241368
0.240477
0.239599
0.238739
0.237902
0.237092
0.236317
0.235584
0.234902
0.234277
0.233722
0.233249
0.232876
0.232621
0.232503
0.232545
0.232774
0.233215
0.233898
0.234853
0.236111
0.237702
0.239654
0.24199
0.244729
0.247883
0.25145
0.25542
0.259766
0.264446
0.269402
0.27456
0.279831
0.285115
0.290303
0.295285
0.299951
0.304201
0.307948
0.311122
0.313677
0.315585
0.316844
0.317469
0.317495
0.316969
0.315949
0.314498
0.312681
0.310563
0.308209
0.305676
0.303019
0.300287
0.297524
0.29477
0.29206
0.289423
0.286888
0.284477
0.282212
0.280111
0.278191
0.276465
0.27495
0.273656
0.2726
0.271795
0.27126
0.271017
0.27109
0.271513
0.272323
0.273568
0.275295
0.277543
0.280415
0.283981
0.288277
0.293312
0.299063
0.305472
0.312446
0.319866
0.327591
0.335474
0.343369
0.351138
0.358663
0.365846
0.372608
0.378895
0.384668
0.389909
0.394609
0.398775
0.402418
0.405558
0.408219
0.410424
0.412203
0.413582
0.414588
0.415248
0.415587
0.41563
0.415397
0.414912
0.414192
0.413257
0.412124
0.410807
0.409322
0.407682
0.4059
0.403988
0.401957
0.399818
0.39758
0.395254
0.392848
0.390371
0.387831
0.385237
0.382596
0.379916
0.377204
0.374467
0.371712
0.368947
0.366177
0.363409
0.36065
0.357906
0.355185
0.352491
0.349833
0.347218
0.344651
0.342141
0.339696
0.337324
0.335035
0.332837
0.330743
0.328763
0.326913
0.325206
0.323661
0.322298
0.32114
0.320215
0.319554
0.319198
0.319191
0.319591
0.320467
0.321906
0.324018
0.326943
0.330869
0.336048
0.342838
0.351765
0.363653
0.379869
0.402787
0.436556
0.488109
0.567774
0.688374
0.862227
1.09737
1.39064
1.72344
2.02457
0.265994
0.264835
0.263697
0.262584
0.261497
0.260436
0.259399
0.258387
0.257398
0.256431
0.255483
0.254552
0.253637
0.252734
0.251843
0.250959
0.250082
0.24921
0.248341
0.247474
0.246609
0.245745
0.244883
0.244025
0.24317
0.242322
0.241485
0.24066
0.239854
0.239072
0.23832
0.237605
0.236936
0.23632
0.235768
0.235294
0.234914
0.234646
0.234508
0.234523
0.234715
0.235109
0.235732
0.236613
0.237779
0.239256
0.24107
0.24324
0.245781
0.248699
0.251993
0.255646
0.25963
0.263903
0.268407
0.273071
0.277808
0.282524
0.287118
0.291487
0.295531
0.299158
0.302291
0.30487
0.306853
0.308222
0.308976
0.309134
0.308731
0.307813
0.306434
0.304657
0.302541
0.300148
0.297538
0.294766
0.291882
0.288934
0.285962
0.283004
0.280093
0.277258
0.274524
0.271915
0.269449
0.267146
0.265018
0.263082
0.261347
0.259828
0.258533
0.257475
0.256668
0.256125
0.255866
0.255914
0.256296
0.257048
0.258208
0.259807
0.261959
0.264724
0.268152
0.272277
0.277111
0.282638
0.288807
0.295537
0.302718
0.310222
0.317914
0.325654
0.333314
0.34078
0.347956
0.354764
0.361148
0.367068
0.372501
0.377437
0.381875
0.385825
0.389301
0.392321
0.394906
0.397081
0.398868
0.400291
0.401374
0.402139
0.402608
0.4028
0.402736
0.402432
0.401906
0.401174
0.400251
0.39915
0.397885
0.396468
0.394911
0.393225
0.39142
0.389507
0.387496
0.385395
0.383215
0.380963
0.378648
0.376278
0.373862
0.371407
0.368921
0.366411
0.363885
0.36135
0.358814
0.356283
0.353765
0.351268
0.348798
0.346364
0.343974
0.341635
0.339358
0.33715
0.335022
0.332986
0.331052
0.329235
0.327549
0.32601
0.324638
0.323454
0.322483
0.321754
0.3213
0.321162
0.321388
0.322035
0.323174
0.32489
0.327294
0.330522
0.334753
0.340228
0.347276
0.356376
0.368255
0.384099
0.405925
0.437237
0.483921
0.554909
0.661633
0.815434
1.02395
1.28504
1.58322
1.86096
0.266522
0.265383
0.264265
0.263172
0.262107
0.261067
0.260054
0.259067
0.258105
0.257165
0.256248
0.255349
0.254468
0.253601
0.252747
0.251904
0.251068
0.250238
0.249412
0.248589
0.247768
0.246948
0.24613
0.245312
0.244497
0.243687
0.242883
0.24209
0.241311
0.240552
0.239818
0.239118
0.238457
0.237844
0.237291
0.236809
0.236415
0.236126
0.23596
0.235937
0.236082
0.236417
0.236968
0.237761
0.238821
0.240172
0.241835
0.243827
0.246161
0.248839
0.251855
0.255193
0.258822
0.2627
0.26677
0.270962
0.275194
0.279377
0.283415
0.287214
0.290681
0.293734
0.296304
0.298337
0.299801
0.300681
0.300979
0.300718
0.299931
0.298664
0.29697
0.294907
0.292532
0.289903
0.287077
0.284104
0.281033
0.277909
0.274768
0.271648
0.268578
0.265587
0.262697
0.259931
0.257305
0.254836
0.252538
0.250424
0.248502
0.246785
0.24528
0.243997
0.242945
0.242136
0.241581
0.241298
0.241306
0.24163
0.242297
0.24334
0.244858
0.246905
0.249533
0.25279
0.256711
0.261313
0.266583
0.272481
0.278938
0.285857
0.293123
0.300609
0.308187
0.315736
0.323144
0.330318
0.33718
0.343673
0.349753
0.355393
0.360578
0.365304
0.369574
0.373397
0.376788
0.379762
0.38234
0.38454
0.386383
0.38789
0.389079
0.38997
0.390581
0.39093
0.391032
0.390905
0.390562
0.390018
0.389287
0.38838
0.387311
0.386091
0.384731
0.383242
0.381634
0.379918
0.378103
0.376198
0.374213
0.372157
0.370038
0.367865
0.365647
0.363391
0.361106
0.3588
0.356481
0.354157
0.351836
0.349527
0.347237
0.344975
0.34275
0.340572
0.338449
0.336393
0.334414
0.332525
0.330738
0.329067
0.32753
0.326143
0.324927
0.323906
0.323105
0.322555
0.322291
0.322357
0.3228
0.32368
0.325068
0.327052
0.329739
0.333265
0.337802
0.343577
0.350897
0.360197
0.372121
0.38769
0.408607
0.437796
0.48017
0.543305
0.637187
0.772118
0.955386
1.18592
1.45101
1.70431
0.26686
0.265734
0.26463
0.263551
0.262499
0.261475
0.260477
0.259507
0.258562
0.257641
0.256744
0.255867
0.255009
0.254168
0.25334
0.252524
0.251718
0.250918
0.250124
0.249333
0.248544
0.247756
0.246968
0.246182
0.245396
0.244613
0.243834
0.243063
0.242303
0.241559
0.240836
0.240142
0.239483
0.238866
0.238303
0.237807
0.237391
0.237073
0.23687
0.236803
0.236891
0.237159
0.237629
0.238325
0.239271
0.240487
0.241994
0.243804
0.245927
0.248364
0.251106
0.254135
0.25742
0.260918
0.264573
0.268316
0.272071
0.275753
0.279272
0.282539
0.285472
0.287996
0.290048
0.291585
0.292576
0.293014
0.292904
0.29227
0.291146
0.289576
0.287612
0.285309
0.282721
0.279903
0.276909
0.273787
0.270582
0.267336
0.264085
0.260861
0.257694
0.254608
0.251625
0.248762
0.246036
0.243461
0.241048
0.238806
0.236746
0.234874
0.233198
0.231725
0.230462
0.229418
0.228602
0.228024
0.2277
0.227646
0.22788
0.228445
0.229414
0.230832
0.232747
0.235208
0.238262
0.241946
0.246279
0.251259
0.256855
0.26301
0.269641
0.276646
0.283911
0.291316
0.298746
0.306095
0.31327
0.320192
0.326802
0.333051
0.338909
0.344354
0.349377
0.353976
0.358154
0.36192
0.365286
0.368266
0.370877
0.373136
0.375058
0.376661
0.377963
0.378978
0.379724
0.380215
0.380466
0.380492
0.380305
0.37992
0.379348
0.378602
0.377693
0.376634
0.375434
0.374105
0.372657
0.3711
0.369444
0.367699
0.365874
0.363978
0.362022
0.360013
0.357962
0.355875
0.353764
0.351635
0.349499
0.347364
0.34524
0.343135
0.34106
0.339025
0.33704
0.335117
0.333267
0.331505
0.329843
0.328298
0.326887
0.325629
0.324546
0.323662
0.323006
0.322611
0.322512
0.322755
0.32339
0.324479
0.326093
0.328322
0.331272
0.335076
0.339904
0.34597
0.353562
0.363073
0.375077
0.39045
0.410623
0.438007
0.476639
0.532819
0.615083
0.732611
0.892309
1.09411
1.32781
1.55634
0.267038
0.26592
0.264822
0.263751
0.262707
0.26169
0.260702
0.25974
0.258805
0.257895
0.257009
0.256145
0.255301
0.254474
0.253663
0.252864
0.252076
0.251296
0.250521
0.249751
0.248983
0.248216
0.247448
0.246681
0.245914
0.245147
0.244384
0.243625
0.242874
0.242136
0.241416
0.24072
0.240053
0.239425
0.238845
0.238327
0.237882
0.237528
0.237282
0.237161
0.237186
0.237379
0.237762
0.238356
0.239182
0.24026
0.241607
0.243234
0.245147
0.247345
0.249819
0.252548
0.2555
0.258632
0.26189
0.265208
0.268512
0.271722
0.274754
0.277527
0.279965
0.282
0.283579
0.284663
0.285229
0.285272
0.284802
0.283844
0.282432
0.28061
0.278429
0.275939
0.273196
0.27025
0.267152
0.263948
0.260681
0.25739
0.254109
0.250866
0.247688
0.244596
0.241608
0.23874
0.236003
0.233407
0.230961
0.228671
0.226544
0.224584
0.222797
0.221188
0.219762
0.218526
0.217486
0.216651
0.216032
0.215642
0.21549
0.215645
0.216143
0.217017
0.218309
0.220064
0.222328
0.225148
0.228562
0.232594
0.237249
0.242509
0.24833
0.254644
0.261361
0.268382
0.275595
0.282892
0.290172
0.29734
0.304319
0.311042
0.31746
0.323535
0.329239
0.334556
0.339479
0.344005
0.348138
0.351884
0.355254
0.358259
0.360914
0.363231
0.365225
0.366912
0.368305
0.369419
0.370268
0.370867
0.371229
0.371366
0.371293
0.371021
0.370563
0.36993
0.369135
0.368189
0.367102
0.365886
0.364552
0.363109
0.361568
0.359939
0.358233
0.356458
0.354625
0.352743
0.350823
0.348873
0.346903
0.344924
0.342945
0.340976
0.339029
0.337114
0.335242
0.333427
0.331681
0.330018
0.328453
0.327005
0.32569
0.32453
0.323548
0.32277
0.322226
0.32195
0.321982
0.322366
0.323158
0.324418
0.326222
0.328659
0.331837
0.335888
0.340975
0.347306
0.35515
0.364867
0.376965
0.392199
0.411764
0.43763
0.473067
0.523209
0.595186
0.696994
0.83506
1.01018
1.21438
1.41834
0.267073
0.265956
0.26486
0.26379
0.262748
0.261733
0.260746
0.259787
0.258854
0.257948
0.257066
0.256206
0.255368
0.254547
0.253743
0.252953
0.252173
0.251402
0.250638
0.249878
0.24912
0.248364
0.247607
0.246849
0.24609
0.24533
0.244571
0.243816
0.243065
0.242325
0.241598
0.240892
0.240211
0.239563
0.23896
0.238412
0.237932
0.237536
0.237239
0.237059
0.237016
0.23713
0.23742
0.237907
0.238611
0.239548
0.240734
0.242177
0.243882
0.245846
0.248057
0.250493
0.253124
0.255905
0.258784
0.261697
0.264574
0.26734
0.269916
0.272229
0.274209
0.275797
0.276945
0.277622
0.27781
0.277508
0.276731
0.275502
0.27386
0.271847
0.269511
0.266905
0.26408
0.261086
0.25797
0.254778
0.251549
0.248319
0.245118
0.241974
0.238907
0.235934
0.23307
0.230323
0.227702
0.225211
0.222853
0.220632
0.218548
0.216603
0.214799
0.213138
0.211625
0.210262
0.209057
0.208016
0.207148
0.206468
0.205996
0.205798
0.205893
0.206306
0.207068
0.208214
0.209787
0.211831
0.214391
0.217505
0.221203
0.225499
0.230386
0.235834
0.24179
0.248181
0.254917
0.261901
0.26903
0.276206
0.283337
0.290342
0.297153
0.303711
0.309975
0.315909
0.321493
0.326709
0.331552
0.336017
0.340109
0.343831
0.347191
0.350201
0.35287
0.355212
0.357239
0.358963
0.360399
0.361559
0.362457
0.363107
0.36352
0.36371
0.363689
0.363471
0.363066
0.362487
0.361746
0.360855
0.359824
0.358665
0.357389
0.356007
0.35453
0.352967
0.351331
0.349631
0.347877
0.346081
0.344252
0.342401
0.34054
0.338679
0.33683
0.335004
0.333215
0.331475
0.329799
0.328202
0.326699
0.32531
0.324054
0.322953
0.322032
0.321318
0.320843
0.320643
0.32076
0.32124
0.32214
0.323524
0.325468
0.328064
0.33142
0.335667
0.340966
0.347517
0.355574
0.365468
0.377653
0.392777
0.411838
0.436431
0.46917
0.51416
0.577213
0.665113
0.783685
0.934372
1.11118
1.29125
0.266964
0.265844
0.264744
0.263671
0.262624
0.261606
0.260614
0.259651
0.258715
0.257805
0.25692
0.256059
0.255218
0.254397
0.253592
0.252802
0.252023
0.251253
0.25049
0.249732
0.248976
0.248221
0.247465
0.246707
0.245948
0.245187
0.244425
0.243664
0.242906
0.242155
0.241415
0.240691
0.239988
0.239315
0.238682
0.238098
0.237577
0.237133
0.236781
0.236538
0.236422
0.236452
0.236646
0.237024
0.237603
0.238398
0.239423
0.240683
0.242183
0.243917
0.245871
0.248025
0.250344
0.252788
0.255305
0.257833
0.260307
0.262655
0.264807
0.266694
0.268254
0.269435
0.270198
0.270516
0.270378
0.269788
0.268763
0.26733
0.265527
0.263398
0.260992
0.25836
0.255552
0.252618
0.249602
0.246549
0.243494
0.240471
0.237506
0.234621
0.231833
0.229152
0.226587
0.22414
0.221813
0.219604
0.217509
0.215525
0.213648
0.211874
0.2102
0.208624
0.207149
0.205774
0.204505
0.203348
0.202312
0.201417
0.200715
0.200247
0.200028
0.200076
0.200411
0.201062
0.202061
0.203444
0.205254
0.207534
0.210322
0.213653
0.217547
0.22201
0.227025
0.232556
0.238547
0.244921
0.251595
0.258473
0.265463
0.272474
0.279424
0.286241
0.292861
0.299236
0.305324
0.311097
0.316532
0.321615
0.326339
0.330699
0.334698
0.338339
0.34163
0.344577
0.347192
0.349485
0.351468
0.353152
0.354552
0.355678
0.356545
0.357164
0.357549
0.357713
0.357667
0.357425
0.356998
0.3564
0.355641
0.354735
0.353692
0.352524
0.351243
0.34986
0.348387
0.346834
0.345213
0.343535
0.341812
0.340054
0.338274
0.336483
0.334694
0.33292
0.331173
0.329468
0.327821
0.326246
0.324762
0.323388
0.322145
0.321055
0.320146
0.319446
0.318988
0.318811
0.318956
0.319475
0.320424
0.32187
0.323892
0.326582
0.330051
0.334428
0.339873
0.34658
0.35479
0.364809
0.377045
0.39206
0.410682
0.434199
0.464671
0.505322
0.560773
0.636615
0.737959
0.866651
1.01836
1.17563
0.2667
0.265571
0.264462
0.263379
0.262323
0.261295
0.260295
0.259322
0.258377
0.257458
0.256564
0.255694
0.254845
0.254016
0.253204
0.252407
0.251622
0.250846
0.250078
0.249314
0.248552
0.247791
0.247029
0.246265
0.245497
0.244727
0.243955
0.243181
0.242409
0.241641
0.240882
0.240134
0.239405
0.238701
0.238032
0.237409
0.236842
0.236346
0.235935
0.235625
0.235433
0.235376
0.235473
0.23574
0.236193
0.236847
0.23771
0.238791
0.240089
0.241597
0.243303
0.245182
0.247203
0.249324
0.251495
0.253659
0.255753
0.257713
0.259472
0.260969
0.262151
0.262972
0.263399
0.263415
0.263013
0.262202
0.261003
0.259447
0.257574
0.25543
0.253065
0.250529
0.247874
0.245147
0.242392
0.239651
0.236957
0.234338
0.231818
0.229411
0.227127
0.224972
0.222944
0.221039
0.21925
0.217567
0.215979
0.214475
0.213042
0.211671
0.210352
0.209079
0.207848
0.206656
0.205506
0.204403
0.203357
0.202405
0.201635
0.201053
0.200671
0.200501
0.200558
0.200862
0.201436
0.202307
0.203509
0.205079
0.207057
0.209483
0.212393
0.215815
0.219764
0.22424
0.229222
0.234671
0.240531
0.24673
0.253187
0.259817
0.266533
0.273254
0.279904
0.286418
0.29274
0.298823
0.30463
0.310133
0.315311
0.320152
0.324645
0.328788
0.33258
0.336025
0.339128
0.341898
0.344342
0.34647
0.348295
0.349827
0.351079
0.352062
0.35279
0.353275
0.35353
0.353568
0.3534
0.353041
0.352502
0.351796
0.350935
0.349931
0.348797
0.347545
0.346186
0.344732
0.343196
0.341589
0.339923
0.33821
0.336463
0.334695
0.332918
0.331145
0.329392
0.327672
0.326003
0.324399
0.322882
0.321469
0.320183
0.319049
0.318094
0.317348
0.316846
0.316627
0.316736
0.317224
0.318152
0.319587
0.32161
0.324317
0.327816
0.332241
0.337748
0.344525
0.352805
0.362874
0.375099
0.38997
0.408179
0.430759
0.459319
0.496342
0.545414
0.610999
0.697414
0.806661
0.935734
1.07165
0.26625
0.265108
0.263984
0.262887
0.261817
0.260774
0.259759
0.258772
0.257812
0.256878
0.25597
0.255086
0.254224
0.253382
0.252557
0.251747
0.25095
0.250163
0.249383
0.248607
0.247834
0.247061
0.246287
0.24551
0.244729
0.243944
0.243156
0.242365
0.241573
0.240784
0.24
0.239225
0.238464
0.237726
0.237018
0.236351
0.235736
0.235185
0.234713
0.234334
0.234064
0.23392
0.233919
0.234075
0.234404
0.234917
0.235623
0.236527
0.237628
0.238918
0.240383
0.241998
0.243733
0.245547
0.247392
0.249215
0.250958
0.25256
0.253963
0.255113
0.255964
0.256479
0.256633
0.256415
0.255826
0.254881
0.253606
0.252036
0.250215
0.248192
0.246019
0.243747
0.241428
0.239108
0.236833
0.234637
0.232552
0.230601
0.2288
0.227158
0.225677
0.224353
0.223176
0.222134
0.22121
0.220384
0.219638
0.218949
0.218299
0.217668
0.217041
0.216403
0.215744
0.215058
0.214341
0.213594
0.212836
0.212161
0.211616
0.211205
0.21093
0.210798
0.210815
0.210994
0.211349
0.211899
0.212669
0.213689
0.214992
0.216616
0.218601
0.220985
0.223801
0.227076
0.230821
0.235034
0.239696
0.244771
0.250207
0.25594
0.261898
0.268003
0.274179
0.280353
0.286457
0.292431
0.298224
0.303792
0.309101
0.314125
0.318842
0.323238
0.327306
0.331041
0.334441
0.33751
0.340251
0.342672
0.344779
0.346584
0.348095
0.349325
0.350284
0.350985
0.35144
0.351661
0.351662
0.351454
0.35105
0.350463
0.349706
0.348791
0.34773
0.346536
0.345222
0.3438
0.342281
0.34068
0.339007
0.337276
0.3355
0.333692
0.331865
0.330034
0.328213
0.326417
0.324664
0.32297
0.321356
0.319841
0.318449
0.317204
0.316136
0.315275
0.314658
0.314325
0.314321
0.314701
0.315525
0.316864
0.318801
0.321431
0.324866
0.329238
0.334704
0.341448
0.349694
0.359711
0.371834
0.386495
0.404273
0.425999
0.452915
0.486902
0.530673
0.587674
0.6614
0.753786
0.862819
0.979118
0.265572
0.26441
0.263268
0.262151
0.261061
0.259999
0.258964
0.257957
0.256978
0.256026
0.255099
0.254196
0.253316
0.252455
0.251613
0.250786
0.249972
0.249168
0.248372
0.24758
0.246791
0.246001
0.245211
0.244416
0.243618
0.242814
0.242006
0.241194
0.24038
0.239565
0.238754
0.237948
0.237154
0.23638
0.235632
0.23492
0.234255
0.233649
0.233115
0.232667
0.232321
0.232091
0.231992
0.23204
0.232247
0.232624
0.233177
0.233911
0.234823
0.235904
0.237139
0.238505
0.23997
0.241497
0.243041
0.244551
0.245974
0.247257
0.24835
0.249205
0.249786
0.250065
0.250025
0.249664
0.248991
0.248028
0.246809
0.245375
0.243774
0.242058
0.240284
0.238503
0.236769
0.235127
0.233618
0.232275
0.231123
0.23018
0.229453
0.228943
0.228644
0.228543
0.22862
0.228854
0.229219
0.229685
0.230223
0.230804
0.231399
0.23198
0.232522
0.233003
0.233405
0.233713
0.233918
0.234019
0.234052
0.234177
0.234357
0.23458
0.234838
0.235125
0.235441
0.23579
0.236181
0.23663
0.237154
0.237781
0.238539
0.239464
0.240594
0.24197
0.243631
0.245618
0.247963
0.250691
0.253815
0.257336
0.26124
0.265498
0.270066
0.274893
0.279918
0.285075
0.2903
0.295528
0.3007
0.305762
0.310668
0.315378
0.31986
0.324086
0.328039
0.331702
0.335066
0.338126
0.34088
0.343329
0.345475
0.347325
0.348885
0.350163
0.35117
0.351914
0.352406
0.352658
0.352682
0.352489
0.352091
0.351501
0.350731
0.349793
0.348699
0.347463
0.346096
0.34461
0.34302
0.341336
0.339572
0.33774
0.335855
0.333928
0.331974
0.330008
0.328044
0.326099
0.324189
0.322332
0.320547
0.318857
0.317283
0.315853
0.314596
0.313542
0.312729
0.312199
0.311998
0.312181
0.31281
0.313957
0.315707
0.318155
0.321416
0.325621
0.330927
0.337515
0.345601
0.355441
0.367345
0.381695
0.398984
0.419878
0.445333
0.476749
0.516124
0.566026
0.629153
0.707202
0.79883
0.897494
0.264606
0.263419
0.262251
0.26111
0.259996
0.25891
0.257851
0.256821
0.255818
0.254842
0.253892
0.252967
0.252064
0.251182
0.250319
0.249471
0.248636
0.247812
0.246996
0.246185
0.245376
0.244567
0.243757
0.242943
0.242124
0.241299
0.240469
0.239634
0.238795
0.237955
0.237115
0.236278
0.235451
0.23464
0.233852
0.233097
0.232384
0.231724
0.231131
0.230618
0.230198
0.229886
0.229696
0.229641
0.229732
0.22998
0.230389
0.230963
0.231697
0.232583
0.233605
0.23474
0.235959
0.237225
0.238498
0.239732
0.24088
0.241897
0.242738
0.243369
0.243762
0.243898
0.243773
0.243393
0.242779
0.241962
0.240983
0.239891
0.23874
0.237589
0.236494
0.23551
0.23469
0.234076
0.233706
0.233606
0.233796
0.234284
0.235071
0.236148
0.2375
0.239104
0.240934
0.242957
0.245139
0.247441
0.249826
0.252253
0.254685
0.257085
0.259417
0.261648
0.263752
0.265704
0.267485
0.269097
0.270627
0.272229
0.273743
0.275139
0.276394
0.277492
0.278426
0.279199
0.279822
0.280311
0.280693
0.280997
0.281256
0.28151
0.281798
0.282164
0.282652
0.283304
0.284163
0.285267
0.286649
0.288332
0.290333
0.292655
0.295289
0.298217
0.301408
0.304821
0.30841
0.312126
0.315913
0.319722
0.323502
0.327207
0.330797
0.334235
0.337492
0.340543
0.343368
0.345952
0.348284
0.350358
0.35217
0.353718
0.355005
0.356033
0.356808
0.357336
0.357625
0.357682
0.357519
0.357143
0.356566
0.355799
0.354851
0.353736
0.352463
0.351046
0.349495
0.347822
0.346039
0.34416
0.342195
0.340158
0.338062
0.335919
0.333744
0.331551
0.329355
0.327172
0.325018
0.322912
0.320873
0.318923
0.317084
0.315384
0.313851
0.312518
0.311421
0.310603
0.310111
0.310001
0.310334
0.311185
0.312638
0.31479
0.317755
0.321666
0.326678
0.33297
0.340751
0.350268
0.361808
0.375716
0.392414
0.412444
0.436537
0.46572
0.501423
0.545484
0.599875
0.665953
0.742764
0.825917
0.263273
0.262056
0.26086
0.25969
0.258547
0.257433
0.256346
0.255288
0.254257
0.253255
0.252279
0.251328
0.2504
0.249493
0.248606
0.247735
0.246877
0.246031
0.245193
0.24436
0.24353
0.242701
0.24187
0.241035
0.240195
0.23935
0.238498
0.23764
0.236778
0.235912
0.235045
0.23418
0.233322
0.232478
0.231654
0.230858
0.230102
0.229395
0.228749
0.228176
0.22769
0.227305
0.227032
0.226884
0.226871
0.227003
0.227283
0.227712
0.228287
0.228999
0.229831
0.230763
0.231768
0.232811
0.233858
0.234868
0.235802
0.236624
0.2373
0.237805
0.238124
0.238252
0.238196
0.237976
0.237624
0.237182
0.236701
0.236238
0.235854
0.23561
0.235567
0.235777
0.236291
0.237147
0.238376
0.239997
0.242021
0.244448
0.247269
0.250467
0.254019
0.257892
0.262053
0.26646
0.271069
0.275833
0.280704
0.285632
0.290566
0.295457
0.300255
0.304917
0.309398
0.313662
0.317681
0.321478
0.325261
0.328828
0.332079
0.334955
0.337422
0.33946
0.341069
0.342264
0.34307
0.34352
0.343655
0.343521
0.343165
0.342635
0.341981
0.341253
0.340498
0.339764
0.339096
0.338536
0.338125
0.337895
0.337876
0.338091
0.338552
0.339266
0.340229
0.341428
0.342844
0.34445
0.346212
0.348093
0.350056
0.352059
0.354066
0.356039
0.357944
0.359752
0.361437
0.362975
0.364348
0.365542
0.366546
0.36735
0.367949
0.368341
0.368524
0.3685
0.368272
0.367843
0.367219
0.366406
0.365412
0.364245
0.362912
0.361423
0.359788
0.358016
0.356118
0.354103
0.351983
0.349769
0.347473
0.345105
0.342678
0.340205
0.337699
0.335173
0.332642
0.330121
0.327626
0.325176
0.322788
0.320485
0.31829
0.316227
0.314327
0.312621
0.311145
0.309943
0.309061
0.308554
0.308486
0.308929
0.309968
0.3117
0.31424
0.31772
0.322293
0.328135
0.335452
0.344479
0.355489
0.368795
0.384765
0.403845
0.426596
0.453761
0.486334
0.525568
0.572812
0.62904
0.693475
0.763284
0.261481
0.26023
0.259
0.257798
0.256623
0.255477
0.254359
0.25327
0.252211
0.251179
0.250174
0.249196
0.248241
0.247309
0.246396
0.245501
0.24462
0.243751
0.242891
0.242037
0.241187
0.240337
0.239486
0.238632
0.237774
0.236909
0.236038
0.235162
0.234279
0.233394
0.232504
0.231616
0.230733
0.229862
0.229009
0.228183
0.227392
0.226647
0.225959
0.22534
0.224801
0.224356
0.224016
0.223792
0.223694
0.22373
0.223902
0.224213
0.224657
0.225226
0.225906
0.226677
0.227515
0.228393
0.229279
0.230143
0.230955
0.231688
0.232323
0.232849
0.233265
0.233581
0.23382
0.234016
0.234215
0.23447
0.234843
0.235397
0.236199
0.23731
0.238789
0.240688
0.243046
0.245898
0.249263
0.253153
0.257568
0.262498
0.267925
0.273824
0.280161
0.286896
0.293985
0.301376
0.309014
0.31684
0.32479
0.332799
0.340799
0.348722
0.356501
0.364069
0.371366
0.378336
0.384953
0.391288
0.397453
0.40301
0.407882
0.412023
0.415416
0.418067
0.420003
0.421266
0.421906
0.421984
0.421563
0.420707
0.419482
0.417948
0.416168
0.414199
0.412095
0.409907
0.407684
0.40547
0.403305
0.401226
0.399267
0.397454
0.395811
0.394353
0.393093
0.392033
0.391174
0.390505
0.390014
0.389683
0.389489
0.389406
0.389408
0.389466
0.389552
0.38964
0.389704
0.389719
0.389665
0.389524
0.389278
0.388915
0.388423
0.387794
0.387022
0.386102
0.385032
0.383811
0.382439
0.380919
0.379254
0.377447
0.375505
0.373432
0.371236
0.368923
0.366501
0.363979
0.361365
0.358669
0.3559
0.353068
0.350184
0.347259
0.344306
0.341335
0.338361
0.335399
0.332463
0.329569
0.326738
0.323987
0.321341
0.318822
0.316461
0.314287
0.312338
0.310653
0.309281
0.308274
0.307695
0.307617
0.308123
0.309311
0.311292
0.314197
0.318179
0.32341
0.330091
0.338449
0.348743
0.361262
0.376334
0.394331
0.41569
0.440942
0.470755
0.505934
0.547319
0.595514
0.649773
0.708338
0.259118
0.257831
0.256565
0.255327
0.254119
0.252939
0.251789
0.250669
0.249579
0.248517
0.247484
0.246478
0.245498
0.24454
0.243603
0.242685
0.241782
0.240893
0.240013
0.239141
0.238274
0.237408
0.236542
0.235674
0.234802
0.233924
0.233041
0.232152
0.231258
0.23036
0.229457
0.228555
0.227659
0.226774
0.225906
0.225063
0.224254
0.223488
0.222776
0.222129
0.221559
0.221077
0.220695
0.220423
0.22027
0.220242
0.220344
0.220575
0.220933
0.22141
0.221994
0.22267
0.223418
0.224217
0.225046
0.225884
0.226713
0.227523
0.22831
0.229078
0.229846
0.230641
0.231503
0.232481
0.233635
0.235028
0.236727
0.238802
0.241316
0.244328
0.247891
0.252047
0.256825
0.262248
0.268324
0.275051
0.282418
0.290405
0.298981
0.308109
0.317744
0.327833
0.338318
0.349132
0.360201
0.371446
0.382782
0.394121
0.405371
0.416439
0.427236
0.437673
0.44767
0.457162
0.466158
0.474794
0.482642
0.489455
0.495192
0.499848
0.503449
0.506043
0.507693
0.508473
0.508464
0.507749
0.506411
0.504529
0.502182
0.499442
0.496379
0.493056
0.489532
0.485862
0.482096
0.47828
0.474454
0.470657
0.466921
0.463276
0.459746
0.456352
0.45311
0.45003
0.44712
0.44438
0.441809
0.439399
0.43714
0.435018
0.433018
0.431121
0.429307
0.427558
0.425852
0.42417
0.422494
0.420805
0.419087
0.417327
0.41551
0.413626
0.411664
0.409619
0.407482
0.40525
0.40292
0.400489
0.397957
0.395325
0.392595
0.389768
0.386849
0.383841
0.38075
0.377581
0.374341
0.371036
0.367674
0.364264
0.360813
0.357332
0.35383
0.350319
0.346809
0.343315
0.339849
0.336428
0.333067
0.329786
0.326605
0.323549
0.320643
0.317918
0.315408
0.313152
0.311197
0.309594
0.308405
0.307699
0.307559
0.30808
0.309372
0.311563
0.314802
0.319258
0.325127
0.332629
0.342009
0.35354
0.367517
0.384259
0.404112
0.427458
0.454726
0.486392
0.522905
0.564545
0.610519
0.659774
0.25606
0.254735
0.253433
0.252161
0.250919
0.249707
0.248526
0.247376
0.246257
0.245169
0.244111
0.243081
0.242077
0.241099
0.240142
0.239206
0.238288
0.237384
0.236491
0.235608
0.234731
0.233857
0.232985
0.232112
0.231237
0.230358
0.229474
0.228585
0.227693
0.226797
0.225897
0.224999
0.224107
0.223228
0.222366
0.22153
0.220727
0.219968
0.219262
0.218619
0.218052
0.217571
0.217188
0.216911
0.21675
0.216711
0.216799
0.217016
0.21736
0.217826
0.218407
0.219094
0.219875
0.22074
0.221677
0.222682
0.223754
0.2249
0.226134
0.227483
0.228982
0.230679
0.232628
0.234891
0.237536
0.240632
0.244246
0.248442
0.253276
0.258797
0.265043
0.272039
0.279803
0.288337
0.297633
0.307676
0.318436
0.329877
0.341953
0.354611
0.367786
0.381406
0.395391
0.40965
0.424085
0.43859
0.453056
0.467367
0.481409
0.495069
0.508242
0.520827
0.532737
0.543933
0.554444
0.564168
0.572505
0.579442
0.585003
0.589245
0.592243
0.594086
0.594867
0.594686
0.59364
0.591825
0.589334
0.586253
0.582666
0.578649
0.574272
0.569603
0.5647
0.55962
0.554414
0.549126
0.5438
0.538472
0.533176
0.527942
0.522796
0.51776
0.512851
0.508083
0.503467
0.499007
0.494707
0.490565
0.486577
0.482735
0.47903
0.475449
0.47198
0.468608
0.465317
0.462093
0.45892
0.455782
0.452666
0.449558
0.446446
0.443318
0.440164
0.436975
0.433744
0.430464
0.42713
0.423738
0.420285
0.41677
0.413191
0.409549
0.405844
0.402079
0.398256
0.394379
0.390452
0.38648
0.382467
0.378421
0.374349
0.370257
0.366154
0.36205
0.357954
0.353878
0.349833
0.345833
0.341893
0.338031
0.334266
0.330618
0.327114
0.323781
0.320651
0.317761
0.315156
0.312885
0.311006
0.309588
0.308708
0.308459
0.308947
0.310298
0.312655
0.316183
0.32107
0.327529
0.335797
0.346128
0.358796
0.374081
0.392265
0.413627
0.438432
0.466923
0.499265
0.535483
0.574708
0.616337
0.252171
0.250812
0.249479
0.248176
0.246906
0.245667
0.244462
0.243289
0.242149
0.241042
0.239967
0.238921
0.237905
0.236915
0.23595
0.235007
0.234084
0.233178
0.232286
0.231405
0.230533
0.229667
0.228805
0.227945
0.227084
0.226222
0.225358
0.224491
0.223623
0.222751
0.22188
0.221014
0.220158
0.219316
0.218496
0.217704
0.216949
0.21624
0.215587
0.215
0.214492
0.214072
0.213752
0.213542
0.213452
0.21349
0.213661
0.213971
0.214423
0.215017
0.215753
0.216632
0.217655
0.218823
0.220145
0.221633
0.223305
0.22519
0.227322
0.229748
0.23252
0.235697
0.239345
0.243529
0.248315
0.253767
0.259941
0.266886
0.27464
0.283232
0.292678
0.302981
0.314133
0.326117
0.338902
0.352449
0.36671
0.381625
0.397127
0.413137
0.429567
0.446321
0.463292
0.480363
0.497413
0.514313
0.530936
0.547154
0.562844
0.577893
0.592195
0.605659
0.618225
0.629896
0.640626
0.649791
0.657322
0.66328
0.667752
0.670838
0.672645
0.673284
0.672861
0.671482
0.669248
0.666254
0.662589
0.658338
0.653579
0.648387
0.64283
0.63697
0.630866
0.624571
0.618137
0.611607
0.605023
0.598424
0.591842
0.58531
0.578853
0.572495
0.566255
0.560149
0.554189
0.548384
0.54274
0.537257
0.531935
0.52677
0.521754
0.51688
0.512136
0.507511
0.502993
0.498568
0.494222
0.489941
0.485714
0.481526
0.477366
0.473223
0.469086
0.464946
0.460795
0.456626
0.452432
0.448208
0.443951
0.439657
0.435324
0.43095
0.426536
0.422081
0.417588
0.413057
0.408492
0.403896
0.399273
0.394627
0.389964
0.385291
0.380613
0.375939
0.371277
0.366636
0.362027
0.357462
0.352955
0.34852
0.344174
0.339938
0.335833
0.331885
0.328125
0.324587
0.321311
0.318344
0.31574
0.313564
0.311891
0.310807
0.310414
0.310832
0.312198
0.31467
0.31843
0.323681
0.330649
0.339576
0.350717
0.364325
0.380641
0.399874
0.422185
0.447663
0.476281
0.507882
0.541534
0.576911
0.247323
0.245943
0.24459
0.243271
0.241986
0.240736
0.239521
0.238342
0.237198
0.236089
0.235014
0.233972
0.232962
0.231982
0.231029
0.230101
0.229196
0.228312
0.227444
0.226591
0.225751
0.22492
0.224096
0.223278
0.222463
0.22165
0.220839
0.220029
0.219221
0.218414
0.217614
0.216826
0.216055
0.215306
0.214585
0.2139
0.213261
0.212675
0.212155
0.211711
0.211355
0.2111
0.210956
0.210938
0.211056
0.211322
0.211747
0.212341
0.213116
0.214082
0.21525
0.216635
0.218254
0.220127
0.222282
0.22475
0.22757
0.230788
0.234455
0.238627
0.243365
0.248729
0.254778
0.26157
0.269156
0.277577
0.286866
0.297046
0.308127
0.320105
0.332968
0.346689
0.361233
0.376552
0.392593
0.409289
0.426567
0.444343
0.462524
0.481011
0.499691
0.518447
0.537154
0.555682
0.573901
0.59168
0.608895
0.62543
0.641181
0.656055
0.669969
0.682864
0.694726
0.705483
0.714732
0.722211
0.728037
0.732328
0.735205
0.736787
0.737187
0.736512
0.734864
0.732338
0.729022
0.725001
0.720352
0.715149
0.709462
0.703356
0.696893
0.69013
0.683121
0.675917
0.668565
0.661109
0.65359
0.646046
0.63851
0.631016
0.623589
0.616256
0.609037
0.601951
0.595013
0.588232
0.581618
0.575174
0.568901
0.562799
0.556862
0.551085
0.545458
0.539971
0.534613
0.52937
0.52423
0.51918
0.514206
0.509295
0.504433
0.499611
0.494815
0.490036
0.485265
0.480494
0.475714
0.47092
0.466107
0.461271
0.456407
0.451515
0.446593
0.44164
0.436656
0.431643
0.426602
0.421536
0.416447
0.41134
0.406218
0.401087
0.395953
0.390822
0.385701
0.380599
0.375524
0.370488
0.365501
0.360577
0.355731
0.350981
0.346346
0.34185
0.337519
0.333386
0.329485
0.325862
0.322564
0.319652
0.317195
0.315274
0.313983
0.313435
0.313758
0.315102
0.317638
0.321558
0.327077
0.334425
0.343839
0.355555
0.369787
0.386704
0.406402
0.428885
0.454009
0.481503
0.510405
0.540574
0.241457
0.240082
0.23874
0.237433
0.236164
0.234934
0.233742
0.232589
0.231476
0.230401
0.229364
0.228364
0.227399
0.226467
0.225568
0.224697
0.223853
0.223034
0.222236
0.221458
0.220697
0.21995
0.219215
0.218491
0.217776
0.217069
0.216369
0.215677
0.214993
0.21432
0.213667
0.213039
0.21244
0.211878
0.21136
0.210895
0.210492
0.210163
0.209919
0.209775
0.209743
0.209839
0.21008
0.210481
0.211059
0.211834
0.212825
0.214052
0.215538
0.217306
0.219384
0.221803
0.224596
0.227802
0.231462
0.235622
0.240332
0.245644
0.251608
0.258277
0.265699
0.273918
0.282973
0.292893
0.303696
0.315393
0.327981
0.341444
0.355757
0.370882
0.386773
0.403372
0.420616
0.438433
0.456744
0.475463
0.494495
0.513741
0.533093
0.552437
0.571656
0.590628
0.609231
0.627348
0.644865
0.661677
0.677692
0.69283
0.707023
0.720216
0.732363
0.743444
0.753399
0.761925
0.768718
0.77393
0.777695
0.780138
0.781376
0.781514
0.780647
0.778862
0.776239
0.772851
0.768766
0.76405
0.758765
0.75297
0.746721
0.740073
0.73308
0.725791
0.718256
0.710521
0.702631
0.694627
0.68655
0.678437
0.670323
0.662239
0.654214
0.646277
0.638448
0.63075
0.623198
0.615806
0.608586
0.601543
0.594682
0.588004
0.581506
0.575185
0.569033
0.563042
0.557201
0.551497
0.54592
0.540453
0.535085
0.529801
0.524587
0.519429
0.514316
0.509236
0.504177
0.499129
0.494084
0.489034
0.483972
0.478892
0.47379
0.468663
0.463507
0.458322
0.453107
0.447861
0.442586
0.437284
0.431956
0.426606
0.421238
0.415855
0.410464
0.405068
0.399676
0.394294
0.388929
0.383592
0.378292
0.37304
0.367851
0.362739
0.357722
0.352821
0.348058
0.343461
0.339064
0.334903
0.331024
0.32748
0.324332
0.321654
0.319533
0.318069
0.317381
0.317606
0.318901
0.321446
0.325439
0.331094
0.338636
0.348283
0.360232
0.374631
0.391549
0.410947
0.432632
0.456279
0.480942
0.506616
0.234759
0.233452
0.232181
0.230952
0.229766
0.228623
0.227524
0.22647
0.225461
0.224495
0.223572
0.222692
0.221852
0.221051
0.220287
0.219557
0.21886
0.218193
0.217554
0.21694
0.21635
0.215781
0.215232
0.214702
0.214188
0.213692
0.213212
0.21275
0.212311
0.211909
0.211548
0.211237
0.210983
0.210793
0.210678
0.21065
0.21072
0.210904
0.211216
0.211675
0.212299
0.213109
0.214128
0.215379
0.21689
0.218688
0.220803
0.223268
0.226116
0.229385
0.233113
0.237341
0.242109
0.247462
0.253441
0.260089
0.267445
0.275547
0.284425
0.294106
0.304609
0.315943
0.32811
0.341099
0.354891
0.369454
0.384748
0.400722
0.417318
0.43447
0.452106
0.470148
0.488517
0.507128
0.525894
0.544723
0.563517
0.582176
0.600598
0.618678
0.636316
0.653413
0.669879
0.685631
0.7006
0.714728
0.727973
0.740301
0.75169
0.762117
0.77157
0.780014
0.787157
0.792763
0.796994
0.799991
0.801872
0.802734
0.802661
0.801726
0.799992
0.797514
0.794343
0.790527
0.786111
0.781138
0.775654
0.7697
0.763322
0.756562
0.749466
0.742075
0.734434
0.726585
0.718569
0.710427
0.702197
0.693915
0.685618
0.677336
0.669102
0.660942
0.652881
0.644942
0.637144
0.629502
0.622031
0.614739
0.607633
0.600718
0.593993
0.587457
0.581105
0.574929
0.568922
0.563072
0.557367
0.551795
0.546341
0.540991
0.53573
0.530544
0.525419
0.520342
0.515299
0.510279
0.505272
0.500267
0.495255
0.49023
0.485185
0.480114
0.475014
0.469883
0.464717
0.459516
0.45428
0.44901
0.443707
0.438374
0.433013
0.427629
0.422226
0.416808
0.411381
0.405952
0.400527
0.395115
0.389724
0.384366
0.37905
0.37379
0.368602
0.363503
0.358513
0.353656
0.34896
0.344457
0.340187
0.336195
0.332535
0.32927
0.326478
0.324245
0.322677
0.321895
0.322038
0.323267
0.325761
0.329714
0.33533
0.342813
0.352344
0.364062
0.378029
0.394201
0.412382
0.432252
0.452925
0.474512
0.228301
0.227205
0.226153
0.225152
0.224202
0.223306
0.222463
0.221673
0.220936
0.220251
0.219617
0.219032
0.218496
0.218005
0.217558
0.217152
0.216787
0.216459
0.216168
0.21591
0.215686
0.215493
0.215332
0.215202
0.215103
0.215037
0.215006
0.215015
0.21509
0.215244
0.215486
0.215825
0.216272
0.216838
0.217538
0.218388
0.219404
0.220606
0.222016
0.223656
0.225551
0.22773
0.23022
0.233054
0.236263
0.239881
0.243944
0.248485
0.253541
0.259147
0.265336
0.272139
0.279587
0.287704
0.296512
0.306027
0.31626
0.327215
0.338886
0.351264
0.364327
0.378046
0.392385
0.407298
0.422732
0.438627
0.454917
0.471533
0.488402
0.505448
0.522598
0.539775
0.556905
0.573914
0.590727
0.607266
0.623451
0.639204
0.65445
0.669117
0.683144
0.696479
0.709079
0.720916
0.731975
0.742252
0.751752
0.760485
0.768453
0.775656
0.782069
0.787415
0.791518
0.794567
0.796693
0.797991
0.798529
0.798362
0.79753
0.796064
0.793993
0.791339
0.788125
0.784372
0.780103
0.775344
0.770119
0.764459
0.758396
0.751963
0.745195
0.738131
0.730808
0.723264
0.715538
0.707668
0.699691
0.691644
0.68356
0.675474
0.667416
0.659414
0.651496
0.643684
0.636001
0.628463
0.621086
0.613884
0.606863
0.600032
0.593394
0.586948
0.580693
0.574624
0.568734
0.563015
0.557455
0.552044
0.546767
0.541612
0.536562
0.531604
0.526723
0.521904
0.517134
0.512398
0.507685
0.502982
0.49828
0.493569
0.488841
0.484089
0.479307
0.474491
0.469637
0.464744
0.459808
0.454832
0.449814
0.444756
0.439661
0.434532
0.429372
0.424185
0.418976
0.413751
0.408516
0.403278
0.398045
0.392826
0.38763
0.382468
0.377354
0.372303
0.367331
0.362458
0.357708
0.353109
0.348691
0.344495
0.340564
0.336952
0.333723
0.330951
0.328723
0.327143
0.326329
0.326416
0.327559
0.329924
0.33369
0.339037
0.34613
0.355097
0.366006
0.378828
0.393399
0.409443
0.426188
0.443847
0.226462
0.225953
0.225508
0.22513
0.224824
0.224588
0.224421
0.22432
0.224285
0.224313
0.2244
0.224545
0.224744
0.224995
0.225295
0.225643
0.226038
0.226477
0.22696
0.227487
0.22806
0.228679
0.229347
0.230066
0.230842
0.23168
0.232587
0.233592
0.234746
0.236048
0.23751
0.239144
0.240964
0.242985
0.245224
0.247698
0.250428
0.253434
0.256738
0.260365
0.264339
0.268684
0.273427
0.278593
0.284207
0.290291
0.296869
0.30396
0.311581
0.319744
0.328461
0.337735
0.347568
0.357953
0.368879
0.380331
0.392284
0.404709
0.417572
0.430829
0.444435
0.458336
0.472475
0.486793
0.501226
0.515712
0.530186
0.544588
0.558857
0.572938
0.586777
0.600326
0.613537
0.626365
0.638766
0.650694
0.662101
0.672949
0.683205
0.692845
0.701857
0.710241
0.718008
0.725179
0.731785
0.737862
0.743445
0.748558
0.753207
0.757364
0.760779
0.763287
0.765137
0.766433
0.767235
0.767576
0.767474
0.766933
0.765954
0.764533
0.762665
0.760347
0.757576
0.754353
0.750681
0.74657
0.742031
0.737082
0.731745
0.726044
0.720009
0.71367
0.707062
0.700218
0.693175
0.685968
0.678633
0.671205
0.663719
0.656206
0.648697
0.641223
0.63381
0.626483
0.619264
0.612172
0.605226
0.598438
0.591821
0.585382
0.579129
0.573062
0.567185
0.561493
0.555982
0.550647
0.545479
0.540466
0.535599
0.530863
0.526246
0.521733
0.517309
0.512961
0.508673
0.504431
0.500222
0.496033
0.491853
0.48767
0.483474
0.479259
0.475015
0.470737
0.46642
0.46206
0.457655
0.453202
0.448703
0.444156
0.439564
0.434927
0.43025
0.425535
0.420786
0.416008
0.411207
0.406389
0.401559
0.396727
0.391899
0.387087
0.382299
0.377549
0.372851
0.36822
0.363677
0.359242
0.354942
0.350808
0.346876
0.343189
0.339797
0.33676
0.334147
0.332041
0.330536
0.329741
0.329778
0.330783
0.332901
0.336278
0.341055
0.347345
0.355216
0.36466
0.375565
0.387725
0.40052
0.414252
0.246091
0.247184
0.248377
0.249668
0.251053
0.252523
0.25407
0.255683
0.257355
0.259075
0.260839
0.262638
0.264467
0.266324
0.268205
0.270109
0.272037
0.273991
0.275974
0.277992
0.280052
0.282161
0.284329
0.28657
0.288894
0.291317
0.293858
0.296594
0.299524
0.302655
0.305995
0.309551
0.313332
0.31735
0.321614
0.326135
0.330925
0.335997
0.341361
0.34703
0.353014
0.359323
0.365966
0.372949
0.380277
0.387952
0.395972
0.404335
0.413031
0.42205
0.431376
0.440988
0.450862
0.460971
0.471282
0.481757
0.492357
0.503038
0.513753
0.524456
0.535096
0.545624
0.555992
0.566155
0.57607
0.585699
0.59501
0.603976
0.612577
0.620795
0.62862
0.636042
0.643056
0.649655
0.655831
0.661574
0.666877
0.671743
0.676181
0.680212
0.683863
0.687171
0.690181
0.69294
0.695501
0.697909
0.700199
0.702376
0.704401
0.706032
0.7071
0.707916
0.70855
0.709024
0.709334
0.70946
0.709377
0.709055
0.708462
0.707571
0.706355
0.70479
0.702856
0.700538
0.697826
0.694717
0.691211
0.687318
0.683051
0.678428
0.673472
0.668209
0.662667
0.656879
0.650876
0.644692
0.63836
0.631914
0.625385
0.618806
0.612207
0.605617
0.599062
0.592568
0.586157
0.579849
0.573663
0.567613
0.561714
0.555974
0.550403
0.545004
0.539781
0.534734
0.529861
0.525158
0.520619
0.516235
0.511998
0.507895
0.503917
0.500049
0.496277
0.492589
0.488971
0.485407
0.481885
0.478392
0.474916
0.471444
0.467966
0.464473
0.460956
0.457408
0.453824
0.450197
0.446524
0.442803
0.439032
0.435209
0.431337
0.427414
0.423444
0.419428
0.41537
0.411274
0.407144
0.402984
0.398801
0.394601
0.39039
0.386176
0.381968
0.377775
0.373609
0.369482
0.365408
0.361405
0.357493
0.353695
0.350038
0.346554
0.343282
0.340265
0.337557
0.335218
0.333317
0.331936
0.331163
0.331101
0.331854
0.333533
0.336243
0.340074
0.345085
0.351284
0.358608
0.366929
0.375809
0.385597
0.337695
0.34208
0.346533
0.351019
0.355514
0.35999
0.364421
0.368787
0.373069
0.377257
0.38134
0.385314
0.38918
0.39294
0.396602
0.400174
0.403672
0.407109
0.410503
0.413876
0.417248
0.420644
0.424088
0.427608
0.431229
0.434978
0.438883
0.442972
0.447202
0.451568
0.456063
0.460682
0.465421
0.470277
0.475247
0.480328
0.485519
0.490818
0.496221
0.501724
0.507324
0.513014
0.518785
0.52463
0.530534
0.536485
0.542464
0.548452
0.554426
0.560362
0.566231
0.572004
0.577649
0.583132
0.588419
0.593476
0.598269
0.602767
0.60694
0.610762
0.614214
0.617279
0.61995
0.622226
0.624116
0.625633
0.626801
0.627647
0.628204
0.628507
0.62859
0.628485
0.62822
0.627819
0.627294
0.626663
0.625945
0.625166
0.624351
0.623531
0.622738
0.622012
0.621395
0.620929
0.620655
0.6206
0.620772
0.621111
0.621339
0.621307
0.621367
0.621558
0.621871
0.622275
0.622725
0.623171
0.62356
0.623843
0.623974
0.623912
0.623621
0.623065
0.622212
0.621039
0.619525
0.617659
0.615436
0.612856
0.609927
0.606662
0.603078
0.599195
0.595039
0.590635
0.586012
0.5812
0.576228
0.571128
0.565928
0.560657
0.555345
0.550018
0.5447
0.539417
0.53419
0.529038
0.52398
0.519031
0.514205
0.509512
0.504963
0.500562
0.496315
0.492225
0.48829
0.48451
0.48088
0.477394
0.474047
0.470829
0.467731
0.464741
0.461848
0.45904
0.456305
0.45363
0.451002
0.448409
0.445839
0.443281
0.440725
0.43816
0.435579
0.432973
0.430335
0.427661
0.424946
0.422186
0.419379
0.416523
0.413618
0.410663
0.407661
0.404612
0.401518
0.398383
0.39521
0.392002
0.388764
0.3855
0.382216
0.378917
0.375609
0.3723
0.368998
0.36571
0.362448
0.359223
0.356049
0.352942
0.349919
0.347003
0.34422
0.341598
0.339174
0.336986
0.335082
0.333514
0.332341
0.331629
0.331448
0.331868
0.33296
0.334784
0.337384
0.340776
0.344933
0.349795
0.355117
0.361215
0.570513
0.576742
0.582783
0.588592
0.594147
0.599428
0.604425
0.609136
0.613567
0.61773
0.62164
0.62532
0.628793
0.632088
0.635231
0.638255
0.641189
0.644066
0.646917
0.649773
0.652666
0.655625
0.658681
0.661863
0.665194
0.668694
0.67236
0.676062
0.679731
0.683332
0.686835
0.690213
0.693443
0.696505
0.699378
0.702044
0.704484
0.706681
0.708615
0.710267
0.711618
0.712645
0.713329
0.713647
0.713575
0.713092
0.712172
0.710795
0.708938
0.70658
0.703704
0.700294
0.69634
0.691835
0.686778
0.681176
0.675042
0.6684
0.661281
0.653727
0.645789
0.637528
0.629014
0.620326
0.611547
0.602765
0.594069
0.585544
0.57727
0.569314
0.561732
0.554563
0.547828
0.541535
0.535665
0.530228
0.52524
0.520683
0.516535
0.512781
0.509418
0.506453
0.503901
0.50178
0.500104
0.498883
0.498086
0.49726
0.49637
0.495813
0.495607
0.495731
0.496141
0.49678
0.497588
0.498504
0.499467
0.500423
0.501324
0.502143
0.502838
0.503367
0.503692
0.503782
0.503616
0.503175
0.502449
0.501435
0.500133
0.498552
0.496701
0.494595
0.492252
0.489692
0.486937
0.48401
0.480935
0.477737
0.474438
0.471064
0.467638
0.464181
0.460715
0.457261
0.453836
0.450459
0.447144
0.443905
0.440756
0.437705
0.434763
0.431935
0.429227
0.426642
0.424182
0.421846
0.419633
0.41754
0.415563
0.413695
0.411929
0.410259
0.408675
0.407168
0.405729
0.404347
0.403012
0.401716
0.400447
0.399197
0.397957
0.396719
0.395476
0.394222
0.39295
0.391655
0.390335
0.388987
0.387607
0.386196
0.384753
0.383277
0.381771
0.380237
0.378675
0.37709
0.375484
0.373861
0.372225
0.370581
0.368933
0.367287
0.365647
0.36402
0.362412
0.36083
0.359281
0.357773
0.356316
0.354919
0.353596
0.35236
0.351225
0.35021
0.349335
0.348625
0.348106
0.34781
0.34777
0.348025
0.348617
0.34959
0.350989
0.352856
0.355229
0.358138
0.361597
0.365611
0.370129
0.375392
0.898456
0.897491
0.896855
0.896525
0.896518
0.896852
0.897536
0.898574
0.899966
0.901708
0.90379
0.906197
0.90891
0.911905
0.915155
0.918629
0.922293
0.92611
0.93004
0.934042
0.93807
0.942079
0.946019
0.949841
0.95349
0.956909
0.960021
0.962684
0.96486
0.966503
0.967566
0.967996
0.967736
0.96673
0.964917
0.962238
0.95863
0.954036
0.948396
0.941654
0.933758
0.92466
0.914317
0.902695
0.889765
0.875511
0.859924
0.843012
0.824792
0.805298
0.784578
0.762697
0.739739
0.715801
0.690999
0.665466
0.639349
0.61281
0.586023
0.559172
0.532449
0.50605
0.480171
0.455008
0.430748
0.407566
0.385623
0.36506
0.345991
0.328503
0.312652
0.298458
0.285907
0.274952
0.265515
0.257691
0.251445
0.246411
0.242357
0.239127
0.236609
0.234721
0.233397
0.232581
0.232229
0.232301
0.23275
0.233709
0.235148
0.23686
0.238764
0.240819
0.243004
0.245297
0.247679
0.250128
0.252625
0.255145
0.257687
0.260358
0.263076
0.26579
0.268468
0.271088
0.273634
0.276092
0.278452
0.280706
0.28285
0.284879
0.286793
0.288594
0.290283
0.291865
0.293344
0.294727
0.29602
0.29723
0.298367
0.299436
0.300448
0.30141
0.30233
0.303217
0.304078
0.304922
0.305756
0.306586
0.307419
0.308261
0.309118
0.309995
0.310896
0.311825
0.312786
0.313782
0.314815
0.315887
0.316999
0.318152
0.319348
0.320585
0.321865
0.323187
0.324551
0.325957
0.327403
0.328891
0.33042
0.33199
0.333603
0.33526
0.336962
0.338713
0.340515
0.342373
0.344291
0.346276
0.348335
0.350475
0.352706
0.355038
0.357483
0.360053
0.362763
0.365628
0.368666
0.371895
0.375335
0.379009
0.382941
0.387156
0.391684
0.396553
0.401799
0.407455
0.41356
0.420157
0.427289
0.435006
0.443359
0.452405
0.462205
0.472823
0.48433
0.496801
0.510318
0.524965
0.540837
0.55803
0.576648
0.596798
0.618595
0.642153
0.667587
0.695011
0.724529
0.756234
0.790236
0.826637
0.0345519
0.0459715
0.0628167
0.0864037
0.125061
0.164046
0.201984
0.238802
0.275266
0.311153
0.345273
0.377491
0.407753
0.436064
0.462477
0.48708
0.509986
0.531325
0.55124
0.569878
0.587386
0.603911
0.619589
0.634549
0.648907
0.662766
0.676211
0.689315
0.70213
0.714696
0.727034
0.739153
0.751048
0.762701
0.774086
0.785165
0.795897
0.806233
0.816124
0.825517
0.834362
0.842612
0.850222
0.857157
0.863387
0.8689
0.873699
0.877638
0.880646
0.882696
0.883761
0.883815
0.882833
0.88079
0.877662
0.873423
0.868048
0.861511
0.853787
0.844854
0.834691
0.823285
0.810625
0.796708
0.781539
0.765129
0.747493
0.728641
0.708581
0.687343
0.664982
0.641551
0.617062
0.591529
0.564582
0.547192
0.538191
0.529383
0.520132
0.510379
0.500192
0.489676
0.47895
0.468124
0.457301
0.446568
0.435997
0.425647
0.415564
0.40578
0.396318
0.387193
0.378412
0.369976
0.361882
0.354124
0.346692
0.339576
0.332764
0.326242
0.319997
0.314017
0.308287
0.302795
0.297528
0.292474
0.287622
0.282959
0.278477
0.274173
0.270038
0.266063
0.262276
0.258849
0.255772
0.252941
0.250292
0.247784
0.245389
0.243089
0.240872
0.238729
0.236653
0.234638
0.232681
0.230778
0.228925
0.227122
0.225364
0.22365
0.221977
0.220345
0.21875
0.217191
0.215666
0.214174
0.212712
0.211279
0.209873
0.208492
0.207134
0.205797
0.204478
0.203176
0.201888
0.200612
0.199344
0.198082
0.196822
0.19556
0.194295
0.19302
0.191731
0.190425
0.189095
0.187736
0.186341
0.184904
0.183417
0.181872
0.18026
0.178571
0.176795
0.174918
0.172928
0.170811
0.16855
0.166128
0.163524
0.160719
0.157687
0.154404
0.15084
0.146967
0.142749
0.138154
0.133145
0.127686
0.121743
0.115289
0.108304
0.10079
0.0927767
0.0843455
0.0756522
0.0669687
0.0587096
0.0515424
0.0484368
0.0524182
0.0548562
0.0544036
0.0515189
0.048293
0.046587
0.0463228
0.0462063
0.0452817
0.0429362
0.0389421
0.060136
0.104122
0.161415
0.210652
0.2687
0.327086
0.382574
0.434868
0.483942
0.529659
0.571898
0.6107
0.646104
0.678173
0.707002
0.732719
0.755481
0.775474
0.792903
0.807992
0.820977
0.832103
0.841617
0.849767
0.856796
0.862941
0.868429
0.873471
0.878268
0.883003
0.887839
0.892925
0.898389
0.904342
0.910876
0.918069
0.92598
0.934657
0.944136
0.954441
0.965591
0.977597
0.990471
1.00422
1.01888
1.0345
1.0507
1.06743
1.08468
1.10239
1.12047
1.13881
1.15729
1.17577
1.1941
1.21207
1.2295
1.24617
1.26182
1.2762
1.28906
1.30013
1.30915
1.31585
1.31999
1.32133
1.31965
1.31475
1.30638
1.2943
1.27823
1.25784
1.23262
1.20103
1.16424
1.12877
1.09481
1.06028
1.02523
0.989859
0.954432
0.919212
0.884444
0.850344
0.817094
0.784838
0.753687
0.723716
0.694974
0.667484
0.641252
0.616264
0.592496
0.569914
0.548479
0.528145
0.508865
0.490589
0.473269
0.456854
0.441299
0.426556
0.412581
0.399332
0.386768
0.374851
0.363545
0.352815
0.342626
0.332925
0.323673
0.31487
0.306493
0.298534
0.290983
0.28382
0.277024
0.270574
0.264452
0.258639
0.25312
0.247881
0.242908
0.238188
0.233711
0.229465
0.22544
0.221625
0.218012
0.214592
0.211356
0.208295
0.205402
0.20267
0.20009
0.197657
0.195363
0.193201
0.191167
0.189253
0.187454
0.185763
0.184177
0.182688
0.181293
0.179986
0.178762
0.177617
0.176545
0.175543
0.174605
0.173728
0.172907
0.172137
0.171415
0.170737
0.170097
0.169492
0.168917
0.168368
0.16784
0.167329
0.166831
0.166339
0.165849
0.165355
0.164852
0.164332
0.163789
0.163215
0.162603
0.161941
0.161221
0.16043
0.159555
0.158581
0.157489
0.156261
0.154874
0.153303
0.151524
0.149509
0.14724
0.144707
0.14193
0.138974
0.135951
0.132661
0.13116
0.130174
0.126971
0.118347
0.103528
0.0860272
0.0708963
0.0612394
0.0572849
0.0582252
0.0603613
0.0575696
0.0825355
0.164365
0.237795
0.306985
0.383407
0.458702
0.529915
0.596723
0.659152
0.717263
0.771153
0.82092
0.866627
0.908337
0.946137
0.980145
1.01051
1.03741
1.06107
1.0817
1.09955
1.1149
1.12799
1.13911
1.14852
1.15648
1.16325
1.16908
1.1742
1.17883
1.18317
1.18742
1.19175
1.19631
1.20125
1.20669
1.21274
1.2195
1.22704
1.23543
1.24473
1.25498
1.26621
1.27844
1.29169
1.3059
1.32074
1.33635
1.35269
1.36972
1.38736
1.40551
1.42404
1.44282
1.46168
1.4804
1.49878
1.51654
1.53341
1.54909
1.56329
1.57568
1.58596
1.59381
1.59894
1.60105
1.59986
1.59502
1.58618
1.57289
1.55466
1.53065
1.4992
1.45973
1.41919
1.37898
1.33795
1.29569
1.25251
1.20881
1.16497
1.12134
1.07824
1.03597
0.994736
0.954734
0.916099
0.878926
0.843273
0.809169
0.776616
0.745599
0.716086
0.688032
0.661387
0.636095
0.612094
0.589325
0.567726
0.547237
0.527798
0.509353
0.491848
0.47523
0.45945
0.444461
0.43022
0.416685
0.403816
0.391575
0.379919
0.368796
0.358202
0.34811
0.338496
0.329337
0.320611
0.312297
0.304375
0.296826
0.289634
0.282781
0.276253
0.270034
0.264112
0.258473
0.253105
0.247996
0.243136
0.238514
0.234119
0.229943
0.225977
0.22221
0.218636
0.215246
0.212033
0.208989
0.206106
0.20338
0.200802
0.198366
0.196068
0.1939
0.191859
0.189937
0.188131
0.186435
0.184845
0.183356
0.181965
0.180667
0.179459
0.178337
0.177297
0.176337
0.175453
0.174643
0.173905
0.173235
0.172633
0.172096
0.171623
0.171212
0.170864
0.170577
0.170352
0.170189
0.170088
0.17005
0.170078
0.170173
0.170338
0.170573
0.170883
0.171267
0.171726
0.172259
0.172859
0.173517
0.174216
0.174933
0.175634
0.176279
0.176828
0.177257
0.177558
0.17749
0.177942
0.178581
0.177904
0.172033
0.155896
0.132441
0.109057
0.0913232
0.0813188
0.0790027
0.0796795
0.0709504
0.110243
0.221062
0.305886
0.390005
0.480272
0.568728
0.652571
0.731417
0.805357
0.874551
0.939155
0.999268
1.05493
1.10618
1.15305
1.19563
1.23403
1.26839
1.29891
1.3258
1.34928
1.36961
1.38706
1.40188
1.41435
1.42474
1.43331
1.44033
1.44603
1.45065
1.45443
1.45757
1.46028
1.46275
1.46516
1.46765
1.4704
1.47353
1.47716
1.4814
1.48637
1.49212
1.49875
1.50631
1.51485
1.52415
1.53421
1.54508
1.55677
1.56922
1.5824
1.59622
1.61057
1.62532
1.64031
1.65535
1.67021
1.68465
1.69839
1.71116
1.72266
1.7326
1.7407
1.74667
1.75024
1.75112
1.74902
1.74353
1.73415
1.72029
1.70112
1.67515
1.64042
1.60142
1.56209
1.52149
1.4788
1.43419
1.38816
1.34123
1.29387
1.24652
1.19956
1.15333
1.10812
1.06415
1.02159
0.980563
0.941154
0.903404
0.867327
0.832913
0.800132
0.768941
0.739289
0.711116
0.684357
0.658948
0.634823
0.611916
0.590164
0.569504
0.549877
0.531226
0.513498
0.496642
0.480609
0.465355
0.450836
0.437012
0.423844
0.411287
0.399303
0.387875
0.376972
0.366569
0.356641
0.347165
0.338119
0.329484
0.321239
0.313368
0.305852
0.298677
0.291826
0.285286
0.279043
0.273085
0.267399
0.261974
0.256798
0.251862
0.247154
0.242667
0.238391
0.234316
0.230436
0.226741
0.223225
0.21988
0.2167
0.213678
0.210808
0.208083
0.205499
0.203049
0.20073
0.198535
0.19646
0.194502
0.192656
0.190918
0.189285
0.187753
0.186321
0.184985
0.183743
0.182593
0.181534
0.180565
0.179685
0.178895
0.178194
0.177585
0.177069
0.176649
0.17633
0.176115
0.176011
0.176026
0.176168
0.176448
0.176878
0.177472
0.178245
0.179215
0.180398
0.181811
0.18347
0.185386
0.187564
0.189998
0.192668
0.195543
0.198576
0.20172
0.204932
0.208003
0.211258
0.214707
0.217272
0.215163
0.203472
0.178782
0.149104
0.123796
0.107379
0.100896
0.099467
0.0835806
0.138532
0.27523
0.368894
0.463846
0.564663
0.663745
0.758261
0.847715
0.93219
1.01186
1.08685
1.1572
1.22291
1.28391
1.34019
1.39175
1.43865
1.48099
1.51889
1.55255
1.58217
1.60797
1.63021
1.64914
1.66502
1.67813
1.68873
1.69709
1.70346
1.70809
1.71122
1.7131
1.71394
1.71396
1.71336
1.71234
1.71108
1.70975
1.70852
1.70754
1.70695
1.70687
1.7074
1.70867
1.71074
1.7134
1.71676
1.72088
1.72577
1.73142
1.73781
1.74487
1.75252
1.76064
1.76908
1.77768
1.78623
1.79451
1.80228
1.80931
1.81536
1.82022
1.82367
1.82553
1.82559
1.82368
1.81951
1.8127
1.80266
1.78865
1.76947
1.74294
1.70999
1.67588
1.64063
1.60293
1.56227
1.51899
1.47366
1.42687
1.37915
1.33102
1.28292
1.23525
1.18837
1.14255
1.09802
1.05493
1.01342
0.973548
0.935352
0.89884
0.863998
0.830791
0.799175
0.769093
0.740486
0.713289
0.687437
0.662864
0.639504
0.617296
0.596177
0.576089
0.556977
0.538787
0.52147
0.504979
0.489268
0.474296
0.460023
0.446409
0.433408
0.420997
0.409145
0.397824
0.387008
0.376673
0.366797
0.357356
0.348332
0.339705
0.331457
0.323571
0.31603
0.30882
0.301926
0.295333
0.28903
0.283004
0.277242
0.271734
0.266469
0.261438
0.256629
0.252034
0.247645
0.243453
0.239449
0.235627
0.231979
0.228498
0.225178
0.222013
0.218996
0.216122
0.213386
0.210783
0.208308
0.205956
0.203725
0.201609
0.199606
0.197713
0.195926
0.194244
0.192664
0.191186
0.189808
0.18853
0.187353
0.186276
0.185302
0.184434
0.183675
0.183031
0.182506
0.18211
0.181851
0.181742
0.181796
0.182029
0.182461
0.183113
0.184013
0.185187
0.186667
0.188486
0.190677
0.193271
0.196296
0.199768
0.203691
0.208055
0.212831
0.217983
0.223472
0.22916
0.235178
0.241648
0.247752
0.250045
0.243241
0.222702
0.190694
0.158724
0.135835
0.12479
0.120872
0.0959521
0.166337
0.326602
0.427625
0.530517
0.639461
0.747298
0.8511
0.95021
1.04464
1.13451
1.21985
1.3006
1.37664
1.44782
1.51399
1.57509
1.63107
1.68196
1.72785
1.76887
1.80517
1.83698
1.86451
1.888
1.90772
1.92393
1.93689
1.94688
1.95415
1.95897
1.9616
1.96228
1.96126
1.95877
1.95504
1.95028
1.94471
1.93853
1.93194
1.92512
1.91824
1.91148
1.90497
1.89886
1.89328
1.88805
1.88333
1.87921
1.87572
1.87291
1.87075
1.86921
1.86823
1.86771
1.86752
1.86752
1.86752
1.86735
1.86682
1.86576
1.86401
1.86146
1.85801
1.85361
1.84822
1.84178
1.83415
1.82499
1.81377
1.79964
1.78091
1.75609
1.7292
1.70192
1.67281
1.64057
1.60476
1.56567
1.52385
1.47987
1.4343
1.3877
1.34058
1.29341
1.24662
1.20055
1.15548
1.11165
1.06921
1.02829
0.988946
0.951224
0.91513
0.880648
0.847748
0.816386
0.786509
0.75806
0.730978
0.705202
0.680668
0.657315
0.635082
0.613913
0.593751
0.574543
0.55624
0.538792
0.522156
0.506288
0.491148
0.476695
0.462889
0.449697
0.437087
0.425031
0.413503
0.402477
0.391931
0.381841
0.372186
0.362947
0.354105
0.345642
0.33754
0.329785
0.322359
0.31525
0.308444
0.301926
0.295686
0.28971
0.283989
0.27851
0.273264
0.268242
0.263434
0.25883
0.254424
0.250207
0.24617
0.242308
0.238613
0.235079
0.231699
0.228468
0.22538
0.22243
0.219614
0.216926
0.214363
0.21192
0.209595
0.207384
0.205285
0.203295
0.201413
0.199637
0.197966
0.196401
0.194943
0.193592
0.192351
0.191224
0.190215
0.18933
0.188577
0.187966
0.187507
0.187216
0.187109
0.187206
0.187532
0.188113
0.188983
0.190178
0.191738
0.193709
0.196137
0.199071
0.202554
0.206626
0.211315
0.216632
0.222576
0.229134
0.236291
0.243984
0.252307
0.26143
0.27076
0.277346
0.275768
0.260368
0.230951
0.195709
0.166868
0.151184
0.144471
0.108472
0.193094
0.374448
0.482132
0.590992
0.706276
0.821442
0.93347
1.04153
1.14551
1.2454
1.34112
1.43246
1.51915
1.6009
1.67745
1.74859
1.81418
1.87418
1.9286
1.9775
2.02102
2.05933
2.09262
2.12114
2.14512
2.16482
2.18052
2.19249
2.201
2.20631
2.20869
2.2084
2.20571
2.20085
2.19408
2.18562
2.17572
2.16459
2.15246
2.13954
2.12604
2.11215
2.09806
2.08394
2.06995
2.05598
2.04225
2.02887
2.01593
2.00348
1.99153
1.98009
1.96909
1.95848
1.94813
1.93792
1.9277
1.91733
1.90666
1.89561
1.88408
1.8721
1.85969
1.84699
1.83412
1.82124
1.80839
1.79544
1.782
1.76714
1.749
1.72877
1.70947
1.68996
1.66842
1.64351
1.61469
1.58207
1.54604
1.5071
1.46579
1.42269
1.37836
1.33334
1.28813
1.24314
1.19874
1.15524
1.11284
1.07173
1.03202
0.993792
0.95708
0.921897
0.888235
0.856067
0.825355
0.796053
0.768108
0.741466
0.71607
0.691862
0.668786
0.646787
0.625809
0.605803
0.586719
0.568509
0.551128
0.534536
0.518689
0.503551
0.489081
0.475241
0.462
0.44933
0.437205
0.425598
0.414486
0.403846
0.393657
0.383898
0.37455
0.365594
0.357013
0.34879
0.340909
0.333356
0.326116
0.319176
0.312522
0.306142
0.300025
0.29416
0.288535
0.283141
0.277968
0.273007
0.268249
0.263686
0.25931
0.255113
0.251089
0.24723
0.24353
0.239984
0.236585
0.233328
0.230208
0.22722
0.224361
0.221625
0.219011
0.216514
0.214132
0.211862
0.209703
0.207655
0.205715
0.203885
0.202165
0.200557
0.199064
0.197689
0.196439
0.195319
0.194339
0.193508
0.192841
0.192352
0.192061
0.191991
0.192167
0.192623
0.193394
0.194522
0.196056
0.198047
0.200553
0.203633
0.207346
0.211749
0.216888
0.222797
0.2295
0.237011
0.245342
0.254493
0.264578
0.275793
0.287751
0.298045
0.301505
0.291811
0.266681
0.232208
0.200243
0.180442
0.170576
0.121491
0.215348
0.418088
0.532146
0.645695
0.766027
0.887421
1.00683
1.12324
1.23641
1.34617
1.45227
1.55433
1.6519
1.74453
1.8318
1.91338
1.98902
2.05856
2.12194
2.17917
2.23033
2.27555
2.31502
2.34894
2.37757
2.40115
2.41995
2.43426
2.44434
2.45047
2.45293
2.45199
2.44792
2.44097
2.43141
2.41947
2.40542
2.38948
2.37191
2.35292
2.33275
2.31163
2.28977
2.26738
2.24462
2.22149
2.19825
2.17504
2.15201
2.12924
2.10678
2.08464
2.06281
2.04121
2.01976
1.99834
1.97682
1.95509
1.93304
1.91062
1.88785
1.86483
1.84175
1.81889
1.79661
1.7753
1.75527
1.73668
1.71941
1.70251
1.6854
1.67096
1.65898
1.64733
1.63407
1.61769
1.5974
1.573
1.54461
1.51257
1.47733
1.43945
1.3995
1.35808
1.31576
1.27304
1.23038
1.18814
1.14664
1.10609
1.06669
1.02855
0.991763
0.956365
0.922379
0.889801
0.858613
0.828785
0.800277
0.773044
0.747037
0.722206
0.6985
0.675868
0.654259
0.633625
0.613919
0.595095
0.57711
0.559922
0.543492
0.527782
0.512754
0.498366
0.484589
0.471395
0.458756
0.446648
0.435047
0.42393
0.413275
0.403061
0.393269
0.38388
0.374877
0.366241
0.357957
0.35001
0.342385
0.335068
0.328045
0.321304
0.314834
0.308621
0.302656
0.296927
0.291426
0.286142
0.281066
0.27619
0.271506
0.267006
0.262682
0.258528
0.254536
0.250701
0.247017
0.243478
0.24008
0.236816
0.233684
0.230678
0.227796
0.225034
0.22239
0.219861
0.217445
0.215142
0.21295
0.210871
0.208905
0.207054
0.205322
0.203712
0.202231
0.200885
0.199684
0.198639
0.197764
0.197076
0.196596
0.196347
0.196359
0.196665
0.197306
0.198327
0.199779
0.201722
0.204219
0.207339
0.211151
0.215723
0.221118
0.227389
0.234579
0.242723
0.251863
0.262044
0.273404
0.286162
0.300103
0.313345
0.32125
0.317306
0.297613
0.266102
0.233944
0.212797
0.199242
0.135395
0.234844
0.456954
0.577311
0.694789
0.819256
0.946022
1.0721
1.19635
1.31835
1.43776
1.55415
1.66694
1.7755
1.87918
1.9774
2.06968
2.15564
2.23502
2.30766
2.37352
2.43263
2.48507
2.53102
2.57067
2.60425
2.63202
2.65424
2.6712
2.68319
2.69047
2.69335
2.69209
2.68697
2.67827
2.66624
2.65116
2.63326
2.61282
2.59006
2.56525
2.53862
2.51043
2.4809
2.45029
2.41874
2.38637
2.35348
2.32029
2.28696
2.25364
2.2204
2.18728
2.15428
2.12136
2.08843
2.05538
2.0221
1.98847
1.9544
1.9199
1.88501
1.84993
1.81497
1.78056
1.74728
1.71575
1.68659
1.66033
1.63719
1.61674
1.601
1.59108
1.58499
1.58043
1.57531
1.56789
1.557
1.542
1.52265
1.499
1.47136
1.44017
1.40601
1.3695
1.33126
1.29189
1.25192
1.21182
1.17196
1.13267
1.09418
1.05668
1.02029
0.985109
0.951188
0.918551
0.887204
0.857136
0.828324
0.800737
0.774337
0.749082
0.724928
0.701832
0.679747
0.658629
0.638434
0.61912
0.600645
0.58297
0.566056
0.549867
0.534366
0.519512
0.505275
0.491627
0.478543
0.465998
0.453968
0.44243
0.431363
0.420745
0.410558
0.400782
0.3914
0.382393
0.373746
0.365443
0.357469
0.349809
0.342451
0.335381
0.328587
0.322056
0.315779
0.309743
0.303939
0.298357
0.292988
0.287822
0.282853
0.27807
0.273468
0.269039
0.264775
0.260671
0.256721
0.252918
0.249259
0.245736
0.242348
0.239088
0.235954
0.232943
0.230051
0.227276
0.224617
0.222073
0.219642
0.217327
0.215127
0.213045
0.211084
0.20925
0.207546
0.205983
0.204568
0.203315
0.202237
0.201352
0.200683
0.200253
0.200095
0.200242
0.200737
0.201629
0.202972
0.204829
0.20727
0.210368
0.214202
0.21885
0.224384
0.230874
0.238376
0.246946
0.256643
0.267542
0.279802
0.293651
0.309007
0.324456
0.336037
0.337471
0.323803
0.296975
0.266998
0.247003
0.229903
0.150738
0.251874
0.490743
0.617339
0.738325
0.866297
0.997762
1.12991
1.26149
1.39193
1.52071
1.64719
1.77061
1.89011
2.00486
2.1141
2.21717
2.31358
2.40295
2.48503
2.55969
2.62693
2.6868
2.73943
2.78501
2.82377
2.85595
2.88183
2.90171
2.91586
2.92458
2.92816
2.9269
2.92107
2.91095
2.89682
2.87895
2.85759
2.833
2.80545
2.77518
2.74245
2.70751
2.67064
2.63208
2.59196
2.55054
2.50818
2.46515
2.42168
2.37792
2.33402
2.29003
2.24599
2.20184
2.15754
2.11295
2.06796
2.02245
1.97634
1.92962
1.88239
1.83489
1.78751
1.74082
1.69554
1.65251
1.61267
1.57693
1.54586
1.52125
1.50548
1.49738
1.49483
1.49564
1.4976
1.49868
1.49726
1.49219
1.48273
1.46857
1.44973
1.42648
1.39932
1.36887
1.33579
1.30074
1.26435
1.22717
1.18967
1.15226
1.11524
1.07887
1.04334
1.00877
0.975272
0.942897
0.911679
0.881633
0.852754
0.825027
0.798429
0.772929
0.748492
0.725081
0.702658
0.681184
0.660618
0.640922
0.622059
0.60399
0.586679
0.570093
0.554195
0.538943
0.52431
0.510271
0.496799
0.48387
0.471462
0.459552
0.448118
0.437139
0.426598
0.416473
0.406748
0.397404
0.388426
0.379798
0.371504
0.363531
0.355864
0.34849
0.341396
0.334571
0.328003
0.321682
0.315596
0.309736
0.304092
0.298655
0.293417
0.28837
0.283506
0.278817
0.274297
0.269939
0.265736
0.261684
0.257777
0.25401
0.250378
0.246877
0.243503
0.240254
0.237126
0.234118
0.231227
0.228453
0.225795
0.223253
0.22083
0.218526
0.216347
0.214296
0.212379
0.210606
0.208985
0.207529
0.206253
0.205176
0.204319
0.203709
0.203377
0.20336
0.203701
0.204451
0.205668
0.207418
0.209773
0.212813
0.216623
0.221288
0.226893
0.233514
0.241224
0.250088
0.260179
0.27159
0.284484
0.299092
0.315424
0.33246
0.346949
0.35316
0.345641
0.3247
0.298957
0.281477
0.262118
0.167077
0.266501
0.519373
0.652059
0.77633
0.907372
1.04299
1.18066
1.31908
1.45752
1.5953
1.73158
1.86538
1.99564
2.12133
2.24149
2.35532
2.46215
2.56151
2.65304
2.73657
2.812
2.87936
2.93877
2.99039
3.03444
3.07118
3.10088
3.12384
3.14035
3.15073
3.15526
3.15425
3.14799
3.13677
3.12086
3.10055
3.0761
3.04777
3.01583
2.98053
2.94213
2.90089
2.8571
2.81101
2.76272
2.71264
2.66121
2.60873
2.55549
2.50169
2.44752
2.39308
2.3384
2.28347
2.22822
2.17254
2.11628
2.05932
2.00157
1.94301
1.88374
1.82405
1.76436
1.70532
1.64779
1.59277
1.54143
1.49496
1.45539
1.42624
1.40758
1.39809
1.3961
1.39971
1.40673
1.4149
1.42215
1.42676
1.42743
1.42334
1.4141
1.39974
1.38059
1.35719
1.33021
1.30037
1.26837
1.23484
1.20036
1.16541
1.13039
1.09561
1.06134
1.02777
0.99502
0.963204
0.932386
0.902604
0.873878
0.846212
0.819597
0.794017
0.769448
0.745863
0.723229
0.701515
0.680687
0.660709
0.641549
0.623172
0.605544
0.588634
0.572405
0.556818
0.541849
0.527473
0.513664
0.500401
0.48766
0.47542
0.463659
0.452358
0.441497
0.431058
0.421022
0.411372
0.402091
0.393165
0.384577
0.376314
0.368361
0.360705
0.353333
0.346234
0.339395
0.332805
0.326455
0.320333
0.314431
0.308739
0.303248
0.29795
0.292838
0.287903
0.283139
0.27854
0.274098
0.269808
0.265666
0.261664
0.2578
0.254069
0.250467
0.246991
0.243639
0.240407
0.237294
0.2343
0.231424
0.228666
0.226028
0.223513
0.221123
0.218865
0.216743
0.214767
0.212947
0.211294
0.209826
0.20856
0.207518
0.206729
0.206223
0.206039
0.206222
0.206822
0.207901
0.209527
0.211776
0.214733
0.218488
0.223134
0.228764
0.235465
0.243319
0.252399
0.262786
0.27458
0.28794
0.303085
0.320091
0.338249
0.354987
0.365309
0.36375
0.349438
0.329564
0.316165
0.296008
0.183812
0.278771
0.542929
0.681431
0.808849
0.94265
1.08197
1.22465
1.36939
1.51535
1.66168
1.80734
1.95116
2.09186
2.2282
2.35905
2.48342
2.60052
2.70973
2.81062
2.90291
2.98648
3.06131
3.12748
3.18515
3.23453
3.27588
3.30947
3.33561
3.35461
3.36678
3.37243
3.37186
3.3654
3.35332
3.33592
3.31348
3.28629
3.25459
3.21867
3.17878
3.13517
3.08811
3.03789
2.98479
2.92883
2.87062
2.81063
2.74925
2.6868
2.62353
2.55968
2.49537
2.43067
2.36556
2.30001
2.23388
2.16699
2.09924
2.03048
1.96072
1.89003
1.81875
1.74731
1.67644
1.60707
1.54032
1.47752
1.42068
1.37412
1.3389
1.31471
1.30085
1.29631
1.29965
1.30893
1.32187
1.33606
1.34926
1.35955
1.3655
1.36622
1.36134
1.35092
1.33536
1.31529
1.29143
1.26453
1.23532
1.20445
1.17251
1.13997
1.10723
1.07461
1.04237
1.01069
0.979719
0.949552
0.920264
0.891898
0.864481
0.838022
0.812518
0.78796
0.76433
0.741606
0.719763
0.698773
0.678608
0.659238
0.640632
0.622762
0.605597
0.589098
0.573235
0.557984
0.543321
0.529225
0.515671
0.50264
0.49011
0.47806
0.466471
0.455325
0.444601
0.434283
0.424355
0.414798
0.405599
0.396741
0.38821
0.379992
0.372074
0.364443
0.357087
0.349995
0.343154
0.336555
0.330188
0.324042
0.318108
0.312378
0.306843
0.301495
0.296328
0.291333
0.286503
0.281834
0.277319
0.272951
0.268728
0.264642
0.260692
0.256872
0.25318
0.249612
0.246167
0.242843
0.239639
0.236554
0.23359
0.230748
0.22803
0.225439
0.222982
0.220663
0.218493
0.21648
0.214638
0.212983
0.211532
0.21031
0.209343
0.208664
0.208312
0.208332
0.208776
0.209708
0.211197
0.213325
0.216179
0.219855
0.224456
0.23008
0.236826
0.244783
0.254035
0.264664
0.27677
0.2905
0.306056
0.323551
0.342521
0.360999
0.374811
0.378852
0.37154
0.358744
0.350848
0.331514
0.20089
0.288748
0.561608
0.705526
0.835973
0.972288
1.1149
1.2621
1.41263
1.56556
1.7199
1.87443
2.02777
2.17844
2.32502
2.46617
2.60075
2.72781
2.84662
2.95664
3.05752
3.14906
3.23122
3.30405
3.36769
3.42235
3.46828
3.50576
3.53511
3.55664
3.57068
3.57755
3.57757
3.57106
3.55832
3.53965
3.51536
3.48571
3.451
3.41148
3.36742
3.31907
3.26668
3.21057
3.151
3.08793
3.02219
2.95429
2.88466
2.81366
2.74161
2.66875
2.59528
2.52127
2.4467
2.37154
2.29566
2.21886
2.14101
2.06195
1.98169
1.9003
1.81815
1.73571
1.65379
1.57337
1.49571
1.42281
1.35893
1.3056
1.26328
1.23201
1.2117
1.20201
1.20211
1.21055
1.2253
1.24389
1.26372
1.28231
1.29759
1.30805
1.31279
1.31151
1.30434
1.29179
1.27454
1.25336
1.22905
1.20234
1.17389
1.14428
1.11398
1.08339
1.05281
1.02249
0.992621
0.963346
0.934768
0.906959
0.879969
0.853827
0.828549
0.804137
0.780587
0.757886
0.736019
0.714965
0.694701
0.675203
0.656445
0.638402
0.621046
0.604338
0.588254
0.572773
0.557874
0.543534
0.529734
0.516453
0.503671
0.491367
0.479524
0.468123
0.457145
0.446574
0.436393
0.426585
0.417136
0.40803
0.399252
0.39079
0.38263
0.374758
0.367164
0.359834
0.352759
0.345927
0.339328
0.332953
0.326792
0.320836
0.315077
0.309507
0.304118
0.298904
0.293857
0.288972
0.284242
0.279662
0.275226
0.270931
0.266772
0.262745
0.258847
0.255075
0.251428
0.247903
0.244499
0.241217
0.238056
0.235019
0.232108
0.229326
0.226679
0.224173
0.221816
0.219619
0.217595
0.215759
0.21413
0.212731
0.21159
0.21074
0.210218
0.210072
0.210356
0.211132
0.212475
0.214466
0.2172
0.220778
0.225308
0.230902
0.237667
0.245701
0.255097
0.26594
0.278326
0.292385
0.308297
0.326193
0.345796
0.365664
0.382448
0.391667
0.391462
0.386583
0.385363
0.369054
0.218308
0.296516
0.575683
0.724512
0.857846
0.996457
1.14199
1.29321
1.44896
1.60826
1.76999
1.93276
2.09501
2.25508
2.41134
2.56228
2.7066
2.8432
2.97122
3.09003
3.19919
3.29846
3.38774
3.46705
3.53651
3.59632
3.64674
3.68805
3.72058
3.74464
3.76057
3.76872
3.76941
3.76296
3.74971
3.72997
3.70404
3.67222
3.63479
3.59203
3.54421
3.49157
3.43437
3.3729
3.30739
3.23787
3.16527
3.09016
3.013
2.93419
2.8541
2.773
2.69112
2.60854
2.52524
2.44118
2.35626
2.27021
2.1829
2.09418
2.00404
1.91258
1.82023
1.72757
1.63537
1.54475
1.45825
1.37928
1.30946
1.24964
1.20027
1.16168
1.13416
1.11785
1.11251
1.11732
1.1308
1.15077
1.17457
1.19938
1.22254
1.24188
1.25588
1.26372
1.26519
1.26055
1.25037
1.23541
1.21649
1.19441
1.1699
1.14361
1.11612
1.08787
1.05926
1.03058
1.00206
0.973906
0.946241
0.919175
0.892781
0.867112
0.842199
0.818063
0.794711
0.772143
0.750352
0.729325
0.709048
0.689501
0.670665
0.652517
0.635029
0.618169
0.601918
0.586258
0.571169
0.556632
0.542626
0.529134
0.516136
0.503613
0.491548
0.479923
0.46872
0.457922
0.447514
0.43748
0.427804
0.418472
0.409469
0.400783
0.392398
0.384305
0.376489
0.368939
0.361645
0.354596
0.347781
0.34119
0.334816
0.328647
0.322678
0.316898
0.311302
0.305881
0.300628
0.295539
0.290606
0.285825
0.281189
0.276696
0.272339
0.268117
0.264024
0.26006
0.256221
0.252505
0.248913
0.245444
0.242098
0.238877
0.235783
0.232821
0.229994
0.22731
0.224776
0.222404
0.220205
0.218196
0.216395
0.214825
0.213514
0.212493
0.211804
0.211491
0.211609
0.212224
0.213409
0.215251
0.217849
0.221309
0.225749
0.231289
0.238048
0.246139
0.255661
0.266705
0.279361
0.293744
0.310007
0.328288
0.348452
0.369496
0.388863
0.40285
0.409691
0.413282
0.41964
0.406408
0.235776
0.302174
0.585477
0.738634
0.874669
1.01536
1.16342
1.31816
1.47854
1.64355
1.81196
1.98225
2.15269
2.32145
2.48671
2.64681
2.80027
2.94587
3.08262
3.20979
3.32686
3.43352
3.52962
3.61516
3.69024
3.75503
3.80981
3.85485
3.89047
3.91703
3.93485
3.9443
3.94571
3.93942
3.92578
3.90512
3.87776
3.844
3.80416
3.75851
3.70732
3.65085
3.58935
3.52309
3.45221
3.3769
3.29815
3.21656
3.13264
3.04681
2.95946
2.87089
2.78136
2.69097
2.59966
2.50738
2.41403
2.31934
2.22311
2.12524
2.02572
1.92468
1.82264
1.72045
1.61846
1.52049
1.42872
1.34473
1.26957
1.20396
1.14837
1.10323
1.06895
1.04587
1.03413
1.0335
1.04317
1.06157
1.08638
1.11476
1.14367
1.17038
1.1927
1.20919
1.21917
1.22254
1.2197
1.2113
1.19814
1.18106
1.16085
1.13825
1.11389
1.0883
1.06192
1.03512
1.0082
0.981363
0.954804
0.928654
0.903015
0.877963
0.85355
0.829812
0.806772
0.78444
0.762821
0.741911
0.721702
0.702182
0.683336
0.665148
0.647586
0.63063
0.614266
0.598476
0.583243
0.56855
0.55438
0.540714
0.527536
0.514828
0.502573
0.490753
0.479353
0.468357
0.457747
0.44751
0.437631
0.428094
0.418887
0.409995
0.401407
0.393108
0.385088
0.377336
0.369839
0.362587
0.355571
0.348781
0.342207
0.335841
0.329674
0.323698
0.317906
0.312291
0.306846
0.301565
0.296442
0.291472
0.286649
0.281968
0.277427
0.27302
0.268745
0.2646
0.260581
0.256688
0.252919
0.249274
0.245754
0.24236
0.239095
0.235962
0.232966
0.230114
0.227414
0.224875
0.222511
0.220337
0.218371
0.216636
0.215159
0.213973
0.213117
0.212637
0.212587
0.213034
0.214054
0.215736
0.218182
0.221506
0.225833
0.231296
0.238028
0.246153
0.255786
0.267023
0.279953
0.294677
0.311322
0.330025
0.350755
0.372878
0.394562
0.412963
0.426699
0.439089
0.453692
0.443281
0.253375
0.305835
0.591346
0.7482
0.886698
1.02922
1.17945
1.33718
1.50155
1.67155
1.84587
2.02286
2.20067
2.37729
2.55076
2.71926
2.88117
3.03512
3.18002
3.31502
3.43954
3.55318
3.65576
3.74723
3.82767
3.89724
3.9562
4.00484
4.04349
4.07248
4.09218
4.10293
4.10511
4.09907
4.08516
4.06373
4.03513
3.99968
3.95771
3.90951
3.85537
3.79552
3.73024
3.65977
3.58412
3.50371
3.41953
3.33223
3.24233
3.15028
3.05647
2.96122
2.86478
2.76728
2.66864
2.56875
2.4675
2.36468
2.25995
2.1533
2.0448
1.93447
1.82316
1.71204
1.60394
1.50097
1.40459
1.31598
1.23606
1.16552
1.10485
1.05443
1.01458
0.985611
0.967786
0.961206
0.965628
0.9802
1.00327
1.03236
1.06447
1.09648
1.12561
1.14979
1.16773
1.17892
1.18342
1.18173
1.17457
1.16276
1.14715
1.12852
1.10756
1.08488
1.06099
1.03631
1.01117
0.985855
0.960576
0.935504
0.91077
0.886472
0.862684
0.83946
0.816838
0.794841
0.773485
0.752776
0.732714
0.713293
0.694506
0.676336
0.658758
0.641761
0.625333
0.609461
0.594129
0.579323
0.565027
0.551226
0.537903
0.525043
0.512628
0.500644
0.489075
0.477905
0.46712
0.456704
0.446644
0.436925
0.427533
0.418457
0.409683
0.401198
0.392992
0.385053
0.37737
0.369932
0.362731
0.355755
0.348996
0.342446
0.336096
0.329938
0.323965
0.318169
0.312545
0.307085
0.301785
0.296638
0.291639
0.286785
0.282071
0.277493
0.273048
0.268733
0.264547
0.260487
0.256553
0.252745
0.249063
0.245508
0.242083
0.238791
0.235638
0.232629
0.229772
0.227077
0.224557
0.222227
0.220104
0.218211
0.216575
0.215228
0.214208
0.213561
0.213343
0.213619
0.214467
0.215977
0.218257
0.221427
0.225621
0.230983
0.237661
0.245799
0.255526
0.26695
0.280164
0.295255
0.312334
0.331528
0.352889
0.376084
0.399929
0.422468
0.442912
0.46426
0.48759
0.480251
0.271806
0.307616
0.593661
0.753564
0.894241
1.03834
1.19033
1.35051
1.51821
1.69242
1.8718
2.0546
2.23884
2.4224
2.60319
2.77923
2.94878
3.11034
3.26271
3.40495
3.53637
3.65654
3.76519
3.86225
3.94777
4.02189
4.08486
4.13696
4.17853
4.2099
4.23146
4.24355
4.24656
4.24086
4.22681
4.20479
4.17516
4.13827
4.09448
4.04409
3.98739
3.92464
3.8561
3.78201
3.70219
3.61737
3.52851
3.43627
3.34118
3.2437
3.14422
3.04302
2.94038
2.83643
2.73108
2.62411
2.51537
2.40483
2.29192
2.17668
2.0596
1.93999
1.82134
1.70457
1.59156
1.48401
1.38321
1.29029
1.20618
1.13156
1.0669
1.01249
0.968498
0.935053
0.912297
0.900381
0.899365
0.908982
0.928329
0.955635
0.988293
1.02318
1.05716
1.08759
1.11255
1.13098
1.14256
1.14752
1.14642
1.14004
1.12922
1.11477
1.09744
1.07789
1.05669
1.03432
1.01116
0.987529
0.963691
0.939845
0.916152
0.892734
0.869689
0.847087
0.824983
0.803415
0.782409
0.761981
0.742139
0.722888
0.704222
0.686127
0.66859
0.651605
0.635163
0.619254
0.603867
0.588989
0.574606
0.560706
0.547272
0.534292
0.521749
0.50963
0.497919
0.486602
0.475666
0.465095
0.454876
0.444996
0.435442
0.4262
0.41726
0.408608
0.400233
0.392125
0.384272
0.376665
0.369293
0.362147
0.355219
0.3485
0.34198
0.335654
0.329513
0.323551
0.31776
0.312135
0.30667
0.30136
0.2962
0.291184
0.28631
0.281573
0.276971
0.2725
0.268159
0.263945
0.259859
0.255899
0.252067
0.248363
0.24479
0.241351
0.238052
0.234897
0.231894
0.229054
0.226388
0.22391
0.221639
0.219597
0.217808
0.216305
0.215125
0.214315
0.213928
0.21403
0.214701
0.216032
0.218134
0.221132
0.225171
0.230408
0.237008
0.245133
0.254934
0.266537
0.280042
0.295534
0.313108
0.332886
0.354979
0.379305
0.405253
0.431734
0.458705
0.489066
0.521524
0.517381
0.294452
0.30764
0.592802
0.755114
0.897652
1.04304
1.1964
1.35846
1.52878
1.7064
1.88991
2.07753
2.2672
2.45668
2.64378
2.8264
3.00269
3.17103
3.33011
3.47891
3.61665
3.74282
3.8571
3.95938
4.04967
4.1281
4.19489
4.25032
4.29472
4.32844
4.35185
4.36533
4.36926
4.36401
4.34999
4.32757
4.29715
4.25911
4.21382
4.16161
4.10279
4.03761
3.96636
3.8892
3.80587
3.71733
3.6245
3.52807
3.42855
3.3264
3.22199
3.11557
3.00734
2.89749
2.78597
2.67241
2.55642
2.43847
2.31767
2.19381
2.06907
1.94345
1.81906
1.69727
1.5796
1.46757
1.36252
1.26562
1.17784
1.09989
1.0322
0.974938
0.92812
0.891649
0.865431
0.849457
0.843799
0.848487
0.863216
0.886997
0.917932
0.953289
0.989912
1.02477
1.0554
1.08017
1.09827
1.10958
1.11443
1.11349
1.10755
1.09741
1.08386
1.06761
1.04927
1.02936
1.00834
0.98655
0.964293
0.941806
0.919277
0.896856
0.874661
0.852783
0.831291
0.810238
0.789663
0.769591
0.750042
0.731024
0.71254
0.694576
0.677134
0.660211
0.643803
0.627903
0.612502
0.597592
0.58316
0.569195
0.555685
0.542616
0.529975
0.517749
0.505924
0.494487
0.483424
0.472721
0.462367
0.452347
0.442651
0.433264
0.424176
0.415375
0.40685
0.398589
0.390584
0.382823
0.375297
0.367997
0.360915
0.354041
0.347367
0.340887
0.334592
0.328477
0.322534
0.316757
0.311141
0.305681
0.300372
0.295208
0.290187
0.285304
0.280557
0.275942
0.271459
0.267104
0.262877
0.258779
0.254809
0.250968
0.247259
0.243685
0.24025
0.23696
0.233823
0.230847
0.228045
0.22543
0.22302
0.220836
0.218902
0.21725
0.215916
0.214945
0.214391
0.21432
0.21481
0.215955
0.217868
0.220681
0.224546
0.229633
0.236126
0.244211
0.254063
0.26583
0.279632
0.295557
0.313692
0.334157
0.357111
0.382676
0.410745
0.441055
0.474405
0.51378
0.555744
0.555108
0.31629
0.306034
0.589145
0.753265
0.897331
1.04372
1.19803
1.36142
1.53363
1.71379
1.90046
2.09185
2.28583
2.48012
2.67243
2.86058
3.04261
3.21681
3.38178
3.53639
3.67979
3.8114
3.93085
4.03795
4.13269
4.21517
4.28559
4.34422
4.39138
4.42743
4.45273
4.46767
4.47263
4.46801
4.45421
4.43163
4.4007
4.36181
4.31537
4.26174
4.20123
4.13412
4.06071
3.98102
3.89483
3.80325
3.70715
3.60723
3.50399
3.39787
3.28921
3.17822
3.06492
2.94963
2.83237
2.71265
2.58945
2.46433
2.33661
2.20669
2.07587
1.94496
1.81545
1.68882
1.56655
1.45016
1.34108
1.24056
1.14963
1.06901
0.999068
0.939905
0.891361
0.853125
0.824848
0.806258
0.797247
0.797856
0.808094
0.827602
0.85527
0.889055
0.926147
0.963447
0.998123
1.02801
1.05179
1.0689
1.07941
1.08376
1.08265
1.07687
1.06718
1.05433
1.03898
1.02168
1.00291
0.98309
0.962538
0.941526
0.920273
0.898953
0.877707
0.856644
0.83585
0.815394
0.795325
0.775682
0.75649
0.737768
0.719519
0.701742
0.684447
0.667635
0.651306
0.635458
0.620086
0.605181
0.590737
0.576743
0.563188
0.550062
0.537353
0.525048
0.513135
0.501603
0.490437
0.479627
0.469159
0.459022
0.449203
0.439691
0.430475
0.421543
0.412885
0.40449
0.396349
0.388451
0.380787
0.373348
0.366126
0.359113
0.3523
0.34568
0.339246
0.332991
0.326909
0.320995
0.315241
0.309644
0.304199
0.2989
0.293745
0.288729
0.28385
0.279104
0.27449
0.270007
0.265652
0.261427
0.25733
0.253364
0.249531
0.245833
0.242274
0.23886
0.235599
0.232499
0.229571
0.226829
0.224288
0.221971
0.2199
0.218105
0.216623
0.215497
0.21478
0.214536
0.214845
0.2158
0.217516
0.22013
0.223802
0.228717
0.235077
0.243091
0.252966
0.26488
0.278977
0.295362
0.314124
0.335382
0.35934
0.38629
0.416562
0.450665
0.490292
0.538669
0.590432
0.593536
0.338464
0.302923
0.583063
0.748453
0.893726
1.04084
1.19571
1.35985
1.53321
1.71502
1.90382
2.09784
2.29495
2.49284
2.68917
2.8817
3.0684
3.24747
3.41741
3.57703
3.72539
3.86183
3.98592
4.09743
4.19628
4.28255
4.35642
4.41814
4.46801
4.5064
4.53365
4.55016
4.55631
4.55251
4.53917
4.51671
4.48557
4.44618
4.39897
4.34433
4.2826
4.21405
4.139
4.05728
3.96889
3.87494
3.77621
3.67345
3.56714
3.45767
3.34536
3.23035
3.11241
2.99197
2.86931
2.74389
2.61528
2.48439
2.35104
2.21586
2.07979
1.94388
1.80948
1.67809
1.5513
1.43073
1.3179
1.21417
1.12061
1.03791
0.966426
0.906148
0.856774
0.817799
0.788633
0.76873
0.757717
0.755464
0.762024
0.77739
0.801114
0.831932
0.867662
0.905481
0.942444
0.976014
1.00438
1.02654
1.04219
1.05154
1.05511
1.05361
1.0478
1.0384
1.0261
1.01149
0.995093
0.977338
0.958591
0.939154
0.919273
0.899146
0.878936
0.858772
0.838757
0.818972
0.79948
0.78033
0.761558
0.743188
0.725227
0.70769
0.690591
0.673937
0.657732
0.641978
0.626673
0.611813
0.597391
0.583401
0.569834
0.556681
0.543932
0.531576
0.519602
0.507999
0.496755
0.48586
0.4753
0.465066
0.455146
0.445529
0.436203
0.427159
0.418386
0.409874
0.401614
0.393595
0.385809
0.378247
0.370901
0.363762
0.356824
0.350079
0.34352
0.33714
0.330933
0.324893
0.319016
0.313295
0.307727
0.302306
0.297029
0.291892
0.286893
0.282029
0.277297
0.272697
0.268226
0.263886
0.259675
0.255595
0.251648
0.247837
0.244165
0.240638
0.237263
0.234048
0.231004
0.228144
0.225482
0.22304
0.22084
0.218911
0.217287
0.216011
0.215135
0.214722
0.21485
0.215613
0.217127
0.219532
0.222997
0.227718
0.233917
0.241831
0.251698
0.263735
0.278121
0.294986
0.314433
0.336594
0.361709
0.390214
0.422819
0.460751
0.506612
0.563986
0.625766
0.632816
0.361367
0.298434
0.574925
0.74114
0.887342
1.03496
1.19001
1.35433
1.52808
1.71064
1.90048
2.09593
2.29491
2.49511
2.69419
2.88988
3.08007
3.26293
3.43688
3.60063
3.75319
3.89382
4.02201
4.13747
4.24009
4.32989
4.40702
4.47172
4.52427
4.56501
4.5943
4.61252
4.62006
4.61731
4.6047
4.58267
4.55167
4.51216
4.46458
4.40937
4.34685
4.27737
4.20117
4.11792
4.02796
3.93227
3.83145
3.72643
3.6176
3.50532
3.38985
3.27135
3.14974
3.02536
2.89826
2.76819
2.63507
2.49935
2.36118
2.22119
2.08024
1.93947
1.80025
1.6642
1.53304
1.40852
1.29231
1.18582
1.09014
1.00598
0.933607
0.872915
0.823464
0.784577
0.755449
0.735288
0.723449
0.719554
0.723523
0.735451
0.755302
0.782492
0.81558
0.852281
0.889839
0.92556
0.95727
0.983533
1.00365
1.01753
1.0255
1.02814
1.02612
1.02016
1.01094
0.999071
0.985098
0.969484
0.952624
0.934843
0.916415
0.897562
0.878467
0.859276
0.84011
0.821064
0.802214
0.783619
0.765323
0.747357
0.729737
0.71249
0.695634
0.679182
0.663144
0.647524
0.632324
0.617543
0.603179
0.589225
0.575677
0.562527
0.549766
0.537386
0.525376
0.513727
0.502429
0.491471
0.480842
0.470532
0.46053
0.450827
0.441411
0.432273
0.423402
0.41479
0.406427
0.398303
0.39041
0.382741
0.375286
0.368037
0.360988
0.354132
0.347461
0.340969
0.334651
0.3285
0.322511
0.31668
0.311001
0.30547
0.300084
0.294839
0.289732
0.284761
0.279923
0.275217
0.270642
0.266197
0.261882
0.2577
0.25365
0.249736
0.245962
0.242332
0.238853
0.235533
0.232382
0.229412
0.226639
0.22408
0.221759
0.219703
0.217945
0.216526
0.215496
0.214918
0.214868
0.215438
0.216747
0.218937
0.222182
0.226691
0.232704
0.240486
0.250313
0.262445
0.277105
0.294463
0.31465
0.337817
0.364244
0.394494
0.429607
0.471467
0.523583
0.589975
0.66195
0.673125
0.385119
0.292693
0.565104
0.73183
0.878757
1.02673
1.18165
1.34558
1.51895
1.70137
1.8911
2.08672
2.28623
2.48738
2.68785
2.88537
3.07782
3.26332
3.44023
3.60719
3.76313
3.90724
4.03894
4.15789
4.2639
4.35696
4.43717
4.50474
4.55994
4.60308
4.63452
4.65461
4.66375
4.66232
4.65075
4.62949
4.599
4.55975
4.51223
4.45687
4.39402
4.32406
4.24717
4.16289
4.07191
3.97508
3.87305
3.76655
3.65598
3.54167
3.42386
3.30271
3.17829
3.05078
2.92015
2.78631
2.64931
2.50939
2.36684
2.22228
2.0766
1.93102
1.78705
1.64646
1.51114
1.38301
1.26382
1.15508
1.05787
0.972861
0.900243
0.839794
0.790935
0.752824
0.724471
0.704867
0.693127
0.688622
0.691065
0.700481
0.71701
0.740541
0.770313
0.804712
0.841412
0.877814
0.911554
0.940848
0.964624
0.982458
0.994424
1.00093
1.00255
0.999947
0.993779
0.984661
0.973149
0.959726
0.944809
0.928749
0.911841
0.894329
0.876415
0.858264
0.840011
0.821765
0.803616
0.785631
0.767865
0.75035
0.733121
0.716211
0.699643
0.683437
0.667605
0.652158
0.6371
0.622433
0.608159
0.594274
0.580774
0.567655
0.55491
0.542531
0.530511
0.518841
0.507511
0.496512
0.485835
0.47547
0.465407
0.455636
0.446148
0.436933
0.427983
0.419287
0.410837
0.402625
0.394642
0.386879
0.37933
0.371986
0.36484
0.357886
0.351117
0.344527
0.33811
0.331861
0.325773
0.319843
0.314066
0.308437
0.302954
0.297612
0.292409
0.287342
0.282409
0.277608
0.272939
0.2684
0.263993
0.259717
0.255575
0.251568
0.2477
0.243977
0.240403
0.236987
0.233738
0.230667
0.22779
0.225123
0.222689
0.220513
0.218628
0.217073
0.215897
0.215159
0.214934
0.215315
0.216418
0.218388
0.221405
0.225686
0.231489
0.23911
0.248863
0.261058
0.275973
0.293829
0.3148
0.339072
0.366969
0.399166
0.436997
0.482946
0.541406
0.616887
0.699231
0.714694
0.409822
0.285836
0.554016
0.721111
0.868657
1.01703
1.1715
1.33452
1.50667
1.68765
1.87611
2.07063
2.26932
2.47005
2.67055
2.86858
3.06203
3.24898
3.42775
3.59695
3.75541
3.90226
4.03685
4.15877
4.26777
4.36379
4.44688
4.51721
4.57502
4.62061
4.65431
4.67647
4.68745
4.68765
4.67748
4.65738
4.62783
4.58931
4.54233
4.48734
4.4247
4.35481
4.27775
4.19314
4.10185
4.00461
3.90208
3.79484
3.6833
3.56774
3.44839
3.32543
3.19894
3.06902
2.9356
2.79864
2.6582
2.51444
2.36771
2.21866
2.06828
1.91793
1.76929
1.62433
1.48514
1.35378
1.23212
1.12169
1.02358
0.938374
0.866168
0.806607
0.758961
0.72223
0.695249
0.676828
0.665879
0.661555
0.663356
0.671158
0.685111
0.705363
0.731674
0.763081
0.797828
0.833631
0.868137
0.899351
0.92588
0.946975
0.962437
0.972461
0.977495
0.978109
0.974917
0.968518
0.959465
0.948255
0.935318
0.921025
0.905689
0.889573
0.872897
0.855842
0.838558
0.821169
0.803772
0.78645
0.769259
0.752245
0.735453
0.718924
0.702687
0.686767
0.67118
0.655942
0.641061
0.626542
0.61239
0.598603
0.585182
0.572122
0.559419
0.547068
0.535061
0.523393
0.512054
0.501036
0.490331
0.479931
0.469826
0.460007
0.450465
0.441191
0.432178
0.423415
0.414895
0.406609
0.39855
0.390709
0.38308
0.375655
0.368426
0.361388
0.354535
0.347859
0.341356
0.33502
0.328846
0.322829
0.316966
0.311251
0.305681
0.300253
0.294965
0.289812
0.284795
0.27991
0.275156
0.270534
0.266043
0.261684
0.257457
0.253367
0.249415
0.245606
0.241946
0.238442
0.235103
0.23194
0.228967
0.2262
0.22366
0.221372
0.219367
0.217683
0.216366
0.215474
0.21508
0.215276
0.216176
0.217925
0.220706
0.224747
0.230323
0.237755
0.2474
0.259622
0.274765
0.293116
0.314909
0.340377
0.3699
0.404259
0.445051
0.495308
0.560275
0.644981
0.737906
0.757829
0.435611
0.278094
0.54221
0.709742
0.857885
1.00604
1.15966
1.32115
1.49127
1.66986
1.8559
2.04806
2.24463
2.44357
2.64274
2.83995
3.03313
3.22035
3.3999
3.57034
3.73045
3.87929
4.01614
4.1405
4.25209
4.35076
4.43654
4.50953
4.56995
4.61806
4.65417
4.67862
4.69174
4.69391
4.68553
4.66704
4.63891
4.60164
4.55574
4.50167
4.43983
4.37061
4.29393
4.20973
4.11884
4.02193
3.9196
3.81237
3.70058
3.58453
3.4644
3.34035
3.21245
3.08071
2.94502
2.80535
2.66171
2.51426
2.36339
2.20984
2.05474
1.89964
1.74644
1.59735
1.45464
1.32053
1.19697
1.08551
0.987181
0.902478
0.831366
0.773336
0.727499
0.692688
0.667581
0.650827
0.641174
0.637599
0.639419
0.646348
0.658458
0.675998
0.69906
0.72722
0.759317
0.793521
0.827677
0.859738
0.888087
0.91169
0.930069
0.943195
0.951344
0.954977
0.954636
0.950884
0.94426
0.935257
0.924316
0.911817
0.898091
0.883414
0.868023
0.852113
0.835848
0.819363
0.802768
0.786153
0.769581
0.753113
0.736803
0.720697
0.704831
0.689234
0.673931
0.658937
0.644266
0.629928
0.615928
0.60227
0.588954
0.57598
0.563346
0.551047
0.539078
0.527434
0.516108
0.505093
0.494381
0.483965
0.473837
0.463987
0.45441
0.445095
0.436034
0.427221
0.418646
0.410302
0.402182
0.394278
0.386583
0.379089
0.371791
0.364682
0.357757
0.351008
0.344431
0.33802
0.331771
0.32568
0.31974
0.31395
0.308305
0.302802
0.297438
0.29221
0.287118
0.282158
0.27733
0.272633
0.268067
0.263633
0.259332
0.255165
0.251137
0.247251
0.243513
0.239928
0.236507
0.233258
0.230196
0.227336
0.224698
0.222305
0.220187
0.21838
0.21693
0.215891
0.215335
0.21535
0.21605
0.21758
0.220124
0.223917
0.229248
0.236466
0.24597
0.258184
0.273523
0.292358
0.314998
0.341747
0.37305
0.409796
0.453823
0.508663
0.58038
0.674538
0.778334
0.802914
0.46266
0.270641
0.529982
0.697764
0.846339
0.994083
1.14645
1.30583
1.47314
1.64842
1.83092
2.01952
2.21266
2.4085
2.60501
2.8001
2.99175
3.17805
3.3573
3.52799
3.68888
3.83895
3.97741
4.10371
4.21747
4.31851
4.40676
4.48232
4.54535
4.59606
4.63476
4.66173
4.6773
4.68181
4.67566
4.65925
4.63306
4.59759
4.55335
4.5008
4.44038
4.37245
4.29677
4.21372
4.12395
4.02809
3.92667
3.82015
3.70883
3.59297
3.47273
3.34819
3.21939
3.08625
2.94863
2.80644
2.65968
2.50852
2.35343
2.19529
2.03542
1.87561
1.71803
1.56511
1.41934
1.28305
1.15826
1.04649
0.948694
0.865241
0.79593
0.74008
0.696623
0.664229
0.641422
0.626711
0.618712
0.61627
0.618562
0.625163
0.636043
0.65145
0.671659
0.696635
0.725744
0.75766
0.790566
0.822511
0.851779
0.877115
0.89779
0.913539
0.924454
0.930857
0.9332
0.931989
0.927731
0.920908
0.911957
0.901265
0.889172
0.875966
0.861897
0.847172
0.831968
0.816431
0.800681
0.784814
0.768903
0.753023
0.737235
0.721592
0.706135
0.6909
0.675913
0.661198
0.64677
0.632643
0.618826
0.605325
0.592143
0.579281
0.56674
0.554518
0.54261
0.531013
0.519721
0.508729
0.498031
0.487618
0.477485
0.467624
0.458028
0.448688
0.439598
0.430749
0.422135
0.413748
0.405581
0.397627
0.389879
0.382332
0.374977
0.36781
0.360824
0.354014
0.347375
0.340901
0.334588
0.328432
0.322428
0.316572
0.310862
0.305293
0.299863
0.29457
0.289411
0.284386
0.279491
0.274728
0.270096
0.265595
0.261226
0.256992
0.252895
0.248939
0.245129
0.241471
0.237974
0.234647
0.231503
0.228556
0.225826
0.223335
0.221111
0.219189
0.217611
0.216433
0.215721
0.215563
0.216069
0.217383
0.21969
0.223229
0.228303
0.235286
0.244617
0.256785
0.272286
0.291585
0.315091
0.343195
0.376429
0.415797
0.463361
0.523121
0.60192
0.705865
0.820939
0.850424
0.491186
0.263436
0.517413
0.685432
0.834303
0.981433
1.13218
1.2889
1.45267
1.62374
1.80166
1.98553
2.17403
2.3655
2.55808
2.74979
2.9387
3.12294
3.3008
3.47078
3.63158
3.78212
3.92155
4.04925
4.16478
4.26787
4.35842
4.43645
4.50207
4.55548
4.59693
4.62667
4.64501
4.65227
4.64878
4.63496
4.61125
4.57815
4.53617
4.48577
4.42741
4.36138
4.28742
4.20621
4.11827
4.02415
3.92432
3.81916
3.70896
3.59389
3.47407
3.34951
3.22017
3.08588
2.94646
2.80176
2.6518
2.49678
2.3373
2.17444
2.00977
1.84534
1.68362
1.52728
1.37899
1.2412
1.11594
1.00468
0.908243
0.82683
0.76005
0.707023
0.666496
0.636969
0.61682
0.604436
0.598331
0.59726
0.600304
0.606939
0.61704
0.630815
0.648613
0.670653
0.696711
0.725946
0.756942
0.787971
0.817338
0.843657
0.865989
0.883852
0.897142
0.906029
0.910852
0.912043
0.910064
0.905368
0.898383
0.889495
0.879046
0.867333
0.854613
0.841107
0.827
0.812448
0.79758
0.782498
0.767289
0.752036
0.736809
0.721666
0.706655
0.691816
0.677181
0.662776
0.648623
0.634737
0.621132
0.607815
0.594793
0.58207
0.569647
0.557525
0.545701
0.534173
0.522937
0.511989
0.501323
0.490933
0.480814
0.470958
0.46136
0.452012
0.442907
0.434039
0.425401
0.416985
0.408786
0.400796
0.393009
0.385419
0.378021
0.370807
0.363773
0.356914
0.350223
0.343697
0.337331
0.331121
0.325062
0.319151
0.313384
0.307759
0.302273
0.296922
0.291706
0.286623
0.281671
0.276849
0.272158
0.267597
0.263169
0.258873
0.254714
0.250695
0.24682
0.243095
0.239528
0.236128
0.232908
0.229881
0.227065
0.224482
0.222158
0.220127
0.21843
0.217119
0.216259
0.215934
0.216253
0.217356
0.21943
0.222714
0.227523
0.234252
0.243383
0.255466
0.271089
0.290827
0.315206
0.344731
0.380044
0.422276
0.473712
0.53879
0.625101
0.739304
0.86622
0.90094
0.521456
0.256296
0.504681
0.672957
0.822018
0.968333
1.11709
1.27064
1.43019
1.59623
1.76862
1.94671
2.12943
2.31536
2.5028
2.68995
2.87497
3.05605
3.2315
3.39983
3.55968
3.70995
3.84972
3.9783
4.09517
4.20001
4.29264
4.37303
4.44124
4.49743
4.54178
4.57455
4.59598
4.60635
4.60599
4.59525
4.57457
4.54443
4.50531
4.45769
4.40203
4.33847
4.26699
4.18831
4.10287
4.01114
3.91352
3.81033
3.70178
3.58799
3.46898
3.3447
3.215
3.07964
2.93836
2.79101
2.63761
2.47848
2.31441
2.14669
1.97719
1.80831
1.64278
1.48354
1.33342
1.19493
1.07009
0.960237
0.866048
0.787504
0.723995
0.674424
0.637342
0.611077
0.593873
0.584015
0.579946
0.580363
0.5843
0.591175
0.600797
0.613322
0.629118
0.648543
0.671672
0.69807
0.726733
0.756233
0.784999
0.811608
0.834977
0.854433
0.869684
0.880737
0.887807
0.891235
0.891421
0.888782
0.883723
0.87662
0.867813
0.857604
0.846255
0.833993
0.821013
0.807479
0.793527
0.779265
0.764795
0.750207
0.735577
0.72097
0.706439
0.69203
0.677778
0.663716
0.649868
0.636253
0.622887
0.609782
0.596946
0.584387
0.572106
0.560107
0.54839
0.536953
0.525793
0.514909
0.504295
0.493946
0.483859
0.474026
0.464443
0.455103
0.446
0.437128
0.428479
0.420049
0.411831
0.403818
0.396005
0.388386
0.380956
0.373708
0.366637
0.359739
0.353008
0.34644
0.340031
0.333776
0.327672
0.321715
0.315902
0.310229
0.304694
0.299295
0.294029
0.288896
0.283893
0.27902
0.274277
0.269664
0.265182
0.260832
0.256617
0.25254
0.248605
0.244819
0.241189
0.237722
0.234431
0.231329
0.228433
0.225764
0.223346
0.221213
0.219403
0.217966
0.216966
0.216482
0.216622
0.217522
0.219366
0.222397
0.226937
0.233399
0.242302
0.254264
0.269967
0.29011
0.315359
0.346363
0.383898
0.429244
0.484915
0.555777
0.650143
0.775234
0.914765
0.955171
0.553803
0.249101
0.491928
0.660507
0.809669
0.954967
1.1014
1.2513
1.40604
1.56633
1.73234
1.90371
2.07965
2.25898
2.44022
2.62174
2.8018
2.97869
3.15078
3.31654
3.47464
3.62392
3.7634
3.89233
4.01013
4.11639
4.21089
4.29352
4.36429
4.4233
4.47071
4.50672
4.53154
4.5454
4.54858
4.54141
4.52429
4.49767
4.46202
4.4178
4.36545
4.30492
4.23665
4.16116
4.07884
3.99008
3.89521
3.79447
3.688
3.57582
3.45787
3.334
3.20394
3.06738
2.92402
2.77371
2.61653
2.45298
2.28404
2.11134
1.93709
1.76401
1.59515
1.43368
1.28255
1.14431
1.02087
0.913431
0.822432
0.747611
0.688109
0.642598
0.609423
0.58675
0.572702
0.565484
0.563496
0.56542
0.570282
0.577481
0.586795
0.598337
0.612464
0.629602
0.65002
0.673599
0.69971
0.727252
0.754858
0.781153
0.804979
0.825518
0.842306
0.85519
0.864251
0.869728
0.871947
0.871276
0.868087
0.862738
0.855563
0.846859
0.836893
0.825895
0.814067
0.80158
0.788573
0.775164
0.761468
0.747579
0.733581
0.719544
0.705527
0.691579
0.677744
0.664054
0.65054
0.637225
0.624126
0.611259
0.598636
0.586265
0.574151
0.562298
0.550709
0.539384
0.528322
0.517521
0.506978
0.496689
0.486651
0.476859
0.467308
0.457992
0.448905
0.440043
0.4314
0.422969
0.414746
0.406723
0.398897
0.391261
0.38381
0.376539
0.369443
0.362517
0.355756
0.349157
0.342714
0.336424
0.330284
0.324289
0.318438
0.312725
0.30715
0.30171
0.296403
0.291226
0.28618
0.281263
0.276474
0.271815
0.267285
0.262887
0.258622
0.254493
0.250505
0.246663
0.242973
0.239445
0.236089
0.232917
0.229947
0.227196
0.224691
0.222462
0.220545
0.21899
0.217856
0.217222
0.21719
0.217895
0.219518
0.2223
0.226568
0.232753
0.241408
0.253213
0.268952
0.289459
0.315566
0.348092
0.387984
0.436702
0.497007
0.574188
0.677275
0.814083
0.967263
1.01398
0.588633
0.241774
0.479268
0.648208
0.797382
0.941451
1.08523
1.23107
1.38048
1.53443
1.69337
1.85726
2.02558
2.19742
2.37154
2.54648
2.72065
2.89245
3.06028
3.22266
3.37825
3.52584
3.66443
3.79319
3.91149
4.01885
4.11498
4.1997
4.27297
4.33483
4.3854
4.42484
4.45329
4.47097
4.47808
4.47493
4.46186
4.43929
4.40767
4.36744
4.31895
4.26209
4.19761
4.12588
4.04721
3.96191
3.87023
3.77231
3.66818
3.5578
3.44097
3.31743
3.18682
3.04876
2.90294
2.74922
2.58783
2.41947
2.24544
2.06769
1.88884
1.71197
1.54042
1.37756
1.22643
1.08951
0.968598
0.864643
0.777821
0.707586
0.652803
0.611903
0.58303
0.564196
0.553424
0.548868
0.548921
0.552286
0.558017
0.565533
0.57461
0.585337
0.598049
0.613196
0.631158
0.652046
0.675543
0.700867
0.726896
0.752375
0.776142
0.79728
0.815187
0.829558
0.840342
0.847664
0.851767
0.852957
0.851569
0.847933
0.842367
0.835163
0.826586
0.816867
0.806211
0.794794
0.782758
0.770235
0.757345
0.744189
0.730856
0.717421
0.70395
0.690496
0.677106
0.66382
0.650669
0.637681
0.624877
0.612275
0.599889
0.58773
0.575806
0.564123
0.552685
0.541493
0.530548
0.51985
0.509397
0.499187
0.489217
0.479482
0.469978
0.460702
0.451648
0.442812
0.434187
0.42577
0.417554
0.409535
0.401708
0.394067
0.386607
0.379324
0.372213
0.36527
0.358489
0.351868
0.345401
0.339086
0.332918
0.326895
0.321013
0.315269
0.309661
0.304187
0.298845
0.293633
0.28855
0.283595
0.278767
0.274068
0.269496
0.265055
0.260745
0.25657
0.252533
0.24864
0.244897
0.241313
0.237896
0.23466
0.231619
0.228793
0.226206
0.223886
0.221869
0.220202
0.218942
0.218166
0.217972
0.218491
0.2199
0.22244
0.226439
0.23234
0.240727
0.252343
0.268071
0.288896
0.315835
0.349915
0.392293
0.444646
0.510011
0.594126
0.706741
0.856328
1.02452
1.07843
0.62646
0.234277
0.466798
0.636149
0.785223
0.927833
1.06864
1.21007
1.35375
1.50091
1.65229
1.80813
1.96822
2.13189
2.29816
2.46576
2.63326
2.79918
2.96198
3.12024
3.27261
3.41788
3.555
3.6831
3.80147
3.90959
4.00708
4.09371
4.16938
4.23408
4.28787
4.33085
4.36315
4.3849
4.39628
4.39752
4.38893
4.37089
4.3438
4.30806
4.26385
4.21128
4.15112
4.08362
4.00902
3.92754
3.83933
3.74442
3.64275
3.53414
3.41829
3.29481
3.16327
3.02323
2.87438
2.7167
2.5506
2.37705
2.19772
2.01498
1.83184
1.65177
1.47838
1.31518
1.16522
1.03089
0.913714
0.814386
0.732749
0.667941
0.618539
0.582726
0.558456
0.543608
0.536132
0.534167
0.536137
0.540802
0.547281
0.555042
0.563882
0.573887
0.585367
0.59877
0.614535
0.632921
0.653853
0.676836
0.701006
0.725276
0.748539
0.769837
0.788462
0.80398
0.816208
0.825158
0.830982
0.833917
0.834247
0.832272
0.828285
0.822569
0.815379
0.806948
0.797481
0.787155
0.776116
0.764508
0.752454
0.740062
0.727425
0.714624
0.70173
0.6888
0.675887
0.663033
0.650274
0.637641
0.625159
0.612848
0.600725
0.588803
0.577092
0.565601
0.554335
0.543298
0.532491
0.521916
0.511573
0.501459
0.491573
0.481913
0.472474
0.463254
0.454247
0.445451
0.436859
0.428469
0.420275
0.412272
0.404456
0.396821
0.389365
0.382081
0.374966
0.368015
0.361225
0.354591
0.34811
0.341778
0.335591
0.329547
0.323643
0.317876
0.312243
0.306743
0.301372
0.296131
0.291018
0.286031
0.28117
0.276436
0.271829
0.26735
0.263
0.258784
0.254704
0.250765
0.246973
0.243336
0.239864
0.236568
0.233462
0.230566
0.227902
0.225497
0.223386
0.221613
0.220236
0.219325
0.218977
0.21932
0.220526
0.222832
0.226566
0.232181
0.240286
0.25168
0.267352
0.288441
0.316174
0.351824
0.396802
0.453054
0.523938
0.615679
0.738789
0.902502
1.0875
1.14983
0.667929
0.226597
0.454619
0.624398
0.773207
0.914093
1.05165
1.18837
1.32608
1.4662
1.60973
1.7572
1.90867
2.06375
2.22166
2.38137
2.54162
2.70102
2.85818
3.01168
3.16022
3.30258
3.43769
3.56463
3.68266
3.79117
3.88974
3.97806
4.05598
4.12343
4.18043
4.22702
4.26329
4.2893
4.30519
4.31113
4.30737
4.29423
4.27207
4.24122
4.20162
4.15386
4.09841
4.03547
3.96522
3.88775
3.80312
3.71124
3.61192
3.50485
3.38961
3.26571
3.13264
2.98995
2.83739
2.67511
2.50376
2.32467
2.13995
1.95243
1.76555
1.58311
1.40897
1.24671
1.09931
0.968933
0.856823
0.7633
0.687847
0.629258
0.585814
0.55546
0.535978
0.525146
0.520877
0.521339
0.525026
0.530792
0.537848
0.545737
0.554296
0.563617
0.573995
0.585858
0.599663
0.615761
0.634251
0.654878
0.677024
0.699796
0.722191
0.743259
0.762229
0.77856
0.791953
0.802307
0.809678
0.81423
0.816187
0.815809
0.813364
0.809114
0.803307
0.796168
0.787902
0.778684
0.768666
0.758
0.746811
0.735213
0.723303
0.711167
0.69888
0.686506
0.674099
0.661706
0.649368
0.637118
0.624984
0.612989
0.601154
0.589494
0.57802
0.566744
0.555673
0.544811
0.534163
0.523731
0.513515
0.503517
0.493734
0.484165
0.474807
0.465658
0.456715
0.447973
0.43943
0.43108
0.42292
0.414946
0.407153
0.399538
0.392095
0.384822
0.377713
0.370766
0.363976
0.357339
0.350852
0.344512
0.338315
0.332259
0.326341
0.320557
0.314907
0.309387
0.303996
0.298732
0.293594
0.288582
0.283694
0.278931
0.274293
0.269782
0.265398
0.261146
0.257027
0.253047
0.249211
0.245527
0.242003
0.238652
0.235487
0.232525
0.229789
0.227304
0.225105
0.223234
0.221746
0.220709
0.220217
0.220393
0.221405
0.223487
0.226964
0.232293
0.240105
0.251251
0.266817
0.288111
0.316588
0.353805
0.401481
0.46189
0.538775
0.638918
0.773659
0.953186
1.1573
1.22983
0.713866
0.218754
0.442865
0.613019
0.761312
0.900169
1.0342
1.16605
1.2977
1.43074
1.5664
1.70546
1.84822
1.99453
2.14386
2.29537
2.44797
2.60043
2.75144
2.89968
3.04387
3.18281
3.31542
3.44074
3.558
3.66653
3.76584
3.85559
3.93556
4.00561
4.06572
4.11589
4.15615
4.18651
4.20705
4.21787
4.21916
4.21119
4.19424
4.16851
4.13387
4.09117
4.04066
3.98245
3.91663
3.84318
3.76202
3.67295
3.57563
3.46963
3.35441
3.22938
3.09399
2.94781
2.79075
2.62316
2.44605
2.2612
2.07118
1.87933
1.68951
1.50583
1.33232
1.17254
1.02926
0.904364
0.798696
0.712159
0.643835
0.592166
0.555138
0.530481
0.515834
0.508914
0.507651
0.510286
0.515425
0.522048
0.529482
0.537359
0.545567
0.554213
0.563578
0.574063
0.586123
0.600151
0.616365
0.634697
0.654749
0.675829
0.69707
0.717576
0.73655
0.753379
0.76766
0.77919
0.787928
0.793957
0.79744
0.798589
0.797639
0.794829
0.790392
0.784546
0.777488
0.769392
0.760419
0.75072
0.740424
0.729649
0.718496
0.707055
0.695406
0.683616
0.671745
0.659843
0.647953
0.636114
0.624355
0.612704
0.601182
0.589807
0.578595
0.567557
0.556702
0.546037
0.535568
0.525299
0.515231
0.505366
0.495704
0.486244
0.476984
0.467923
0.459058
0.450386
0.441905
0.43361
0.425498
0.417565
0.409808
0.402224
0.394807
0.387555
0.380464
0.373529
0.366749
0.360119
0.353636
0.347296
0.341098
0.335038
0.329113
0.323321
0.31766
0.312128
0.306722
0.301443
0.296287
0.291255
0.286346
0.28156
0.276897
0.272359
0.267947
0.263663
0.25951
0.255494
0.251619
0.247892
0.244322
0.24092
0.2377
0.234678
0.231875
0.229317
0.227036
0.225073
0.22348
0.222326
0.221699
0.221718
0.222548
0.224416
0.227644
0.232691
0.240203
0.251075
0.266491
0.287922
0.317079
0.355838
0.406287
0.471099
0.554482
0.663876
0.81157
1.009
1.23519
1.32049
0.765344
0.210796
0.431749
0.602111
0.749513
0.886009
1.01633
1.14329
1.26899
1.39518
1.52325
1.65414
1.78839
1.92606
2.06683
2.21007
2.35487
2.50014
2.64469
2.78729
2.92671
3.06178
3.19143
3.31469
3.43074
3.53889
3.63859
3.72944
3.81116
3.88358
3.94661
4.00022
4.04438
4.07906
4.10424
4.12
4.12644
4.12375
4.11212
4.09147
4.06207
4.02451
3.97895
3.92543
3.86389
3.79422
3.71619
3.62945
3.53353
3.42786
3.3118
3.18469
3.04598
2.89536
2.73293
2.55936
2.3761
2.18543
1.99048
1.79507
1.60347
1.42004
1.24884
1.0933
0.955907
0.8381
0.740269
0.661849
0.601502
0.557314
0.527003
0.508112
0.498188
0.494942
0.496374
0.50085
0.507129
0.514341
0.521943
0.529661
0.537439
0.545401
0.553817
0.56306
0.573561
0.58573
0.599854
0.616004
0.633968
0.653244
0.673121
0.692789
0.711466
0.728496
0.743397
0.755872
0.765789
0.773148
0.778047
0.780644
0.781134
0.779731
0.776648
0.772089
0.766246
0.759285
0.751377
0.742668
0.733291
0.723366
0.712999
0.702284
0.691302
0.680127
0.668821
0.657438
0.646026
0.634625
0.623269
0.611987
0.600804
0.58974
0.578813
0.568036
0.55742
0.546975
0.536706
0.52662
0.516719
0.507007
0.497484
0.48815
0.479005
0.470048
0.461278
0.452691
0.444286
0.436059
0.428009
0.420131
0.412423
0.404881
0.397502
0.390283
0.383219
0.376309
0.369549
0.362935
0.356464
0.350135
0.343943
0.337887
0.331964
0.326172
0.320507
0.31497
0.309557
0.304268
0.299101
0.294056
0.289132
0.284329
0.279647
0.275087
0.270651
0.266341
0.26216
0.258112
0.254202
0.250438
0.246827
0.24338
0.240109
0.237032
0.234168
0.231541
0.229184
0.227136
0.225447
0.224182
0.223429
0.223302
0.223961
0.225627
0.228617
0.233388
0.240597
0.251174
0.266392
0.287891
0.317646
0.357897
0.411162
0.4806
0.570979
0.690539
0.852688
1.07058
1.3226
1.42449
0.823781
0.20279
0.421623
0.591855
0.737895
0.871765
0.998364
1.12067
1.24086
1.36073
1.48176
1.60504
1.73125
1.86066
1.99315
2.12826
2.26528
2.40327
2.54118
2.67786
2.81217
2.94299
3.06925
3.19
3.3044
3.41172
3.51138
3.60293
3.68604
3.76049
3.82614
3.88292
3.93076
3.96956
3.99925
4.01983
4.03133
4.03385
4.02744
4.01177
3.98753
3.95498
3.91416
3.865
3.80735
3.74094
3.66539
3.58021
3.48479
3.37843
3.26041
3.13007
2.9869
2.83079
2.66212
2.48201
2.29241
2.09619
1.89704
1.69926
1.50746
1.32614
1.15927
1.00998
0.880346
0.771278
0.682633
0.61335
0.561667
0.525336
0.501843
0.488598
0.483115
0.483168
0.486889
0.492819
0.499899
0.507426
0.514988
0.522406
0.529678
0.536945
0.544463
0.552576
0.561685
0.57219
0.584417
0.598531
0.614467
0.631897
0.650267
0.668877
0.686996
0.703955
0.719218
0.73241
0.743313
0.751849
0.758043
0.761999
0.763869
0.763833
0.762079
0.758799
0.754171
0.748358
0.741531
0.733834
0.725402
0.716354
0.706801
0.69684
0.686556
0.676025
0.665314
0.65448
0.643574
0.63264
0.621714
0.610828
0.60001
0.589283
0.578665
0.568173
0.557819
0.547615
0.537568
0.527686
0.517973
0.508432
0.499066
0.489877
0.480865
0.472029
0.463368
0.454882
0.446569
0.438426
0.430451
0.422641
0.414995
0.407508
0.400179
0.393003
0.385979
0.379103
0.372373
0.365785
0.359337
0.353027
0.346851
0.340808
0.334894
0.329109
0.323449
0.317914
0.312501
0.30721
0.302038
0.296986
0.292053
0.287239
0.282543
0.277968
0.273514
0.269183
0.264979
0.260905
0.256967
0.25317
0.249522
0.246035
0.24272
0.239593
0.236672
0.233984
0.231557
0.22943
0.227651
0.226285
0.225415
0.225152
0.22565
0.227126
0.22989
0.234393
0.241299
0.251565
0.266541
0.288032
0.31829
0.359952
0.416038
0.490281
0.588135
0.718812
0.897084
1.13852
1.42104
1.54541
0.891095
0.194781
0.413056
0.58272
0.727102
0.858476
0.981833
1.10019
1.21569
1.33011
1.44494
1.56135
1.68018
1.80184
1.92641
2.05364
2.18297
2.31366
2.44476
2.57528
2.70415
2.8303
2.95273
3.07049
3.18272
3.2887
3.38781
3.47955
3.56356
3.63956
3.7074
3.76693
3.81805
3.86062
3.8945
3.91959
3.93587
3.94331
3.94165
3.9308
3.91135
3.88337
3.84678
3.80139
3.7469
3.6829
3.60887
3.52415
3.42802
3.31967
3.19834
3.06342
2.91456
2.75191
2.57627
2.38928
2.1935
1.99242
1.79029
1.59185
1.40191
1.22496
1.06473
0.923897
0.803962
0.70524
0.627004
0.56769
0.525133
0.496797
0.47999
0.47206
0.470571
0.473431
0.478966
0.485931
0.49347
0.501049
0.508384
0.515379
0.522081
0.528643
0.535308
0.54239
0.550254
0.559277
0.569796
0.582037
0.596045
0.61164
0.62842
0.645809
0.66315
0.679793
0.695173
0.708856
0.72055
0.730099
0.737461
0.742679
0.745858
0.74714
0.746687
0.744671
0.741254
0.736598
0.730867
0.724202
0.716737
0.708593
0.699882
0.690704
0.681147
0.67129
0.661204
0.650949
0.640578
0.630139
0.619671
0.609209
0.598782
0.588417
0.578134
0.567951
0.557883
0.547943
0.53814
0.528483
0.518977
0.509629
0.50044
0.491414
0.482551
0.473853
0.46532
0.456951
0.448745
0.4407
0.432815
0.425087
0.417515
0.410097
0.402829
0.39571
0.388736
0.381906
0.375216
0.368665
0.36225
0.355968
0.349817
0.343795
0.3379
0.33213
0.326483
0.320958
0.315552
0.310265
0.305096
0.300044
0.295108
0.290289
0.285587
0.281002
0.276536
0.27219
0.267969
0.263875
0.259912
0.256089
0.252411
0.248889
0.245534
0.242363
0.239393
0.236648
0.234158
0.231959
0.230099
0.228639
0.227661
0.227272
0.227622
0.22892
0.23147
0.235716
0.242321
0.252262
0.266956
0.28836
0.319008
0.361971
0.420832
0.500004
0.605758
0.748496
0.944674
1.21325
1.53201
1.68827
0.969958
0.186703
0.407206
0.576651
0.72037
0.850707
0.972415
1.08832
1.20041
1.3104
1.41983
1.52997
1.6418
1.75595
1.87269
1.99198
2.11346
2.23655
2.36049
2.48439
2.60729
2.7282
2.84618
2.96029
3.0697
3.17366
3.27155
3.36282
3.44707
3.524
3.59338
3.65507
3.7089
3.75467
3.79219
3.82128
3.84181
3.85358
3.85598
3.84944
3.83411
3.80995
3.77673
3.73415
3.68176
3.61899
3.54515
3.45947
3.36109
3.24919
3.12301
2.98207
2.82629
2.6562
2.47312
2.27933
2.07804
1.87338
1.67011
1.47328
1.28778
1.11787
0.966825
0.836734
0.728407
0.641491
0.574664
0.525882
0.492626
0.472137
0.461627
0.458476
0.460384
0.465471
0.47231
0.479893
0.487569
0.494966
0.501912
0.508386
0.51447
0.520326
0.526182
0.532321
0.53907
0.546777
0.555771
0.566309
0.578517
0.592332
0.60749
0.623544
0.639929
0.656039
0.671308
0.685261
0.697547
0.707942
0.716333
0.722706
0.727113
0.729657
0.730469
0.729695
0.727481
0.723987
0.719363
0.713748
0.707272
0.700057
0.692215
0.683849
0.675048
0.665896
0.656464
0.646817
0.637011
0.627096
0.617115
0.607104
0.597096
0.587119
0.577196
0.567347
0.557589
0.547937
0.5384
0.52899
0.519714
0.510577
0.501585
0.492741
0.484048
0.475506
0.467117
0.458881
0.450798
0.442866
0.435086
0.427455
0.419972
0.412635
0.405442
0.398391
0.39148
0.384707
0.378069
0.371565
0.365192
0.358949
0.352832
0.346841
0.340974
0.335228
0.329602
0.324094
0.318704
0.313429
0.30827
0.303225
0.298294
0.293477
0.288774
0.284186
0.279714
0.27536
0.271127
0.267019
0.263039
0.259194
0.255492
0.251941
0.248554
0.245345
0.242331
0.239537
0.23699
0.234726
0.232792
0.231248
0.23017
0.229666
0.229879
0.231013
0.233362
0.237363
0.243672
0.253278
0.267652
0.288889
0.319803
0.363919
0.425451
0.509594
0.623584
0.779248
0.995134
1.29489
1.65673
1.86064
1.06423
0.178583
0.408561
0.583631
0.732758
0.867133
0.990821
1.10638
1.2159
1.32136
1.42465
1.52741
1.63097
1.73623
1.84376
1.95375
2.06605
2.18028
2.29582
2.41192
2.5277
2.64227
2.75471
2.86412
2.96969
3.07064
3.16633
3.2562
3.33978
3.41674
3.48682
3.54981
3.60553
3.6537
3.69407
3.72636
3.75032
3.76536
3.77108
3.76787
3.75563
3.73416
3.7031
3.662
3.61024
3.54711
3.47179
3.38339
3.281
3.16378
3.03112
2.88276
2.719
2.54088
2.35039
2.15046
1.94504
1.73882
1.53698
1.3447
1.16669
1.00679
0.867632
0.750538
0.655554
0.581632
0.526885
0.488844
0.464716
0.451611
0.446753
0.44766
0.452262
0.458955
0.466589
0.4744
0.48193
0.488949
0.495382
0.501262
0.506699
0.511858
0.516949
0.522227
0.527979
0.53452
0.54216
0.551167
0.561717
0.573839
0.587387
0.602038
0.61733
0.632722
0.647669
0.661679
0.674358
0.685422
0.694696
0.702105
0.70765
0.711389
0.713419
0.71386
0.712837
0.710504
0.706996
0.702444
0.696977
0.690714
0.683768
0.67624
0.668226
0.659807
0.65106
0.642052
0.632841
0.623479
0.614013
0.604482
0.594921
0.585359
0.575823
0.566334
0.556911
0.547569
0.538323
0.529182
0.520157
0.511254
0.502479
0.493837
0.485331
0.476964
0.468737
0.460652
0.452708
0.444907
0.437246
0.429727
0.422347
0.415105
0.408
0.401031
0.394195
0.387491
0.380916
0.374471
0.368151
0.361957
0.355885
0.349935
0.344104
0.338391
0.332795
0.327313
0.321947
0.316693
0.311551
0.306521
0.301602
0.296794
0.292098
0.287513
0.283043
0.278688
0.27445
0.270334
0.266343
0.262484
0.258764
0.255191
0.251777
0.248537
0.245487
0.24265
0.240053
0.237733
0.235733
0.234112
0.232945
0.232336
0.232424
0.233406
0.235568
0.239336
0.245357
0.254624
0.268644
0.289634
0.320675
0.365762
0.429794
0.518847
0.641269
0.810547
1.0478
1.38291
1.79585
2.07458
1.17973
0.173595
0.444148
0.656981
0.833148
0.982529
1.11088
1.22312
1.32374
1.41679
1.50566
1.59305
1.68094
1.77071
1.86318
1.95874
2.0574
2.15887
2.2626
2.3679
2.47393
2.5798
2.68461
2.78743
2.88741
2.98377
3.07579
3.16286
3.24447
3.32022
3.38979
3.45293
3.5094
3.55887
3.60098
3.63538
3.66154
3.67846
3.68635
3.68512
3.67453
3.65422
3.62369
3.58232
3.52936
3.46394
3.38515
3.29202
3.18364
3.05931
2.91862
2.76168
2.5893
2.40319
2.20602
2.0015
1.79421
1.58935
1.39227
1.20807
1.04103
0.894304
0.769688
0.667636
0.587393
0.527249
0.484815
0.457296
0.441733
0.435232
0.435154
0.439266
0.445808
0.453494
0.461455
0.469156
0.47631
0.482801
0.488629
0.49387
0.49865
0.503129
0.507503
0.511995
0.516858
0.522369
0.528814
0.536457
0.5455
0.556039
0.568025
0.581246
0.595347
0.609867
0.624303
0.638168
0.651041
0.662592
0.672592
0.680907
0.687486
0.692343
0.695536
0.697155
0.697306
0.696126
0.693735
0.690259
0.685817
0.680527
0.674501
0.667841
0.66064
0.652986
0.644954
0.636614
0.628028
0.61925
0.610329
0.601307
0.59222
0.583102
0.573979
0.564876
0.555813
0.546808
0.537875
0.529028
0.520276
0.511628
0.503092
0.494672
0.486374
0.4782
0.470154
0.462237
0.454451
0.446796
0.439272
0.431879
0.424617
0.417486
0.410483
0.403608
0.39686
0.390238
0.38374
0.377364
0.37111
0.364975
0.358959
0.353059
0.347275
0.341605
0.336048
0.330602
0.325267
0.320042
0.314927
0.309919
0.305021
0.30023
0.295548
0.290975
0.286513
0.282164
0.277929
0.273812
0.269818
0.265952
0.26222
0.258633
0.2552
0.251935
0.248856
0.245984
0.243346
0.240976
0.238919
0.23723
0.235984
0.23528
0.235254
0.236097
0.238086
0.241635
0.24738
0.256305
0.269943
0.290608
0.32163
0.367472
0.43376
0.527537
0.65839
0.841661
1.10151
1.4756
1.94893
2.34886
1.32537
0.197238
0.679521
1.0359
1.27184
1.42696
1.53069
1.60339
1.65882
1.70621
1.75157
1.79873
1.84997
1.90659
1.96922
2.03797
2.1126
2.19259
2.2772
2.36554
2.45662
2.54937
2.64275
2.7357
2.82726
2.9165
3.00261
3.08486
3.16266
3.23548
3.30294
3.3647
3.42046
3.46978
3.5122
3.5472
3.57367
3.59109
3.59945
3.59844
3.58766
3.56658
3.53456
3.49082
3.43447
3.36456
3.28009
3.18012
3.06386
2.93079
2.78089
2.61477
2.4339
2.24071
2.03866
1.83216
1.62636
1.4267
1.2385
1.06642
0.914032
0.783578
0.675887
0.590493
0.525876
0.479746
0.449331
0.431638
0.423689
0.422734
0.426404
0.432813
0.440557
0.448677
0.456567
0.463888
0.47049
0.476349
0.481518
0.486099
0.490226
0.494054
0.497759
0.501537
0.505607
0.510213
0.515609
0.522045
0.529735
0.538816
0.549315
0.561117
0.573968
0.587495
0.601254
0.614784
0.627656
0.639508
0.650066
0.659146
0.666645
0.672529
0.676818
0.679569
0.680866
0.680825
0.679551
0.677156
0.673754
0.669458
0.664375
0.658607
0.652249
0.645387
0.638101
0.63046
0.62253
0.614366
0.60602
0.597537
0.588954
0.580307
0.571626
0.562935
0.554259
0.545616
0.537022
0.528492
0.520037
0.511667
0.503391
0.495215
0.487145
0.479185
0.471339
0.463609
0.455998
0.448507
0.441136
0.433886
0.426758
0.419752
0.412866
0.4061
0.399454
0.392927
0.386517
0.380224
0.374046
0.367983
0.362033
0.356195
0.350468
0.344851
0.339343
0.333943
0.32865
0.323463
0.318382
0.313407
0.308536
0.303771
0.299112
0.294559
0.290113
0.285777
0.281553
0.277443
0.273453
0.269587
0.265852
0.262257
0.258812
0.255531
0.25243
0.249531
0.24686
0.244449
0.242343
0.240596
0.23928
0.238491
0.238363
0.239079
0.240909
0.244254
0.249733
0.258319
0.271552
0.291822
0.322676
0.369032
0.437261
0.535431
0.674459
0.871633
1.15452
1.56943
2.11339
2.70448
1.51396
0.3786
1.63156
2.16369
2.35748
2.39192
2.36325
2.31466
2.26478
2.22174
2.18911
2.1684
2.16007
2.16408
2.18006
2.20741
2.24533
2.29286
2.34886
2.41209
2.48122
2.55488
2.63172
2.71043
2.78979
2.86864
2.94598
3.02088
3.09258
3.16041
3.22384
3.28242
3.33572
3.38318
3.42422
3.45781
3.48265
3.49868
3.50549
3.50262
3.48949
3.46543
3.42963
3.3812
3.31915
3.24248
3.15021
3.0415
2.9158
2.77295
2.61342
2.4385
2.25038
2.0523
1.84847
1.64389
1.44402
1.25429
1.07961
0.923879
0.789696
0.678218
0.58925
0.521455
0.472652
0.440114
0.420841
0.41181
0.410198
0.413547
0.419879
0.427706
0.435995
0.444082
0.451588
0.458332
0.464267
0.469433
0.473918
0.477841
0.481335
0.484547
0.487633
0.490764
0.49413
0.49794
0.502418
0.507793
0.514277
0.522031
0.531136
0.541557
0.553131
0.565576
0.578518
0.591538
0.60422
0.616187
0.627131
0.636824
0.645116
0.651928
0.657237
0.661066
0.663477
0.664559
0.664396
0.663088
0.660738
0.657454
0.653338
0.648491
0.643005
0.636967
0.630458
0.623549
0.616307
0.60879
0.601051
0.593136
0.585088
0.576943
0.568732
0.560483
0.552221
0.543966
0.535737
0.527549
0.519416
0.511348
0.503355
0.495444
0.487624
0.479898
0.472272
0.464749
0.457331
0.450022
0.442822
0.435732
0.428754
0.421888
0.415135
0.408493
0.401963
0.395544
0.389236
0.383038
0.37695
0.37097
0.365098
0.359333
0.353674
0.34812
0.342671
0.337326
0.332085
0.326945
0.321909
0.316974
0.312141
0.30741
0.302781
0.298256
0.293835
0.289521
0.285315
0.28122
0.277242
0.273384
0.269653
0.266059
0.26261
0.25932
0.256206
0.253287
0.25059
0.248147
0.246
0.244203
0.242825
0.241962
0.241741
0.242342
0.244025
0.247178
0.252404
0.260656
0.27347
0.293285
0.323828
0.370443
0.440237
0.542324
0.688967
0.899328
1.2044
1.65837
2.28035
3.13746
1.76302
1.71027
2.91483
1.70403
1.4798
1.40405
1.35298
1.30941
1.27046
1.23537
1.20369
1.17514
1.14944
1.12634
1.10556
1.08687
1.07006
1.05479
1.04118
1.02788
1.01884
1.71471
7.09077
4.15034
3.50143
3.12939
2.86676
2.65843
2.48417
2.33497
2.20553
2.09222
1.99232
1.90376
1.82482
1.75413
1.69055
1.6331
1.58097
1.53354
1.48838
1.89707
6.24035
4.97025
4.37233
3.94203
3.61188
3.34128
3.11184
2.91407
2.74187
2.59079
2.4574
2.33897
2.23327
2.13849
2.05309
1.97584
1.90556
1.84176
1.78123
2.13133
4.69787
4.94779
4.74024
4.40032
4.07766
3.79005
3.5364
3.3131
3.11621
2.94211
2.78758
2.64988
2.52666
2.41595
2.31604
2.22556
2.14312
2.06843
1.99801
2.34221
3.69869
4.47711
4.68842
4.55063
4.31212
4.05668
3.81176
3.58612
3.38166
3.1976
3.03223
2.88359
2.74973
2.62886
2.51938
2.41995
2.32913
2.24687
2.1697
2.51988
3.18585
3.92798
4.36792
4.4428
4.33489
4.15279
3.9479
3.74332
3.54892
3.36848
3.20291
3.05182
2.91421
2.78889
2.6746
2.57025
2.47452
2.38769
2.30645
2.65668
2.94084
3.46542
3.94537
4.16484
4.1874
4.09837
3.95539
3.79108
3.62259
3.45861
3.30324
3.15816
3.02375
2.89973
2.78546
2.68027
2.58312
2.49471
2.412
2.74851
2.82152
3.13958
3.54184
3.81401
3.92969
3.92805
3.8542
3.74124
3.61
3.47277
3.33656
3.20515
3.08044
2.96322
2.85365
2.75163
2.65652
2.56946
2.48785
2.796
2.7543
2.92102
3.20969
3.46437
3.62298
3.68458
3.67258
3.61227
3.5233
3.41913
3.30846
3.19671
3.0871
2.9815
2.88085
2.78568
2.69585
2.61294
2.53484
2.80264
2.70339
2.76811
2.95201
3.15473
3.3145
3.40904
3.44195
3.42701
3.37881
3.3093
3.22728
3.13887
3.04818
2.95785
2.86953
2.78434
2.7026
2.62632
2.55389
2.7733
2.65177
2.65119
2.75243
2.89591
3.03197
3.13321
3.19113
3.20903
3.19489
3.15728
3.1036
3.03966
2.96974
2.89691
2.82326
2.75036
2.67893
2.61129
2.54634
2.71337
2.59174
2.552
2.59373
2.68384
2.78628
2.87681
2.94229
2.97915
2.98947
2.97781
2.94926
2.90845
2.85921
2.80454
2.74668
2.68742
2.62776
2.5702
2.51406
2.62822
2.5203
2.45983
2.46172
2.50899
2.57752
2.64868
2.70948
2.75319
2.77788
2.78466
2.77608
2.75515
2.72476
2.68744
2.64525
2.59997
2.55273
2.50603
2.45949
2.52294
2.43689
2.36863
2.34556
2.36122
2.4004
2.45005
2.49938
2.5412
2.57159
2.58929
2.59472
2.58925
2.57461
2.55262
2.5249
2.49301
2.45801
2.42227
2.38557
2.40226
2.34226
2.27513
2.23801
2.23192
2.24807
2.27788
2.31333
2.34811
2.37786
2.4001
2.41381
2.41902
2.41639
2.4069
2.39162
2.37172
2.34809
2.32277
2.29563
2.27045
2.23783
2.17779
2.13446
2.1146
2.11399
2.12746
2.14944
2.17502
2.20026
2.22236
2.23958
2.25105
2.25658
2.25641
2.25104
2.2412
2.22748
2.21155
2.19321
2.1314
2.1254
2.07616
2.03217
2.00464
1.9928
1.99389
2.00434
2.02062
2.03958
2.05864
2.07586
2.08996
2.10019
2.10624
2.10811
2.1061
2.10048
2.09252
2.08187
1.98858
2.00697
1.97051
1.92965
1.89896
1.88041
1.8729
1.87435
1.88243
1.89482
1.90943
1.92448
1.93857
1.95071
1.96025
1.96683
1.9704
1.97095
1.96933
1.96503
1.84506
1.88462
1.86156
1.82627
1.79558
1.7739
1.76105
1.75608
1.75761
1.76408
1.77393
1.7857
1.79813
1.81017
1.82104
1.83018
1.83728
1.84212
1.84518
1.84579
1.70356
1.7604
1.75035
1.72202
1.69369
1.67138
1.65577
1.6467
1.64346
1.64512
1.65061
1.65884
1.66876
1.67945
1.69012
1.70012
1.70904
1.71649
1.72268
1.72681
1.56637
1.63634
1.63813
1.6173
1.59282
1.57166
1.55527
1.54401
1.53767
1.53579
1.53769
1.54263
1.54983
1.55854
1.56805
1.57775
1.58717
1.59587
1.60385
1.61023
1.43545
1.51431
1.52624
1.51279
1.49294
1.47412
1.45837
1.44642
1.43842
1.43419
1.43342
1.43564
1.44031
1.44688
1.45476
1.46344
1.47245
1.48137
1.49007
1.49766
1.3123
1.39602
1.41605
1.40938
1.39437
1.37858
1.36442
1.35288
1.34433
1.33883
1.33628
1.33645
1.33902
1.34358
1.34973
1.35704
1.36511
1.37357
1.38221
1.39017
1.19806
1.28296
1.30889
1.30804
1.29766
1.28516
1.27314
1.26275
1.25447
1.24855
1.24502
1.24383
1.2448
1.24769
1.25223
1.2581
1.26499
1.27258
1.28065
1.28842
1.0934
1.17633
1.20598
1.20976
1.20351
1.19421
1.18456
1.17572
1.16829
1.16256
1.1587
1.15674
1.15663
1.15826
1.16145
1.16599
1.17167
1.17821
1.18545
1.19265
0.998591
1.07704
1.10836
1.11548
1.11264
1.10623
1.09889
1.09178
1.0855
1.08039
1.07666
1.07442
1.07369
1.07445
1.07661
1.08005
1.08461
1.09012
1.0964
1.10286
0.913512
0.985651
1.01685
1.02605
1.02577
1.02176
1.0165
1.01107
1.00605
1.00179
0.998497
0.996328
0.995356
0.995603
0.997048
0.999629
1.00326
1.00782
1.01319
1.01885
0.837673
0.902368
0.932018
0.942102
0.943543
0.941352
0.937802
0.933862
0.930053
0.926698
0.924012
0.922143
0.921188
0.9212
0.9222
0.924171
0.927077
0.930846
0.935404
0.940312
0.770296
0.82706
0.854112
0.864096
0.866445
0.86547
0.863195
0.860455
0.857698
0.8552
0.853152
0.851688
0.850908
0.850879
0.851642
0.853212
0.855588
0.858736
0.86262
0.86688
0.710393
0.759291
0.783102
0.792225
0.794793
0.794472
0.793025
0.791148
0.789207
0.787423
0.785948
0.784894
0.784351
0.784385
0.785049
0.786375
0.788385
0.791073
0.794435
0.798177
0.656861
0.69837
0.71867
0.726432
0.728691
0.728549
0.727518
0.726164
0.724768
0.723493
0.722453
0.721732
0.721404
0.721529
0.722163
0.723348
0.725118
0.727488
0.730476
0.733848
0.608559
0.643418
0.660252
0.666411
0.668019
0.667701
0.666743
0.665595
0.664464
0.663463
0.662669
0.662144
0.661946
0.662132
0.662753
0.663857
0.665486
0.667663
0.670427
0.673591
0.564384
0.593437
0.607081
0.611626
0.612426
0.611709
0.610567
0.609353
0.60822
0.607244
0.606482
0.605983
0.605799
0.605983
0.606587
0.607661
0.609249
0.611378
0.614098
0.617263
0.523295
0.547362
0.558239
0.561343
0.561344
0.560128
0.558626
0.557124
0.555746
0.55456
0.553624
0.552993
0.552726
0.552879
0.55351
0.554667
0.556396
0.558721
0.561691
0.565203
0.484325
0.504111
0.512703
0.51467
0.513999
0.51229
0.510332
0.508384
0.506579
0.505018
0.503793
0.502989
0.502689
0.502966
0.503885
0.505496
0.507842
0.510933
0.514806
0.519404
0.446618
0.462669
0.469466
0.470675
0.469541
0.467477
0.465111
0.462729
0.460543
0.45874
0.457489
0.45694
0.457222
0.458435
0.460656
0.463933
0.468294
0.47372
0.480198
0.487716
0.409873
0.422655
0.428195
0.429198
0.428134
0.426336
0.424253
0.422287
0.420816
0.42019
0.420709
0.422619
0.426105
0.431287
0.438223
0.446906
0.45727
0.469181
0.482435
0.497039
0.378186
0.38882
0.394732
0.397761
0.399902
0.402008
0.404415
0.407668
0.412371
0.419054
0.428124
0.439839
0.454299
0.471446
0.49107
0.512824
0.53624
0.560783
0.585818
0.611336
0.39623
0.416227
0.436412
0.45771
0.480211
0.502442
0.52411
0.545745
0.568186
0.592157
0.618114
0.646181
0.67615
0.707528
0.739607
0.771572
0.802601
0.831949
0.859107
0.883964
0.962491
1.08928
1.21236
1.31082
1.37451
1.4072
1.41534
1.40571
1.38454
1.3569
1.32662
1.29637
1.26789
1.24221
1.21986
1.20095
1.18538
1.17281
1.1631
1.15427
)
;
boundaryField
{
inlet
{
type fixedValue;
value uniform 0.375;
}
outlet
{
type zeroGradient;
}
walls
{
type kqRWallFunction;
value nonuniform List<scalar>
560
(
0.58903
0.787733
0.94149
1.04569
1.10669
1.13449
1.13898
1.12832
1.10863
1.0842
1.0579
1.03161
1.00649
0.983269
0.962372
0.944045
0.928437
0.9157
0.905934
0.899815
0.898456
0.897491
0.896855
0.896525
0.896518
0.896852
0.897536
0.898574
0.899966
0.901708
0.90379
0.906197
0.90891
0.911905
0.915155
0.918629
0.922293
0.92611
0.93004
0.934042
0.93807
0.942079
0.946019
0.949841
0.95349
0.956909
0.960021
0.962684
0.96486
0.966503
0.967566
0.967996
0.967736
0.96673
0.964917
0.962238
0.95863
0.954036
0.948396
0.941654
0.933758
0.92466
0.914317
0.902695
0.889765
0.875511
0.859924
0.843012
0.824792
0.805298
0.784578
0.762697
0.739739
0.715801
0.690999
0.665466
0.639349
0.61281
0.586023
0.559172
0.532449
0.50605
0.480171
0.455008
0.430748
0.407566
0.385623
0.36506
0.345991
0.328503
0.312652
0.298458
0.285907
0.274952
0.265515
0.257691
0.251445
0.246411
0.242357
0.239127
0.236609
0.234721
0.233397
0.232581
0.232229
0.232301
0.23275
0.233709
0.235148
0.23686
0.238764
0.240819
0.243004
0.245297
0.247679
0.250128
0.252625
0.255145
0.257687
0.260358
0.263076
0.26579
0.268468
0.271088
0.273634
0.276092
0.278452
0.280706
0.28285
0.284879
0.286793
0.288594
0.290283
0.291865
0.293344
0.294727
0.29602
0.29723
0.298367
0.299436
0.300448
0.30141
0.30233
0.303217
0.304078
0.304922
0.305756
0.306586
0.307419
0.308261
0.309118
0.309995
0.310896
0.311825
0.312786
0.313782
0.314815
0.315887
0.316999
0.318152
0.319348
0.320585
0.321865
0.323187
0.324551
0.325957
0.327403
0.328891
0.33042
0.33199
0.333603
0.33526
0.336962
0.338713
0.340515
0.342373
0.344291
0.346276
0.348335
0.350475
0.352706
0.355038
0.357483
0.360053
0.362763
0.365628
0.368666
0.371895
0.375335
0.379009
0.382941
0.387156
0.391684
0.396553
0.401799
0.407455
0.41356
0.420157
0.427289
0.435006
0.443359
0.452405
0.462205
0.472823
0.48433
0.496801
0.510318
0.524965
0.540837
0.55803
0.576648
0.596798
0.618595
0.642153
0.667587
0.695011
0.724529
0.756234
0.790236
0.826637
0.962491
1.08928
1.21236
1.31082
1.37451
1.4072
1.41534
1.40571
1.38454
1.3569
1.32662
1.29637
1.26789
1.24221
1.21986
1.20095
1.18538
1.17281
1.1631
1.15427
0.58903
0.787733
0.941489
1.04569
1.10669
1.13449
1.13897
1.1283
1.1086
1.08414
1.05779
1.0314
1.00611
0.982592
0.961162
0.941885
0.924586
0.908775
0.89352
0.87631
1.71027
2.91483
1.70403
1.4798
1.40405
1.35298
1.30941
1.27046
1.23537
1.20369
1.17514
1.14944
1.12634
1.10556
1.08687
1.07006
1.05479
1.04118
1.02788
1.01884
0.0345519
0.0459715
0.0628167
0.0864037
0.125061
0.164046
0.201984
0.238802
0.275266
0.311153
0.345273
0.377491
0.407753
0.436064
0.462477
0.48708
0.509986
0.531325
0.55124
0.569878
0.587386
0.603911
0.619589
0.634549
0.648907
0.662766
0.676211
0.689315
0.70213
0.714696
0.727034
0.739153
0.751048
0.762701
0.774086
0.785165
0.795897
0.806233
0.816124
0.825517
0.834362
0.842612
0.850222
0.857157
0.863387
0.8689
0.873699
0.877638
0.880646
0.882696
0.883761
0.883815
0.882833
0.88079
0.877662
0.873423
0.868048
0.861511
0.853787
0.844854
0.834691
0.823285
0.810625
0.796708
0.781539
0.765129
0.747493
0.728641
0.708581
0.687343
0.664982
0.641551
0.617062
0.591529
0.564582
0.547192
0.538191
0.529383
0.520132
0.510379
0.500192
0.489676
0.47895
0.468124
0.457301
0.446568
0.435997
0.425647
0.415564
0.40578
0.396318
0.387193
0.378412
0.369976
0.361882
0.354124
0.346692
0.339576
0.332764
0.326242
0.319997
0.314017
0.308287
0.302795
0.297528
0.292474
0.287622
0.282959
0.278477
0.274173
0.270038
0.266063
0.262276
0.258849
0.255772
0.252941
0.250292
0.247784
0.245389
0.243089
0.240872
0.238729
0.236653
0.234638
0.232681
0.230778
0.228925
0.227122
0.225364
0.22365
0.221977
0.220345
0.21875
0.217191
0.215666
0.214174
0.212712
0.211279
0.209873
0.208492
0.207134
0.205797
0.204478
0.203176
0.201888
0.200612
0.199344
0.198082
0.196822
0.19556
0.194295
0.19302
0.191731
0.190425
0.189095
0.187736
0.186341
0.184904
0.183417
0.181872
0.18026
0.178571
0.176795
0.174918
0.172928
0.170811
0.16855
0.166128
0.163524
0.160719
0.157687
0.154404
0.15084
0.146967
0.142749
0.138154
0.133145
0.127686
0.121743
0.115289
0.108304
0.10079
0.0927767
0.0843455
0.0756522
0.0669687
0.0587096
0.0515424
0.0484368
0.0524182
0.0548562
0.0544036
0.0515189
0.048293
0.046587
0.0463228
0.0462063
0.0452817
0.0429362
0.0389421
0.0345519
0.060136
0.0825355
0.110243
0.138532
0.166337
0.193094
0.215348
0.234844
0.251874
0.266501
0.278771
0.288748
0.296516
0.302174
0.305835
0.307616
0.30764
0.306034
0.302923
0.298434
0.292693
0.285836
0.278094
0.270641
0.263436
0.256296
0.249101
0.241774
0.234277
0.226597
0.218754
0.210796
0.20279
0.194781
0.186703
0.178583
0.173595
0.197238
0.3786
0.0389421
0.0575696
0.0709504
0.0835806
0.0959521
0.108472
0.121491
0.135395
0.150738
0.167077
0.183812
0.20089
0.218308
0.235776
0.253375
0.271806
0.294452
0.31629
0.338464
0.361367
0.385119
0.409822
0.435611
0.46266
0.491186
0.521456
0.553803
0.588633
0.62646
0.667929
0.713866
0.765344
0.823781
0.891095
0.969958
1.06423
1.17973
1.32537
1.51396
1.76302
)
;
}
frontAndBack
{
type empty;
}
}
// ************************************************************************* //
| [
"jorgentyvand@.gmail.com"
] | jorgentyvand@.gmail.com | |
0b38a1f9a7788e2a5b9050aff677e1a360ccefe9 | 29eb7727f4b623613c9ed75e03674f13b7476aae | /src/libtsduck/dtv/tables/tsPMT.cpp | 164a337448817fc27029b17b0fd71d3c7e7c5ba2 | [
"BSD-2-Clause"
] | permissive | alevinsn/tsduck | a7d9b1a67545df9cb06e03ddd56c797cf62b8122 | 2740efc630713cae80e0fcd0f3f1db815e32fe3e | refs/heads/master | 2022-08-11T09:44:29.173337 | 2022-08-04T04:00:51 | 2022-08-04T04:00:51 | 154,222,385 | 0 | 0 | NOASSERTION | 2018-10-22T21:53:26 | 2018-10-22T21:53:26 | null | UTF-8 | C++ | false | false | 15,332 | cpp | //----------------------------------------------------------------------------
//
// TSDuck - The MPEG Transport Stream Toolkit
// Copyright (c) 2005-2020, Thierry Lelegard
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//
//----------------------------------------------------------------------------
#include "tsPMT.h"
#include "tsNames.h"
#include "tsBinaryTable.h"
#include "tsTablesDisplay.h"
#include "tsPSIRepository.h"
#include "tsDuckContext.h"
#include "tsxmlElement.h"
TSDUCK_SOURCE;
#define MY_XML_NAME u"PMT"
#define MY_CLASS ts::PMT
#define MY_TID ts::TID_PMT
#define MY_STD ts::STD_MPEG
TS_REGISTER_TABLE(MY_CLASS, {MY_TID}, MY_STD, MY_XML_NAME, MY_CLASS::DisplaySection);
//----------------------------------------------------------------------------
// Constructors
//----------------------------------------------------------------------------
// Main constructor: build an empty but valid PMT with the given header
// fields. Descriptor list and stream map are linked to this table so that
// their serialized size is accounted against this table's limits.
ts::PMT::PMT(uint8_t version_, bool is_current_, uint16_t service_id_, PID pcr_pid_) :
    AbstractLongTable(MY_TID, MY_XML_NAME, MY_STD, version_, is_current_),
    service_id(service_id_),
    pcr_pid(pcr_pid_),
    descs(this),     // program-level descriptor list, parented to this table
    streams(this)    // elementary stream map, parented to this table
{
    // An empty PMT (no stream, no descriptor) is still a valid table.
    _is_valid = true;
}
// Copy constructor: duplicate the content but re-parent the descriptor
// list and the stream map to this new table instance (they must not keep
// pointing at the source table).
ts::PMT::PMT(const PMT& other) :
    AbstractLongTable(other),
    service_id(other.service_id),
    pcr_pid(other.pcr_pid),
    descs(this, other.descs),       // copy descriptors, parent is this table
    streams(this, other.streams)    // copy streams, parent is this table
{
}
// Constructor from a binary table: delegate to the default constructor,
// then deserialize. The validity flag (_is_valid) is set by deserialize()
// according to the success of the binary analysis.
ts::PMT::PMT(DuckContext& duck, const BinaryTable& table) :
    PMT()
{
    deserialize(duck, table);
}
//----------------------------------------------------------------------------
// Deserialization
//----------------------------------------------------------------------------
// Rebuild this PMT object from a binary table.
// On any truncated section, the method returns early without setting
// _is_valid, leaving the object marked invalid.
void ts::PMT::deserializeContent(DuckContext& duck, const BinaryTable& table)
{
    // Clear table content
    service_id = 0;
    pcr_pid = PID_NULL;
    descs.clear();
    streams.clear();

    // Loop on all sections (although a PMT is not allowed to use more than
    // one section, see ISO/IEC 13818-1:2000 2.4.4.8 & 2.4.4.9)
    for (size_t si = 0; si < table.sectionCount(); ++si) {

        // Reference to current section
        const Section& sect(*table.sectionAt(si));

        // Get common properties (should be identical in all sections)
        version = sect.version();
        is_current = sect.isCurrent();
        service_id = sect.tableIdExtension();   // program_number is the tid-ext

        // Analyze the section payload:
        const uint8_t* data(sect.payload());
        size_t remain(sect.payloadSize());

        // Get PCR PID (13 low-order bits of the first 16-bit field)
        if (remain < 2) {
            return;
        }
        pcr_pid = GetUInt16(data) & 0x1FFF;
        data += 2;
        remain -= 2;

        // Get program information descriptor list
        // (12-bit program_info_length, then the descriptors themselves)
        if (remain < 2) {
            return;
        }
        size_t info_length(GetUInt16(data) & 0x0FFF);
        data += 2;
        remain -= 2;
        // Clamp the declared length to what is actually available.
        info_length = std::min(info_length, remain);
        descs.add(data, info_length);
        data += info_length;
        remain -= info_length;

        // Get elementary streams description.
        // Each entry: stream_type (1), PID (2), ES_info_length (2) = 5 bytes
        // of fixed header, followed by the ES descriptor loop.
        while (remain >= 5) {
            PID pid = GetUInt16(data + 1) & 0x1FFF;
            // operator[] creates the entry if absent; a duplicate PID in the
            // section merges its descriptors into the same stream entry.
            Stream& str(streams[pid]);
            str.stream_type = data[0];
            info_length = GetUInt16(data + 3) & 0x0FFF;
            data += 5;
            remain -= 5;
            info_length = std::min(info_length, remain);
            str.descs.add(data, info_length);
            data += info_length;
            remain -= info_length;
        }
    }

    _is_valid = true;
}
//----------------------------------------------------------------------------
// Serialization
//----------------------------------------------------------------------------
// Serialize this PMT object into a binary table.
// If the content does not fit in one section, the table is left in an
// invalid (incomplete) state since a PMT cannot span multiple sections.
void ts::PMT::serializeContent(DuckContext& duck, BinaryTable& table) const
{
    // Build the section. Note that a PMT is not allowed to use more than
    // one section, see ISO/IEC 13818-1:2000 2.4.4.8 & 2.4.4.9
    uint8_t payload [MAX_PSI_LONG_SECTION_PAYLOAD_SIZE];
    uint8_t* data = payload;
    size_t remain = sizeof(payload);

    // Add PCR PID (0xE000 sets the 3 reserved bits before the 13-bit PID)
    PutUInt16(data, pcr_pid | 0xE000);
    data += 2;
    remain -= 2;

    // Insert program_info descriptor list (with leading length field)
    descs.lengthSerialize(data, remain);

    // Add description of all elementary streams.
    // Each entry needs at least 5 bytes: stream_type (1), PID (2) and the
    // 2-byte ES_info_length written by lengthSerialize().
    for (StreamMap::const_iterator it = streams.begin(); it != streams.end() && remain >= 5; ++it) {

        // Insert stream type and pid (0xE000 again sets the reserved bits)
        data[0] = it->second.stream_type;
        PutUInt16(data + 1, it->first | 0xE000);  // PID
        data += 3;
        remain -= 3;

        // Insert descriptor list for elem. stream (with leading length field)
        // lengthSerialize() returns the index of the first descriptor which
        // did not fit in the remaining space.
        size_t next_index = it->second.descs.lengthSerialize(data, remain);
        if (next_index != it->second.descs.count()) {
            // Not enough space to serialize all descriptors in the section.
            // A PMT cannot have more than one section.
            // Return with table left in invalid state.
            return;
        }
    }

    // Add one single section in the table
    table.addSection(new Section(MY_TID,           // tid
                                 false,            // is_private_section
                                 service_id,       // tid_ext
                                 version,
                                 is_current,
                                 0,                 // section_number,
                                 0,                 // last_section_number
                                 payload,
                                 data - payload));  // payload_size,
}
//----------------------------------------------------------------------------
// Check if an elementary stream carries audio, video or subtitles.
// Does not just look at the stream type.
// Also analyzes the descriptor list for addional information.
//----------------------------------------------------------------------------
bool ts::PMT::Stream::isVideo() const
{
return IsVideoST(stream_type) ||
descs.search(DID_AVC_VIDEO) < descs.count() ||
descs.search(DID_HEVC_VIDEO) < descs.count() ||
descs.search(DID_MPEG4_VIDEO) < descs.count() ||
descs.search(DID_J2K_VIDEO) < descs.count();
}
bool ts::PMT::Stream::isAudio() const
{
    // An audio stream type is conclusive on its own.
    if (IsAudioST(stream_type)) {
        return true;
    }
    // AC-3 or HE-AAC components may have "PES private data" stream type
    // but are identified by specific descriptors.
    if (descs.search(DID_DTS) < descs.count() ||
        descs.search(DID_AC3) < descs.count() ||
        descs.search(DID_ENHANCED_AC3) < descs.count() ||
        descs.search(DID_AAC) < descs.count())
    {
        return true;
    }
    // Remaining audio formats are signalled through DVB extension descriptors.
    return descs.search(EDID::ExtensionDVB(EDID_AC4)) < descs.count() ||
           descs.search(EDID::ExtensionDVB(EDID_DTS_NEURAL)) < descs.count() ||
           descs.search(EDID::ExtensionDVB(EDID_DTS_HD_AUDIO)) < descs.count();
}
// Check whether this elementary stream carries subtitles, either DVB
// subtitling or teletext pages of a subtitle type.
bool ts::PMT::Stream::isSubtitles() const
{
    // A subtitling descriptor always indicates subtitles.
    if (descs.search(DID_SUBTITLING) < descs.count()) {
        return true;
    }
    // A teletext descriptor may indicate subtitles
    for (size_t index = 0; (index = descs.search(DID_TELETEXT, index)) < descs.count(); ++index) {
        // Get descriptor payload
        const uint8_t* data = descs[index]->payload();
        size_t size = descs[index]->payloadSize();
        // Loop on all language entries, check if teletext type is a subtitle
        // Each entry is 5 bytes: 3-byte language code, then one byte holding
        // teletext_type in its 5 most significant bits, then the page number.
        while (size >= 5) {
            uint8_t ttype = data[3] >> 3;
            if (ttype == 0x02 || ttype == 0x05) {
                return true; // teletext subtitles types
            }
            data += 5;
            size -= 5;
        }
    }
    // After all, no subtitle here...
    return false;
}
//----------------------------------------------------------------------------
// Look for a component tag in a stream_identifier_descriptor.
//----------------------------------------------------------------------------
// Fetch the component tag of this stream from its first well-formed
// stream_identifier_descriptor. Return true and fill 'tag' on success.
bool ts::PMT::Stream::getComponentTag(uint8_t& tag) const
{
    size_t i = descs.search(DID_STREAM_ID);
    while (i < descs.count()) {
        if (!descs[i].isNull() && descs[i]->payloadSize() >= 1) {
            // The payload of the stream_identifier_descriptor contains only one byte, the component tag.
            tag = descs[i]->payload()[0];
            return true;
        }
        i = descs.search(DID_STREAM_ID, i + 1);
    }
    return false;
}
//----------------------------------------------------------------------------
// Search the component PID for a given component tag.
//----------------------------------------------------------------------------
// Return the PID of the component carrying the given component tag in a
// stream_identifier_descriptor, or PID_NULL when no component matches.
ts::PID ts::PMT::componentTagToPID(uint8_t tag) const
{
    // Loop on all components of the service.
    for (auto it = streams.begin(); it != streams.end(); ++it) {
        const PID pid = it->first;
        const PMT::Stream& stream(it->second);
        // Loop on all stream_identifier_descriptors.
        for (size_t i = stream.descs.search(DID_STREAM_ID); i < stream.descs.count(); i = stream.descs.search(DID_STREAM_ID, i + 1)) {
            // The payload of the stream_identifier_descriptor contains only one byte, the component tag.
            if (!stream.descs[i].isNull() && stream.descs[i]->payloadSize() >= 1 && stream.descs[i]->payload()[0] == tag) {
                return pid;
            }
        }
    }
    return PID_NULL; // not found
}
//----------------------------------------------------------------------------
// Search the first video PID in the service.
//----------------------------------------------------------------------------
ts::PID ts::PMT::firstVideoPID() const
{
    // Walk the component map (ordered by PID) and return the first video one.
    for (const auto& comp : streams) {
        if (comp.second.isVideo()) {
            return comp.first;
        }
    }
    return PID_NULL; // not found
}
//----------------------------------------------------------------------------
// A static method to display a PMT section.
//----------------------------------------------------------------------------
// Display a raw PMT section in human-readable form: fixed part (service id,
// PCR PID), program-level descriptors, then one entry per elementary stream.
void ts::PMT::DisplaySection(TablesDisplay& display, const ts::Section& section, int indent)
{
    DuckContext& duck(display.duck());
    std::ostream& strm(duck.out());
    const std::string margin(indent, ' ');
    const uint8_t* data = section.payload();
    size_t size = section.payloadSize();
    if (size >= 4) {
        // Fixed part
        PID pid = GetUInt16(data) & 0x1FFF;
        size_t info_length = GetUInt16(data + 2) & 0x0FFF;
        data += 4; size -= 4;
        // Clamp a declared length that overruns the actual payload.
        if (info_length > size) {
            info_length = size;
        }
        strm << margin << UString::Format(u"Program: %d (0x%X)", {section.tableIdExtension(), section.tableIdExtension()})
             << ", PCR PID: ";
        if (pid == PID_NULL) {
            strm << "none";
        }
        else {
            strm << pid << UString::Format(u" (0x%X)", {pid});
        }
        strm << std::endl;
        // Process and display "program info"
        if (info_length > 0) {
            strm << margin << "Program information:" << std::endl;
            display.displayDescriptorList(section, data, info_length, indent);
        }
        data += info_length; size -= info_length;
        // Process and display "elementary stream info"
        // Each entry: 1-byte stream type, 13-bit PID, 12-bit ES info length.
        while (size >= 5) {
            uint8_t stream = *data;
            PID es_pid = GetUInt16(data + 1) & 0x1FFF;
            size_t es_info_length = GetUInt16(data + 3) & 0x0FFF;
            data += 5; size -= 5;
            if (es_info_length > size) {
                es_info_length = size;
            }
            strm << margin << "Elementary stream: type " << names::StreamType(stream, names::FIRST)
                 << ", PID: " << es_pid << UString::Format(u" (0x%X)", {es_pid}) << std::endl;
            display.displayDescriptorList(section, data, es_info_length, indent);
            data += es_info_length; size -= es_info_length;
        }
    }
    // Anything left over (truncated/extra bytes) is dumped as-is.
    display.displayExtraData(data, size, indent);
}
//----------------------------------------------------------------------------
// XML serialization
//----------------------------------------------------------------------------
// Serialize this PMT into an XML element: fixed attributes, program-level
// descriptors, then one <component> child per elementary stream.
void ts::PMT::buildXML(DuckContext& duck, xml::Element* root) const
{
    root->setIntAttribute(u"version", version);
    root->setBoolAttribute(u"current", is_current);
    root->setIntAttribute(u"service_id", service_id, true);
    // The PCR PID attribute is omitted when there is no PCR PID.
    if (pcr_pid != PID_NULL) {
        root->setIntAttribute(u"PCR_PID", pcr_pid, true);
    }
    descs.toXML(duck, root);
    for (const auto& comp : streams) {
        xml::Element* child = root->addElement(u"component");
        child->setIntAttribute(u"elementary_PID", comp.first, true);
        child->setIntAttribute(u"stream_type", comp.second.stream_type, true);
        comp.second.descs.toXML(duck, child);
    }
}
//----------------------------------------------------------------------------
// XML deserialization
//----------------------------------------------------------------------------
// Rebuild this PMT from an XML element. On any error, _is_valid is left
// false and the table content is partially cleared.
void ts::PMT::fromXML(DuckContext& duck, const xml::Element* element)
{
    // Reset current content before deserializing.
    descs.clear();
    streams.clear();
    xml::ElementVector children;
    // Fixed attributes; <component> children are collected into 'children'
    // while the other child elements form the program-level descriptor list.
    _is_valid =
        checkXMLName(element) &&
        element->getIntAttribute<uint8_t>(version, u"version", false, 0, 0, 31) &&
        element->getBoolAttribute(is_current, u"current", false, true) &&
        element->getIntAttribute<uint16_t>(service_id, u"service_id", true, 0, 0x0000, 0xFFFF) &&
        element->getIntAttribute<PID>(pcr_pid, u"PCR_PID", false, PID_NULL, 0x0000, 0x1FFF) &&
        descs.fromXML(duck, children, element, u"component");
    // One map entry per <component>; streams[pid] default-constructs the entry.
    for (size_t index = 0; _is_valid && index < children.size(); ++index) {
        PID pid = PID_NULL;
        _is_valid =
            children[index]->getIntAttribute<PID>(pid, u"elementary_PID", true, 0, 0x0000, 0x1FFF) &&
            children[index]->getIntAttribute<uint8_t>(streams[pid].stream_type, u"stream_type", true, 0, 0x00, 0xFF) &&
            streams[pid].descs.fromXML(duck, children[index]);
    }
}
| [
"thierry@lelegard.fr"
] | thierry@lelegard.fr |
32fa325b5705ae157516ce6117d036db0035631a | 801a5f1df5f01aae04369f3f6097a469da11c601 | /Solved Problems/Борщ картопля і салат/algo.cpp | 60b20a04065c4c24a03b861561232b9fc41ed535 | [] | no_license | NazarSmith228/Competitive_Programming | 0c8e047a5ebc3ccc31f1ec40982162a3cadf0b75 | 9bab16788d150fff060f5d1167fdd7dd8819bbff | refs/heads/main | 2023-06-08T06:33:41.071352 | 2021-06-27T00:52:25 | 2021-06-27T00:52:25 | 377,292,161 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,038 | cpp | #include <iostream>
#include <math.h>
using namespace std;
// In-place quicksort of array over the inclusive index range [low, high].
// Partitions around the middle element, then recurses into both halves.
void qsort(int *array, int low, int high)
{
    int left = low;
    int right = high;
    const int pivot = array[(left + right) / 2];
    // Hoare-style partition: move elements smaller than the pivot left of
    // it and larger ones right of it.
    while (left <= right)
    {
        while (array[left] < pivot) ++left;
        while (array[right] > pivot) --right;
        if (left <= right)
        {
            const int saved = array[left];
            array[left] = array[right];
            array[right] = saved;
            ++left;
            --right;
        }
    }
    // Recurse only into non-trivial sub-ranges.
    if (right > low)
        qsort(array, low, right);
    if (left < high)
        qsort(array, left, high);
}
int main() {
    // Reads N rows of 3 integers, computes the median of each column, and
    // prints the total L1 distance of all rows from the median triple
    // (the median minimizes that sum).
    int N;
    cin >> N;
    int array[N][3];            // VLA — GCC extension, kept from the original
    int med1=0,med2=0,med3=0;
    int sum = 0;
    for(int i = 0; i < N; i++){
        for(int j = 0; j < 3; j++){
            cin >> array[i][j];
        }
    }
    // Copy each column into its own array (the original re-copied every
    // column three times) and sort the columns independently.
    int arr1[N],arr2[N],arr3[N];
    for(int i = 0; i < N; i++){
        arr1[i] = array[i][0];
        arr2[i] = array[i][1];
        arr3[i] = array[i][2];
    }
    // Guard: the original called qsort on an empty range for N == 0, which
    // reads the pivot element out of bounds. Medians are now also computed
    // once instead of N times, and N replaces the runtime sizeof tricks.
    if (N > 0) {
        qsort(arr1,0,N-1);
        qsort(arr2,0,N-1);
        qsort(arr3,0,N-1);
        if (N % 2 == 0) {
            // Even count: integer mean of the two middle values.
            med1 = (arr1[N / 2] + arr1[N / 2 - 1]) / 2;
            med2 = (arr2[N / 2] + arr2[N / 2 - 1]) / 2;
            med3 = (arr3[N / 2] + arr3[N / 2 - 1]) / 2;
        } else {
            // Odd count: middle element. This now also covers N == 1, which
            // the original skipped, leaving the medians at 0 and producing
            // a wrong non-zero sum for a single row.
            med1 = arr1[N / 2];
            med2 = arr2[N / 2];
            med3 = arr3[N / 2];
        }
    }
    for(int i = 0; i < N; i++){
        sum += (abs(med1 - array[i][0]) + abs(med2 - array[i][1]) + abs(med3 - array[i][2]));
    }
    cout << sum << endl;
    return 0;
}
| [
"nazarmarmeladka228@gmail.com"
] | nazarmarmeladka228@gmail.com |
76621d0bdc68f0153be34340fc12cb5e11780ef8 | 8e758fc6b49b3cab5c05b306f52216e5ebeb02e9 | /singleboardregdialog.cpp | 5f85cbd90317afbd28c8ab4f3f9970e9e3b36e2b | [] | no_license | qingwu/nantong | 27bcc76b46768dd593ec751407c9604f24f13bc8 | eb1ce637803ace4bf570e04ea542ac1e93144cf5 | refs/heads/master | 2021-01-01T05:35:17.632695 | 2013-06-03T02:46:55 | 2013-06-03T02:46:55 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 3,171 | cpp | #include <QtGui/QLabel>
#include <QtGui/QLineEdit>
#include <QtGui/QPushButton>
#include <QtGui/QFont>
#include <QMessageBox>
#include <QDebug>
#include "singleboardregdialog.h"
// Builds the fixed-size "interface board registration" dialog: one section
// to register a single board by number, one button to register all boards.
singleBoardRegDialog::singleBoardRegDialog(QWidget *parent) :
    QDialog(parent)
{
    this->setFixedSize(QSize(480, 240));
    setWindowTitle(tr("接口板登记设置"));
    QFont f("unifont", 12, QFont::Light);
    // Single-board section: prompt label, board number label + edit, OK.
    singleRegInfoLabel = new QLabel(this);
    singleRegInfoLabel->setText(tr("► 单接口板登记 : 请输入要登记的接口板号,并点击确定."));
    singleRegInfoLabel->setAlignment(Qt::AlignCenter);
    singleRegInfoLabel->setFont(f);
    singleRegInfoLabel->setGeometry(QRect(40,20,420,40));
    singleBoardNumLabel = new QLabel(this);
    singleBoardNumLabel->setText(tr("接口板号 :"));
    singleBoardNumLabel->setGeometry(QRect(110,70,70,24));
    boardNumLineEdit = new QLineEdit(this);
    boardNumLineEdit->setGeometry(QRect(195,68,60,30));
    boardNumLineEdit->setAlignment(Qt::AlignCenter);
    singleOkPushButton = new QPushButton(tr("确定"),this);
    singleOkPushButton->setGeometry(QRect(330,70,85,27));
    // "Register all boards" section.
    allRegInfoLabel = new QLabel(this);
    allRegInfoLabel->setText(tr("► 全体接口板登记,请点击下方按钮:"));
    allRegInfoLabel->setGeometry(QRect(40,120,270,40));
    allRegInfoLabel->setFont(f);
    allRegPushButton = new QPushButton(tr("全体接口板登记"),this);
    //allRegPushButton->setText(tr("全体接口板登记"));
    allRegPushButton->setGeometry(QRect(130,170,140,27));
    cancelPushButton = new QPushButton(tr("取消"),this);
    cancelPushButton->setGeometry(QRect(330,170,85,27));
    connect(singleOkPushButton,SIGNAL(clicked()),this,SLOT(emitSingleBoardNum()));
    // Forward the button click as this dialog's own signal.
    connect(allRegPushButton,SIGNAL(clicked()),this,SIGNAL(allRegPushButtonClicked()));
    // BUG FIX: QWidget::close() is a public slot, not a signal. The original
    // connect(..., this, SIGNAL(close())) could never be established (Qt
    // warns "no such signal" at runtime), so Cancel silently did nothing.
    connect(cancelPushButton,SIGNAL(clicked()),this,SLOT(close()));
    connect(this->parentWidget(), SIGNAL(closedialog()), this, SLOT(close()));
}
// Validates the board number typed by the user; emits singleBoardNumSignal()
// when it is in range, otherwise pops up an information box.
void singleBoardRegDialog::emitSingleBoardNum()
{
    QString numstr = boardNumLineEdit->text();
    qDebug()<<"signel board to reg is: "<<numstr;
    // QString::toInt() returns 0 on parse failure, which the check rejects.
    int num = numstr.toInt();
    // NOTE(review): the error message below says [0,34] but this condition
    // accepts 1..34 only — confirm whether board number 0 is valid.
    if( (num < 35) && (num > 0))
    {
        emit singleBoardNumSignal(num);
    }
    else
    {
        // WA_DeleteOnClose makes the box delete itself, so no leak here.
        QMessageBox *box = new QMessageBox(tr("接口板登记"),
                                           tr("请输入正确的接口板号[0,34]"),
                                           QMessageBox::Information,
                                           QMessageBox::Ok,
                                           QMessageBox::NoButton,
                                           QMessageBox::NoButton,
                                           this);
        box->setAttribute(Qt::WA_DeleteOnClose);
        connect(this->parentWidget(), SIGNAL(closedialog()), box, SLOT(close()));
        box->exec();
    }
}
// Destructor: explicitly deletes the child widgets. They are parented to
// this dialog, so Qt's ~QObject would delete them too; deleting them here
// first is safe because destroyed children deregister from their parent.
singleBoardRegDialog::~singleBoardRegDialog()
{
    delete(singleRegInfoLabel);
    delete(allRegInfoLabel);
    delete(singleBoardNumLabel);
    delete(boardNumLineEdit);
    delete(singleOkPushButton);
    delete(allRegPushButton);
    delete(cancelPushButton);
}
| [
"qingwuhust@qq.com"
] | qingwuhust@qq.com |
682e1d00f3e300b15071e24fa3a8c91d26b71a44 | cefd6c17774b5c94240d57adccef57d9bba4a2e9 | /ApacheConnector/include/ApacheChannel.h | 8cf3f266a44a25610fe46feda57a54a7f80aa1e7 | [
"BSL-1.0"
] | permissive | adzhou/oragle | 9c054c25b24ff0a65cb9639bafd02aac2bcdce8b | 5442d418b87d0da161429ffa5cb83777e9b38e4d | refs/heads/master | 2022-11-01T05:04:59.368831 | 2014-03-12T15:50:08 | 2014-03-12T15:50:08 | 17,238,063 | 0 | 1 | BSL-1.0 | 2022-10-18T04:23:53 | 2014-02-27T05:39:44 | C++ | UTF-8 | C++ | false | false | 1,934 | h | //
// ApacheChannel.h
//
// $Id: //poco/1.4/ApacheConnector/include/ApacheChannel.h#2 $
//
// Copyright (c) 2006-2011, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:
//
// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
#ifndef ApacheConnector_ApacheChannel_INCLUDED
#define ApacheConnector_ApacheChannel_INCLUDED
#include "Poco/Channel.h"
class ApacheChannel: public Poco::Channel
	/// This class implements a logging channel
	/// that uses the Apache logging facilities.
{
public:
	ApacheChannel();
		/// Creates the ApacheChannel.
	~ApacheChannel();
		/// Destroys the ApacheChannel.
	void log(const Poco::Message& msg);
		/// Forwards the given message to the Apache logging facilities.
};
#endif // ApacheConnector_ApacheChannel_INCLUDED
| [
"adzhou@hp.com"
] | adzhou@hp.com |
9899e7e7b2348125f05d7a836a8bc2f7c12e13ce | a1c8c86f63885afa2da28d156c46b39d42729e30 | /OpenTESArena/src/Assets/ExeStrings.h | 9f3ff7178f09d1bcdbe30f92983ff6df15e82417 | [
"MIT"
] | permissive | dlongo13/OpenTESArena | 11996ca587236a82571797d81a2f9d57f6a9122c | 820c583e71e706c6c99b3da623bd8fe427269d0a | refs/heads/master | 2021-01-20T06:04:14.041104 | 2017-04-28T19:45:21 | 2017-04-28T19:45:21 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,739 | h | #ifndef EXE_STRINGS_H
#define EXE_STRINGS_H
#include <vector>
// This file has various offsets and sizes for strings in the decompressed A.EXE.
// I'm not sure how I want to store these in the long run, so this is a rough draft
// layout for now.
namespace ExeStrings
{
	// Each pair is (byte offset into the decompressed A.EXE, string length).
	// Character creation.
	const std::pair<int, int> ChooseClassCreation(0x00035a80, 37);
	const std::pair<int, int> ChooseClassCreationGenerate(0x0003f637, 8);
	const std::pair<int, int> ChooseClassCreationSelect(0x0003f641, 6);
	const std::pair<int, int> ClassQuestionsIntro(0x00035aa7, 175);
	const std::pair<int, int> SuggestedRace(0x00035bb1, 75);
	const std::pair<int, int> ChooseClassList(0x0003f61a, 19);
	const std::pair<int, int> ChooseName(0x00035b58, 26);
	const std::pair<int, int> ChooseGender(0x00035b74, 20);
	const std::pair<int, int> ChooseGenderMale(0x0003f652, 4);
	const std::pair<int, int> ChooseGenderFemale(0x0003f658, 6);
	const std::pair<int, int> ChooseRace(0x00035b8a, 37);
	const std::pair<int, int> ConfirmRace(0x00035bff, 74);
	const std::pair<int, int> FinalRaceMessage(0x00035ce0, 67);
	const std::pair<int, int> DistributeClassPoints(0x00035d25, 93);
	const std::pair<int, int> ChooseAppearance(0x00035d84, 174);
	// Class names. Unordered. Six entries per archetype.
	const std::vector<std::pair<int, int>> MageClassNames =
	{
		{ 0x0003e15e, 4 },
		{ 0x0003e163, 10 },
		{ 0x0003e16e, 10 },
		{ 0x0003e179, 8 },
		{ 0x0003e182, 6 },
		{ 0x0003e189, 10 }
	};
	const std::vector<std::pair<int, int>> ThiefClassNames =
	{
		{ 0x0003e194, 4 },
		{ 0x0003e199, 7 },
		{ 0x0003e1a1, 5 },
		{ 0x0003e1a7, 7 },
		{ 0x0003e1af, 5 },
		{ 0x0003e1b5, 8 }
	};
	const std::vector<std::pair<int, int>> WarriorClassNames =
	{
		{ 0x0003e1be, 4 },
		{ 0x0003e1c3, 6 },
		{ 0x0003e1ca, 6 },
		{ 0x0003e1d1, 9 },
		{ 0x0003e1db, 7 },
		{ 0x0003e1e3, 6 }
	};
	// Province names.
	// - Ordered by appearance on the world map reading from top left to bottom right,
	// with the center province last.
	const std::vector<std::pair<int, int>> ProvinceNames =
	{
		{ 0x000392f8, 9 },
		{ 0x0003935a, 10 },
		{ 0x000393bc, 6 },
		{ 0x0003941e, 9 },
		{ 0x00039480, 14 },
		{ 0x000394e2, 9 },
		{ 0x00039544, 7 },
		{ 0x000395a6, 11 },
		{ 0x00039608, 17 }
	};
	// Race names. Ordered the same as provinces.
	const std::vector<std::pair<int, int>> RaceNamesSingular =
	{
		{ 0x0003e290, 6 },
		{ 0x0003e297, 8 },
		{ 0x0003e2a0, 4 },
		{ 0x0003e2a5, 8 },
		{ 0x0003e2ae, 8 },
		{ 0x0003e2b7, 8 },
		{ 0x0003e2c0, 7 },
		{ 0x0003e2c8, 8 }
	};
	const std::vector<std::pair<int, int>> RaceNamesPlural =
	{
		{ 0x0003e245, 7 },
		{ 0x0003e24d, 9 },
		{ 0x0003e257, 5 },
		{ 0x0003e25d, 10 },
		{ 0x0003e268, 10 },
		{ 0x0003e273, 10 },
		{ 0x0003e27e, 7 },
		{ 0x0003e286, 9 }
	};
	// Logbook.
	const std::pair<int, int> LogbookIsEmpty(0x00042f45, 22);
}
#endif
| [
"afritz1@users.noreply.github.com"
] | afritz1@users.noreply.github.com |
c0bda4d41288c67fae6218da3eb79f130be0dced | 748efcdb7d3c35c7e634a7acda8a802a0d8a4e34 | /src/chess_piece.cpp | 079374f413faf15a74c51261ec10146729871864 | [] | no_license | mbusy/chess | ad9cc951866c8ac649046294926b44147ce507a9 | cf8d51e34e4e1b854bcc75d9eb720200768f565c | refs/heads/master | 2023-04-15T01:18:21.912711 | 2021-05-05T17:21:41 | 2021-05-05T17:21:41 | 358,408,752 | 1 | 1 | null | 2021-05-05T17:21:41 | 2021-04-15T22:29:06 | C++ | UTF-8 | C++ | false | false | 1,685 | cpp | #include "chess/chess_piece.hpp"
// Shared texture cache keyed by piece id and piece type; filled lazily by
// _load_texture() so each texture file is loaded at most once.
std::map<PieceId, std::map<PieceType, sf::Texture>> ChessPiece::texture_map;
// Type of this piece.
PieceType ChessPiece::get_piece_type() const {
    return this->piece_type;
}
// Movement direction associated with this piece.
PieceDirection ChessPiece::get_piece_direction() const {
    return this->piece_direction;
}
// Identifier of this piece (presumably the owning side — confirm usage).
PieceId ChessPiece::get_piece_id() const {
    return this->piece_id;
}
// Mutable access to the piece's sprite.
sf::Sprite& ChessPiece::get_sprite() {
    return this->sprite;
}
// Board coordinates derived from the sprite's current pixel position.
sf::Vector2i ChessPiece::get_position() const {
    return utils::helpers::to_board_index(this->sprite.getPosition());
}
// True once signal_piece_moved() has been called on this piece.
bool ChessPiece::has_piece_moved() const {
    return this->has_moved;
}
// Marks the piece as having moved at least once.
void ChessPiece::signal_piece_moved() {
    this->has_moved = true;
}
// Loads (once) and applies the texture for this piece's (id, type) pair,
// then scales the sprite so it exactly fills one board cell.
void ChessPiece::_load_texture() {
    // If the texture hasn't already been loaded, load it
    if (ChessPiece::texture_map.find(this->piece_id) ==
        ChessPiece::texture_map.end() ||
        ChessPiece::texture_map[this->piece_id].find(this->piece_type) ==
        ChessPiece::texture_map[this->piece_id].end()) {
        ChessPiece::texture_map[this->piece_id][this->piece_type].loadFromFile(
        utils::helpers::get_piece_filepath(this->piece_id, this->piece_type));
    }
    // Apply the texture to the sprite
    this->sprite.setTexture(
        ChessPiece::texture_map[this->piece_id][this->piece_type]);
    // Automatically resizes the sprite
    // (scale factor = cell size / texture size, per axis)
    float piece_scale_x = utils::Settings::get_cell_size() /
    this->sprite.getTexture()->getSize().x;
    float piece_scale_y = utils::Settings::get_cell_size() /
    this->sprite.getTexture()->getSize().y;
    this->sprite.setScale(
    piece_scale_x,
    piece_scale_y);
} | [
"mbusy.pro@gmail.com"
] | mbusy.pro@gmail.com |
57ed89fb40a3999c2b16b467bfffea283ed4efa8 | 0eff74b05b60098333ad66cf801bdd93becc9ea4 | /second/download/git/gumtree/git_new_log_1808.cpp | 367378af5613875d8d69165542dfe02612d12d8f | [] | no_license | niuxu18/logTracker-old | 97543445ea7e414ed40bdc681239365d33418975 | f2b060f13a0295387fe02187543db124916eb446 | refs/heads/master | 2021-09-13T21:39:37.686481 | 2017-12-11T03:36:34 | 2017-12-11T03:36:34 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 60 | cpp | status_printf_ln(s, c, _("Changes not staged for commit:")); | [
"993273596@qq.com"
] | 993273596@qq.com |
2e056db38ec08a1d03294e63d18cf52c949356a7 | f2a79b708c0f93416cc537c527744b2306643de1 | /Source/Authh.h | 278f6529bd07334974587543816500883d1304ee | [] | no_license | gamekillerat/ephesys | a37008b3a29dbc6539fc953568c8368505dd30fe | 1d9508df6f783e8a006709369a79afce1f8365ab | refs/heads/main | 2023-06-05T18:38:08.134832 | 2021-06-28T07:30:12 | 2021-06-28T07:30:12 | 380,944,101 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 145 | h | #pragma once
#include "stdafx.h"
// Converts a narrow std::string to a std::wstring (implemented elsewhere).
wstring s2ws(const std::string& str);
// Authorization level of the menu; defined in another translation unit.
extern int menuauthlevel;
// Presumably validates the license key and updates the auth state
// (including menuauthlevel) — confirm against the implementation.
void get_auth_status(std::string license); | [
"noreply@github.com"
] | noreply@github.com |
0b8a2098de67b2a5048b60864957243f7db32057 | edaa018b9a74c843b082f5cddafb0ec3740170a1 | /Codeforces30DayTraining/PolandBallAndHypothesis__755A.cpp | a9586dfd8b1b472d6eb6616f0c290a289b309149 | [] | no_license | m0saan/CP | e4eb6b363f68e82d59463281abdf5878188b324d | 88663bf32920403ae1ce4ba4529a8650ac42459a | refs/heads/master | 2023-06-07T13:19:42.217299 | 2021-07-04T19:16:57 | 2021-07-04T19:16:57 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 458 | cpp | #include <iostream>
#include <cmath>
using namespace std;
// Deterministic primality test by 6k±1 trial division.
// Returns true iff x is prime; values <= 1 (including negatives) are not.
bool is_prime(int x) {
    if (x <= 3) return x > 1;
    if (x % 2 == 0 || x % 3 == 0) return false;
    // The original loop condition called the floating-point pow(i,2) on
    // every iteration. An integer multiply is exact and far cheaper;
    // long long keeps i*i from overflowing int near INT_MAX.
    for (long long i = 5; i * i <= x; i += 6)
        if (x % i == 0 || x % (i + 2) == 0) return false;
    return true;
}
// Codeforces 755A: print the smallest i such that n*i + 1 is composite.
// NOTE(review): if no composite were found below 1000, main would end
// without printing — acceptable only if the problem guarantees an answer
// in that range, which the original author relied on.
int main() {
    int n;
    cin >> n;
    for (int i = 1;i<1000; ++i) {
        if (!is_prime(n * i + 1)) {
            cout << i << endl;
            return 0;
        }
    }
} | [
"moboustta6@gmail.com"
] | moboustta6@gmail.com |
1692492baac67cda4aba76ba6aa832298136fd13 | 5b62aff431fe89cdc993de918b666869498432df | /CourseWork1/ContinuousCalculator.cpp | f84653a002e7e2a9ae0894f0a7dcd49fe2362799 | [] | no_license | JustinCar/Game_Engineering_Coursework_1 | 86d6e5af6df64f76190af93b83520b2f5521d39e | bf6f0f62303a56ffc2915b9f40e2ef17ebd64327 | refs/heads/master | 2022-03-13T16:16:28.389243 | 2019-10-22T13:10:45 | 2019-10-22T13:10:45 | 214,403,611 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,585 | cpp | #include "ContinuousCalculator.h"
// Combines the permutation counts with the number of possible consecutive
// runs (see the helper methods below) and stores the total, replicated in
// all four slots, in 'container'.
ContinuousCalculator::ContinuousCalculator(Puzzle* puzzleVal) : puzzle (puzzleVal)
{
	unsigned long long occursions = numberOfOccursions();
	unsigned long long occursionsBottomRow = numberOfOccursionsBottomRow();
	unsigned long long continous = continuousNumberCountEqualToDimension();
	unsigned long long continousBottomRow = continuousNumberCountEqualToDimensionMinusOne();
	unsigned long long numberOfContinuous = occursions * continous;
	unsigned long long numberOfContinuousBottomRow = occursionsBottomRow * continousBottomRow;
	unsigned long long totalNumberOfContinuous = numberOfContinuous + numberOfContinuousBottomRow;
	// NOTE(review): allocated with new and no matching delete is visible in
	// this file — confirm ownership elsewhere, otherwise this leaks.
	container = new ContinuousCount(totalNumberOfContinuous, totalNumberOfContinuous, totalNumberOfContinuous, totalNumberOfContinuous);
}
// Access to the pre-computed counts. Note: returns a non-const reference
// from a const method, so callers can still mutate the container.
ContinuousCount& ContinuousCalculator::getContainer() const
{
	return *container;
}
// Recursive factorial; n <= 1 yields 1.
// NOTE(review): int overflows for n > 12 — the callers pass values derived
// from the puzzle size, confirm those stay small enough.
int ContinuousCalculator::factorial(int n)
{
	if (n > 1)
		return n * factorial(n - 1);
	else
		return 1;
}
// (size - dimensions)! / 2 arrangements of the remaining tiles, times
// (dimensions - 1) — presumably the number of rows that can hold a
// full-width run; confirm against the coursework derivation.
int ContinuousCalculator::numberOfOccursions()
{
	return (factorial((puzzle->getSize() - puzzle->getDimensions())) / 2) * (puzzle->getDimensions() - 1);
}
// (size - (dimensions - 1))! / 2 — the bottom-row variant uses a run one
// tile shorter than a full row (see the MinusOne counter below).
int ContinuousCalculator::numberOfOccursionsBottomRow()
{
	return (factorial((puzzle->getSize() - (puzzle->getDimensions() - 1))) / 2);
}
// Counts, over the sorted tile values, the windows of length equal to the
// puzzle dimension whose values are strictly consecutive (each exactly one
// more than its predecessor).
int ContinuousCalculator::continuousNumberCountEqualToDimension()
{
	std::vector<int> arr;
	for (int i = 0; i < puzzle->getSize(); i++)
	{
		arr.push_back(puzzle->getOriginal()[i]);
	}
	std::sort(arr.begin(), arr.begin() + arr.size());
	int continuousCount = 0;
	// Each window covers the (dimensions - 1) adjacent pairs starting at
	// arr[i - 1]; the window counts only if every pair is consecutive.
	for (int i = 1; i < arr.size(); i++)
	{
		int counter = 0;
		for (int j = i; j < (i + (puzzle->getDimensions() - 1)); j++)
		{
			if (j >= arr.size())
				break;
			if ((arr[j] - 1) == (arr[j - 1]))
				counter++;
		}
		if (counter == puzzle->getDimensions() - 1)
			continuousCount++;
	}
	return continuousCount;
}
//The number of continous rows that could appear in the bottom row or right most column
int ContinuousCalculator::continuousNumberCountEqualToDimensionMinusOne()
{
	std::vector<int> arr;
	for (int i = 0; i < puzzle->getSize(); i++)
	{
		arr.push_back(puzzle->getOriginal()[i]);
	}
	std::sort(arr.begin(), arr.begin() + arr.size());
	int continuousCount = 0;
	// Same windowing as above, but one element shorter; unlike the full-size
	// counter, this loop bails out of a window on the first non-consecutive
	// pair.
	for (int i = 1; i < arr.size(); i++)
	{
		int counter = 0;
		for (int j = i; j < (i + (puzzle->getDimensions() - 2)); j++)
		{
			if (j >= arr.size())
				break;
			if ((arr[j] - 1) == arr[j - 1])
				counter++;
			else
				break;
		}
		if (counter == puzzle->getDimensions() - 2)
			continuousCount++;
	}
	return continuousCount;
} | [
"carterjustin67@gmail.com"
] | carterjustin67@gmail.com |
13ed91efa9dfbeecfc76335bad18a83d6af766e1 | 57f949c24d3ad70a35e5c4e10fb8a0962cef0fc9 | /Strings/CountAndSay.cpp | a2af3c6ea30172bc1cf9401aafc2b9eca27c16f9 | [] | no_license | harshitmuhal/InterviewBit | 98eae44cb2eaa5484a30e2f5141873d8d82a7b5a | c1be2b214de85b64c3cea43024bfacdd1f7f7e11 | refs/heads/master | 2022-12-23T19:56:08.913716 | 2020-10-01T07:26:50 | 2020-10-01T07:26:50 | 265,763,901 | 0 | 1 | null | 2020-05-21T05:37:06 | 2020-05-21T05:37:05 | null | UTF-8 | C++ | false | false | 940 | cpp | /*
The count-and-say sequence is the sequence of integers beginning as follows:
1, 11, 21, 1211, 111221, ...
1 is read off as one 1 or 11.
11 is read off as two 1s or 21.
21 is read off as one 2, then one 1 or 1211.
Given an integer n, generate the nth sequence.
Note: The sequence of integers will be represented as a string.
Example:
if n = 2,
the sequence is 11.
LINK: https://www.interviewbit.com/problems/count-and-say/
*/
// Returns the n-th term (1-based, n >= 1) of the count-and-say sequence.
string Solution::countAndSay(int n)
{
    string s="1";   // first term of the sequence
    n--;            // n-1 expansion steps remain
    while(n--)
    {
        string temp="";
        int cnt=1, len=s.size();
        // Run-length encode s into temp: for every maximal run append the
        // run length followed by the repeated digit. Runs never exceed 3
        // in this sequence, so '0'+cnt stays a single digit.
        for(int i=1;i<len;i++)
        {
            if(s[i]==s[i-1])
                cnt++;
            else
            {
                temp.push_back('0'+cnt);
                temp.push_back(s[i-1]);
                cnt=1;
            }
        }
        // Close the final run.
        temp.push_back('0'+cnt);
        temp.push_back(s[len-1]);
        s = temp;
    }
    return s;
} | [
"noreply@github.com"
] | noreply@github.com |
2c2968e310cf6e506182c31f941f4683b64321dc | 27fbce7c075cd9f4cee7e1250e82cd56a7699c02 | /tao/x11/ior_table/ior_table.cpp | bafe62f8c244fa0307dc48606387b140a05f4fb9 | [
"MIT"
] | permissive | jwillemsen/taox11 | fe11af6a7185c25d0f236b80c608becbdbf3c8c3 | f16805cfdd5124d93d2426094191f15e10f53123 | refs/heads/master | 2023-09-04T18:23:46.570811 | 2023-08-14T19:50:01 | 2023-08-14T19:50:01 | 221,247,177 | 0 | 0 | MIT | 2023-09-04T14:53:28 | 2019-11-12T15:14:26 | C++ | UTF-8 | C++ | false | false | 748 | cpp | /**
* @file ior_table.cpp
* @author Marcel Smit
*
* @brief Loader of the IORTable library
*
* @copyright Copyright (c) Remedy IT Expertise BV
*/
#include "tao/x11/ior_table/ior_table.h"
#include "ace/Service_Config.h"
int
TAOX11_IORTable_Loader::Initializer ()
{
return ACE_Service_Config::process_directive (ace_svc_desc_TAOX11_IORTable_Loader);
}
ACE_STATIC_SVC_DEFINE (TAOX11_IORTable_Loader,
ACE_TEXT ("TAOX11_IORTable_Loader"),
ACE_SVC_OBJ_T,
&ACE_SVC_NAME (TAOX11_IORTable_Loader),
ACE_Service_Type::DELETE_THIS | ACE_Service_Type::DELETE_OBJ,
0)
ACE_FACTORY_DEFINE (TAOX11_IORTable, TAOX11_IORTable_Loader)
| [
"jwillemsen@remedy.nl"
] | jwillemsen@remedy.nl |
075465e3c66bb1921833108f270403e249cd98f9 | ec8900db3a5bd78f2d2cfa0a09f9fca5d159056a | /src/reverse_digits/answer.cc | efa82f32a517440f126b90cb584da3531be3c3eb | [] | no_license | danielmoraes/epi | 19034ca64d28661e137d2efde97775bc84dcfe21 | bfb7aa862fa22d51d3b8a057448844cfd81052fc | refs/heads/master | 2021-01-25T09:38:39.139700 | 2018-11-20T21:08:07 | 2018-11-20T21:10:57 | 93,869,026 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 490 | cc | /*
*
* EPI
* Reverse Digits (5.8)
*
*/
#include <iostream>
#include <primitive_types.h>
using namespace std;
int main() {
// test with positive numbers
for ( int i = 0; i < 10; i++ ) {
int x = rand() % 1000 + 1; // between 1 and 1000
cout << x << ":" << reverse_digits(x) << endl;
}
// test with negative numbers
for ( int i = 0; i < 10; i++ ) {
int x = -(rand() % 1000 + 1); // between -1 and -1000
cout << x << ":" << reverse_digits(x) << endl;
}
}
| [
"daniel.b.moraes@gmail.com"
] | daniel.b.moraes@gmail.com |
a633d49eab14afc697affa9e34d68bf7b42367e5 | 5a747a2f890201aee3ff8c48d23452be738f736c | /oop/Union and Intersection.cpp | 50376765428c6c46187e724447b1253b71cadbd1 | [] | no_license | Pratik-2662/coll | b2005413e9f874339fe6faf494ed9bcd17daac05 | b0288bcd8d868f618254b62365cec84c6b2e3e9e | refs/heads/main | 2023-02-09T02:00:55.035162 | 2020-12-31T11:08:33 | 2020-12-31T11:08:33 | 325,780,281 | 0 | 0 | null | 2020-12-31T10:56:06 | 2020-12-31T10:56:05 | null | UTF-8 | C++ | false | false | 1,685 | cpp | #include<iostream>
using namespace std;
void inter(int *c,int q,int *b,int s){ // intersection
for (int i = 0; i < q; i++)
{
for (int a = 0; a < s; a++)
{
if(c[a]==b[i]){
cout<< c[a]<<" ";
}
}
}
}
void unio(int *a,int q,int *b,int s){ // union
int l1=0,l2=0;
q=q-1;
s=s-1;
while(l1 < q or l2 < s)
{
if (a[l1]==b[l2])
{
cout<<a[l1]<<" ";
if(l1<q){l1++;}
if(l2<s){l2++;}
}
if (a[l1]<b[l2])
{
cout<<a[l1]<<" ";
if(l1<q){l1++;}
}
if (a[l1]>b[l2])
{
cout<<b[l2]<<" ";
if(l2<s){l2++;}
}
if(l1==q){
for (int i = l2; i <=s; i++)
{
cout<<b[i]<<" ";
}
break;
}
if(l2==s){
for (int i = l1; i <=q; i++)
{
cout<<b[i]<<" ";
}
break;
}
}
}
int main(){
int n1,n2;
cout<<"Enter no of elements in list 1"<<endl;
cin>>n1;
int a[n1];
cout<<"Enter elements of list 1"<<endl;
for (int i = 0; i < n1; i++)
{
cin>>a[i];
}
cout<<endl;
cout<<"Enter no of elements in list 2"<<endl;
cin>>n2;
int b[n2];
cout<<"Enter elements of list 2"<<endl;
for (int i = 0; i < n2; i++)
{
cin>>b[i];
}
cout<<endl;
cout<<"union is ";
unio(a,n1,b,n2);
cout<<endl;
cout<<"intersecction is ";
inter(a,n1,b,n2);
cout<<endl;
return 0;
} | [
"65172744+vinaymane44@users.noreply.github.com"
] | 65172744+vinaymane44@users.noreply.github.com |
e3ac4147e1a9426e76114516f5dc51273d8efdca | fbadd8dd87ffbf43fd61d00e4c87b3ec61a921fa | /libevent_test/src/ev_ftp_server/XFtpLIST.h | 40e3cf3672f817e2d3d7757ced282bb81ac819fb | [] | no_license | WangDavid2012/opensource | 90733bb075fa892478b11fe5f8780ece575127f2 | 3422ab3bf00752932c01300ab10f7fb7ae03b0f5 | refs/heads/master | 2023-02-16T18:59:26.284738 | 2021-01-15T23:07:40 | 2021-01-15T23:07:40 | 238,653,917 | 0 | 0 | null | null | null | null | WINDOWS-1252 | C++ | false | false | 315 | h | #pragma once
#include "XFtpTask.h"
class XFtpLIST : public XFtpTask
{
public:
//½âÎöÐÒé
virtual void Parse(std::string type, std::string msg);
virtual void Write(struct bufferevent *bev);
virtual void Event(struct bufferevent *bev, short what);
private:
std::string GetListData(std::string path);
};
| [
"1285078875@qq.com"
] | 1285078875@qq.com |
2ca574d1d1d51773b968da92afe5c41ffe5600df | f4d5a386bf4f15ccc6ef7dd0875b908ad5b13fd9 | /algorithms/delelemfromsingllinklist.cpp | 5d886f9d8af187e85b9bf0059b17bd8914d23da4 | [] | no_license | wangchaomail/methods | 1a18c6554519926f8606591f4ebcfe452baeefde | 2e88eae7b88f8747ba1b633532429f6720ce676b | refs/heads/master | 2023-03-17T22:42:04.886815 | 2020-03-06T15:15:34 | 2020-03-06T15:15:34 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 50 | cpp | ../iq/c-c++/bloomberg/delelemfromsingllinklist.cpp | [
"vadim@boinc.com"
] | vadim@boinc.com |
f0e200a175d589920d2d11678a200592b2ec5509 | c0bfa3c2ad7497b2cd5d333a540835da21b5750c | /Hash2.cpp | 841fb3b847df216a0df79ff28d3da4a5e902f31e | [] | no_license | ldlovecpp/Hash_Table | 331de6008dcad9e6d8871fda2861ce281fcef6a5 | 6a0d0a7e0182d83236da0bfcae0b3485bf4c53e8 | refs/heads/master | 2022-11-21T20:13:49.174688 | 2020-07-24T14:01:20 | 2020-07-24T14:01:20 | null | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 4,887 | cpp | #include<iostream>
#include<vector>
#include<utility>
using namespace std;
// Node of a separately-chained hash bucket: one stored value plus a link
// to the next node in the same bucket.
template<class V>
struct HashNode
{
    V _val;             // the stored value
    HashNode<V>* _next; // next node in this bucket, or nullptr
    // Builds a detached node holding a copy of val (a value-initialized V
    // when no argument is given).
    HashNode(const V& val = V()) : _val(val), _next(nullptr) {}
};
// String hash functor using the BKDR scheme (multiplier 131).
struct StrToInt
{
    size_t operator()(const string& str)
    {
        size_t hash = 0;
        for (size_t i = 0; i < str.size(); ++i)
        {
            hash = hash * 131 + str[i];
        }
        return hash;
    }
};
// Default hash functor: integral keys map to themselves.
template<class K>
struct hashFun
{
    size_t operator()(const K& key)
    {
        return static_cast<size_t>(key);
    }
};
// Forward declaration: the iterator keeps a back-pointer to its table so
// operator++ can hop to the next non-empty bucket.
template<class K,class V,class KOV,class HF>
class HashTable;
// Forward iterator over a chained hash table.
//  K   - key type
//  V   - stored value type
//  KOV - functor extracting the key from a stored value
//  HF  - hash functor mapping a key to a bucket index
template<class K,class V,class KOV,class HF>
struct HashIterator
{
    typedef HashTable<K,V,KOV,HF> HT;
    typedef HashNode<V> Node;
    typedef HashIterator<K,V,KOV,HF> Self;
    Node* _node;  // current node; nullptr means end()
    HT* _ht;      // owning table (needed to advance across buckets)
    HashIterator(Node* node,HT* ht)
    :_node(node)
    ,_ht(ht)
    {}
    // Dereference to the stored value.
    V& operator *()
    {
        return _node->_val;
    }
    V* operator ->()
    {
        return &_node->_val;
    }
    // Two iterators are equal when they sit on the same node.
    bool operator == (const Self& p)
    {
        return _node==p._node;
    }
    bool operator !=(const Self& p)
    {
        return _node!=p._node;
    }
    // Pre-increment: follow the current chain; when it ends, re-hash the
    // current value to recover this bucket's index, then scan forward for
    // the next non-empty bucket. Becomes end() (nullptr) when none is left.
    Self& operator ++()
    {
        KOV kov;
        HF hf;
        Node* cur=_node->_next;
        if(cur)
        {
            _node=_node->_next;
        }
        else
        {
            size_t idx=hf(kov(_node->_val)) % _ht->_table.size();
            ++idx;
            for(;idx<_ht->_table.size();idx++)
            {
                if(_ht->_table[idx])
                {
                    _node=_ht->_table[idx];
                    break;
                }
            }
            if(idx==_ht->_table.size())
                _node=nullptr;
        }
        return *this;
    }
};
template<class K,class V,class KOV,class HF>
class HashTable
{
public:
friend struct HashIterator<K,V,KOV,HF>;
typedef HashNode<V> Node;
typedef HashIterator<K,V,KOV,HF> iterator;
iterator begin()
{
for(int i=0;i<_table.size();i++)
{
Node* cur=_table[i];
if(cur)
{
return iterator(cur,this);
}
}
return iterator(nullptr,this);
}
iterator end()
{
return iterator(nullptr,this);
}
bool insert(const V& val)
{
//检查容量
checkCapacity();
//寻找插入位置
KOV kov;
HF hf;
size_t idx=hf(kov(val)) % _table.size();
Node* cur=_table[idx];
while(cur)
{
if(kov(cur->_val)==kov(val))
{
return false;
}
cur=cur->_next;
}
//创建新的节点并插入
cur=new Node(val);
cur->_next=_table[idx];
_table[idx]=cur;
++_size;
return true;
}
void checkCapacity()
{
if(_size==_table.size())
{
//确定大小
size_t newSize=_size==0?5:_size*2;
//建立新表
vector<Node*> ht;
ht.resize(newSize);
//将旧表插入新表
//1.拿到旧表的每一个位置
KOV kov;
HF hf;
for(size_t i=0;i<_size;i++)
{
Node* cur=_table[i];
while(cur)
{
Node* next=cur->_next;
size_t idx=hf(kov(cur->_val))%newSize;
cur->_next=ht[idx];
ht[idx]=cur;
cur=next;
}
_table[i]=nullptr;
}
swap(_table,ht);
}
}
Node* find(const K& key)
{
if(_table.size()==0)
return nullptr;
HF hf;
KOV kov;
int idx=hf(key)%_table.size();
Node* cur=_table[idx];
while(cur)
{
if(kov(cur->_val)==key)
{
return cur;
}
cur=cur->_next;
}
return nullptr;
}
bool erase(const K& key)
{
HF hf;
size_t idx=hf(key)%_size;
Node* cur=_table[idx];
Node* prev=cur;
KOV kov;
while(cur)
{
if(kov(cur->_val)==key)
{
if(prev==nullptr)
{
_table[idx]=cur->_next;
}
else
{
prev->_next=cur->_next;
}
delete cur;
return true;
}
prev=cur;
cur=cur->_next;
}
return false;
}
private:
size_t _size=0;
vector<Node*>_table;
};
// Minimal unordered_map built on HashTable: stores pair<K,V> keyed on .first.
template <class K,class V,class HF=hashFun<K>>
class Unorderedmap
{
    // Extracts the key from a stored (key, value) pair.
    struct KeyOfVal
    {
        K operator ()(const pair<K,V>& value)
        {
            return value.first;
        }
    };
public:
    typedef typename HashTable<K,pair<K,V>,KeyOfVal,HF>::iterator iterator;
    iterator begin()
    {
        return _table.begin();
    }
    iterator end()
    {
        return _table.end();
    }
    // Insert a (key, value) pair; false when the key already exists.
    bool insert(const pair<K,V>& val)
    {
        return _table.insert(val);
    }
    // Return the node holding key, or nullptr.
    // Fix: the table stores pair<K,V>, so its nodes are HashNode<pair<K,V>>;
    // the original HashNode<V>* return type failed to compile as soon as
    // find() was instantiated. Also drops the redundant null-check branch.
    HashNode<pair<K,V>>* find(const K& key)
    {
        return _table.find(key);
    }
    // Remove key's entry; true when something was erased.
    bool erase(const K& key)
    {
        return _table.erase(key);
    }
private:
    HashTable<K,pair<K,V>,KeyOfVal,HF>_table;
};
// Smoke test: populate a string->int map and dump every entry to stdout.
void test()
{
    Unorderedmap<string,int,StrToInt> mp;
    const char* keys[] = {
        "luodong", "wangdingyang", "xxx", "gdsadasd", "dasdsa", "dsadsadsa"
    };
    for (const char* key : keys)
    {
        mp.insert(make_pair(key, 1));
    }
    for (auto it = mp.begin(); it != mp.end(); ++it)
    {
        cout << it->first << "---->" << it->second << endl;
    }
}
// Program entry point: run the Unorderedmap smoke test.
int main()
{
    test();
    return 0;
}
| [
"you@example.com"
] | you@example.com |
af98146667d6fe0cef171c121adb27f0e89e3af2 | 4abf5aef87f058c98bdeb641da33da209746a0e4 | /weatherSensors/main.cpp | 6bc45b6eb470e774a0aad155ad3a3051e7318b44 | [] | no_license | calimero921/ARDUINO_WEATHER | a6252bd11004d6b0d79d8a743ef2a431a7bfa765 | 2962ab94ca086f3a54c07ed9ab8159b325d6c65f | refs/heads/master | 2021-01-22T22:49:57.846650 | 2014-02-27T09:31:06 | 2014-02-27T09:31:06 | null | 0 | 0 | null | null | null | null | ISO-8859-1 | C++ | false | false | 186 | cpp | /*
* main.cpp
*
* Created on: 24 févr. 2014
* Author: bede6362
*/
#include <weatherSensors.h>
// Global sensor instance, constructed before setup() runs.
weatherSensors sensor;
// Arduino one-time initialization hook — nothing to initialize yet.
void setup() {
}
// Arduino main loop hook, called repeatedly; currently empty.
void loop() {
}
| [
"bede6362@EB-OR1009018.bordeaux.francetelecom.fr"
] | bede6362@EB-OR1009018.bordeaux.francetelecom.fr |
ca8e2df074dc665bbfa171cb6ce4f33efb917b39 | 79cd409b4b12f8ab76a31130750753e147c5dd4e | /wiselib.testing/util/pstl/unique_container.h | 4e58279dac68f1de93e0a72c658000009e43dfcb | [] | no_license | bjoerke/wiselib | d28eb39e9095c9bfcec6b4c635b773f5fcaf87fa | 183726cbf744be9d65f12dd01bece0f7fd842541 | refs/heads/master | 2020-12-28T20:30:40.829538 | 2014-08-18T14:10:42 | 2014-08-18T14:10:42 | 19,933,324 | 1 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 2,819 | h | /***************************************************************************
** This file is part of the generic algorithm library Wiselib. **
** Copyright (C) 2008,2009 by the Wisebed (www.wisebed.eu) project. **
** **
** The Wiselib is free software: you can redistribute it and/or modify **
** it under the terms of the GNU Lesser General Public License as **
** published by the Free Software Foundation, either version 3 of the **
** License, or (at your option) any later version. **
** **
** The Wiselib is distributed in the hope that it will be useful, **
** but WITHOUT ANY WARRANTY; without even the implied warranty of **
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the **
** GNU Lesser General Public License for more details. **
** **
** You should have received a copy of the GNU Lesser General Public **
** License along with the Wiselib. **
** If not, see <http://www.gnu.org/licenses/>. **
***************************************************************************/
#ifndef UNIQUE_CONTAINER_H
#define UNIQUE_CONTAINER_H
namespace wiselib {
/**
* @brief Wrapper around a container that ensures values are not inserted
	 * if they already exist in the container (by using find()).
*
* @ingroup
*
* @tparam
*/
template<
typename Container_P
>
class UniqueContainer : public Container_P {
public:
typedef Container_P Container;
typedef typename Container::value_type value_type;
typedef typename Container::iterator iterator;
typedef typename Container::OsModel OsModel;
typedef typename OsModel::block_data_t block_data_t;
typedef typename OsModel::size_t size_type;
iterator insert(const value_type& v) {
iterator it = Container::find(v);
if(it != Container::end()) { return it; }
return Container::insert(v);
}
iterator insert(iterator iter, const value_type& v) {
iterator it = Container::find(v);
if(it != Container::end()) { return it; }
return Container::insert(iter, v);
}
iterator push_back(const value_type& v) {
iterator it = Container::find(v);
if(it != Container::end()) { return it; }
return Container::push_back(v);
}
iterator push_front(const value_type& v) {
iterator it = Container::find(v);
if(it != Container::end()) { return it; }
return Container::push_front(v);
}
private:
}; // UniqueContainer
}
#endif // UNIQUE_CONTAINER_H
| [
"henning@leetless.de"
] | henning@leetless.de |
3f435c18781303e3131ce0a8214eb5e3e6045040 | 453302ff0c16614eb820c7153835bb549d903ebe | /devel/include/mbf_msgs/CheckPointRequest.h | 561a5492b95c1b94ca7b3622efc3f2d70434a7ee | [] | no_license | SimonSongg/catkin_ws | 591a8bee4864b2e31cc2abb041c082e2d4dc9d83 | afbf5346be2fdfa5aace7ed949d5023b3016363d | refs/heads/main | 2023-07-13T13:28:00.827983 | 2021-08-22T07:18:01 | 2021-08-22T07:18:01 | 380,931,544 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 6,969 | h | // Generated by gencpp from file mbf_msgs/CheckPointRequest.msg
// DO NOT EDIT!
#ifndef MBF_MSGS_MESSAGE_CHECKPOINTREQUEST_H
#define MBF_MSGS_MESSAGE_CHECKPOINTREQUEST_H
#include <string>
#include <vector>
#include <map>
#include <ros/types.h>
#include <ros/serialization.h>
#include <ros/builtin_message_traits.h>
#include <ros/message_operations.h>
#include <geometry_msgs/PointStamped.h>
namespace mbf_msgs
{
// Service request: a stamped point plus which costmap (LOCAL/GLOBAL) to
// check it against. Generated by gencpp from CheckPoint.srv — regenerate
// instead of editing by hand.
template <class ContainerAllocator>
struct CheckPointRequest_
{
  typedef CheckPointRequest_<ContainerAllocator> Type;

  // Default constructor: default point, costmap selector zeroed.
  CheckPointRequest_()
    : point()
    , costmap(0)  {
    }
  // Allocator-aware constructor used by ROS serialization.
  CheckPointRequest_(const ContainerAllocator& _alloc)
    : point(_alloc)
    , costmap(0)  {
  (void)_alloc;
    }

   typedef  ::geometry_msgs::PointStamped_<ContainerAllocator>  _point_type;
  _point_type point;

   typedef uint8_t _costmap_type;
  _costmap_type costmap;

// reducing the odds to have name collisions with Windows.h
#if defined(_WIN32) && defined(LOCAL_COSTMAP)
  #undef LOCAL_COSTMAP
#endif
#if defined(_WIN32) && defined(GLOBAL_COSTMAP)
  #undef GLOBAL_COSTMAP
#endif

  // Values for the `costmap` field (mirrors the .srv constants).
  enum {
    LOCAL_COSTMAP = 1u,
    GLOBAL_COSTMAP = 2u,
  };

  typedef boost::shared_ptr< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> > Ptr;
  typedef boost::shared_ptr< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> const> ConstPtr;

}; // struct CheckPointRequest_

typedef ::mbf_msgs::CheckPointRequest_<std::allocator<void> > CheckPointRequest;

typedef boost::shared_ptr< ::mbf_msgs::CheckPointRequest > CheckPointRequestPtr;
typedef boost::shared_ptr< ::mbf_msgs::CheckPointRequest const> CheckPointRequestConstPtr;

// constants requiring out of line definition

// Stream a human-readable rendering via the generated Printer.
template<typename ContainerAllocator>
std::ostream& operator<<(std::ostream& s, const ::mbf_msgs::CheckPointRequest_<ContainerAllocator> & v)
{
ros::message_operations::Printer< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> >::stream(s, "", v);
return s;
}

// Field-wise equality across allocator types.
template<typename ContainerAllocator1, typename ContainerAllocator2>
bool operator==(const ::mbf_msgs::CheckPointRequest_<ContainerAllocator1> & lhs, const ::mbf_msgs::CheckPointRequest_<ContainerAllocator2> & rhs)
{
  return lhs.point == rhs.point &&
    lhs.costmap == rhs.costmap;
}

template<typename ContainerAllocator1, typename ContainerAllocator2>
bool operator!=(const ::mbf_msgs::CheckPointRequest_<ContainerAllocator1> & lhs, const ::mbf_msgs::CheckPointRequest_<ContainerAllocator2> & rhs)
{
  return !(lhs == rhs);
}
} // namespace mbf_msgs
namespace ros
{
namespace message_traits
{
// ---- compile-time traits for CheckPointRequest (generated by gencpp) ----

// Variable-length on the wire (contains a string inside the header).
template <class ContainerAllocator>
struct IsFixedSize< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> >
  : FalseType
  { };

template <class ContainerAllocator>
struct IsFixedSize< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> const>
  : FalseType
  { };

template <class ContainerAllocator>
struct IsMessage< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> >
  : TrueType
  { };

template <class ContainerAllocator>
struct IsMessage< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> const>
  : TrueType
  { };

// No top-level std_msgs/Header field (the Header lives inside `point`).
template <class ContainerAllocator>
struct HasHeader< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> >
  : FalseType
  { };

template <class ContainerAllocator>
struct HasHeader< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> const>
  : FalseType
  { };

// MD5 of the full message definition; ROS uses it for type compatibility.
template<class ContainerAllocator>
struct MD5Sum< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> >
{
  static const char* value()
  {
    return "36e9c2f425eee0a2ebd8c4b0aae9f573";
  }

  static const char* value(const ::mbf_msgs::CheckPointRequest_<ContainerAllocator>&) { return value(); }
  static const uint64_t static_value1 = 0x36e9c2f425eee0a2ULL;
  static const uint64_t static_value2 = 0xebd8c4b0aae9f573ULL;
};

template<class ContainerAllocator>
struct DataType< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> >
{
  static const char* value()
  {
    return "mbf_msgs/CheckPointRequest";
  }

  static const char* value(const ::mbf_msgs::CheckPointRequest_<ContainerAllocator>&) { return value(); }
};

// Full plain-text definition, including all dependent message types.
template<class ContainerAllocator>
struct Definition< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> >
{
  static const char* value()
  {
    return "uint8 LOCAL_COSTMAP = 1\n"
"uint8 GLOBAL_COSTMAP = 2\n"
"\n"
"geometry_msgs/PointStamped point # the point to be checked after transforming to costmap frame\n"
"uint8 costmap # costmap in which to check the point\n"
"\n"
"================================================================================\n"
"MSG: geometry_msgs/PointStamped\n"
"# This represents a Point with reference coordinate frame and timestamp\n"
"Header header\n"
"Point point\n"
"\n"
"================================================================================\n"
"MSG: std_msgs/Header\n"
"# Standard metadata for higher-level stamped data types.\n"
"# This is generally used to communicate timestamped data \n"
"# in a particular coordinate frame.\n"
"# \n"
"# sequence ID: consecutively increasing ID \n"
"uint32 seq\n"
"#Two-integer timestamp that is expressed as:\n"
"# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n"
"# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n"
"# time-handling sugar is provided by the client library\n"
"time stamp\n"
"#Frame this data is associated with\n"
"string frame_id\n"
"\n"
"================================================================================\n"
"MSG: geometry_msgs/Point\n"
"# This contains the position of a point in free space\n"
"float64 x\n"
"float64 y\n"
"float64 z\n"
;
  }

  static const char* value(const ::mbf_msgs::CheckPointRequest_<ContainerAllocator>&) { return value(); }
};
} // namespace message_traits
} // namespace ros
namespace ros
{
namespace serialization
{
  // Field-order serializer: streams `point` then `costmap`, matching the
  // declaration order in the .srv (generated code — do not edit by hand).
  template<class ContainerAllocator> struct Serializer< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> >
  {
    template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
    {
      stream.next(m.point);
      stream.next(m.costmap);
    }

    ROS_DECLARE_ALLINONE_SERIALIZER
  }; // struct CheckPointRequest_
} // namespace serialization
} // namespace ros
namespace ros
{
namespace message_operations
{
// Pretty-printer used by rostopic echo etc.; indents the nested PointStamped.
template<class ContainerAllocator>
struct Printer< ::mbf_msgs::CheckPointRequest_<ContainerAllocator> >
{
  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::mbf_msgs::CheckPointRequest_<ContainerAllocator>& v)
  {
    s << indent << "point: ";
    s << std::endl;
    Printer< ::geometry_msgs::PointStamped_<ContainerAllocator> >::stream(s, indent + " ", v.point);
    s << indent << "costmap: ";
    Printer<uint8_t>::stream(s, indent + " ", v.costmap);
  }
};
} // namespace message_operations
} // namespace ros
#endif // MBF_MSGS_MESSAGE_CHECKPOINTREQUEST_H
| [
"sf995511sf@qq.com"
] | sf995511sf@qq.com |
06f5378ae464cc8f555f2278f0f809d8bb5fa1ca | 6bb851f5d8c743ab3e7037bcea033b0e3430eddb | /data/Submission/492 Construct the Rectangle/Construct the Rectangle_1.cpp | b4c8962492f42bab33e39827953722b876374490 | [] | no_license | CJHMPower/Fetch_Leetcode | 96f67ca8609955524f01124f17cb570361ea835f | 5dbd1d0ff56f8ab38fe3587519a973a3d712e758 | refs/heads/master | 2020-03-24T04:26:02.829710 | 2018-07-26T14:31:36 | 2018-07-26T14:31:36 | 142,452,783 | 1 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 939 | cpp | //-*- coding:utf-8 -*-
// Generated by the Fetch-Leetcode project on the Github
// https://github.com/CJHMPower/Fetch-Leetcode/
// 492 Construct the Rectangle
// https://leetcode.com//problems/construct-the-rectangle/description/
// Fetched at 2018-07-24
// Submitted 3 months, 2 weeks ago
// Runtime: 3 ms
// This solution defeats 31.33% cpp solutions
// LeetCode 492 "Construct the Rectangle": split area into [length, width]
// with length >= width, length * width == area, and minimal length - width.
class Solution {
   public:
    // Integer floor(sqrt(area)) via binary search — no floating point.
    // Fix vs. original: the original computed (low + high) in `int` before
    // widening, which can overflow for large `area`; the search bounds are
    // now carried in `long long` throughout.
    int int_sqrt(int area) {
        long long lo = 1, hi = area;
        while (lo <= hi) {
            long long mid = lo + (hi - lo) / 2;
            long long sq = mid * mid;
            if (sq == area) {
                return static_cast<int>(mid);
            }
            if (sq < area) {
                lo = mid + 1;
            } else {
                hi = mid - 1;
            }
        }
        // loop exits with lo == hi + 1, so this is floor(sqrt(area)) == hi
        return static_cast<int>((lo + hi) / 2);
    }
    // Walk the width down from floor(sqrt(area)) until it divides area.
    vector<int> constructRectangle(int area) {
        int width = int_sqrt(area);
        while (width > 1 && area % width != 0) {
            width--;
        }
        return vector<int>{area / width, width};
    }
};
"1120798947@qq.com"
] | 1120798947@qq.com |
a9107acc00e6e302af38431a7adfb1863855b1a2 | 867200291c8775c71eac47431d727961c37d66d5 | /assignment_package/src/scene/blocktypeworker.h | 575e870323d47aa1ffe6093b3f0206c497736476 | [] | no_license | ayang015seas/MiniMinecraft | a08f680f42e2410a55b2bb2a9e377a0600f73213 | d3d3c29c06361eb3bb33e204d50f8931e05b6eb3 | refs/heads/master | 2022-12-22T01:53:32.180738 | 2020-09-23T01:48:43 | 2020-09-23T01:48:43 | 297,821,707 | 0 | 1 | null | null | null | null | UTF-8 | C++ | false | false | 585 | h | #pragma once
#include "smartpointerhelp.h"
#include "glm_includes.h"
#include "chunk.h"
#include <QRunnable>
#include <QMutex>
#include <unordered_map>
#include "noise.h"
#include <iostream>
#include "lsystem.h"
// Background job (QRunnable, run on Qt's thread pool) that fills chunks
// with block-type data for the terrain region around (x, z), seeded by
// `seed` for deterministic noise.
class BlockTypeWorker : public QRunnable {
private:
    std::vector<Chunk*>* chonks; // shared output list — presumably guarded by `mut`; confirm at call sites
    QMutex* mut;                 // mutex handed in by the owner of `chonks`
    std::vector<Chunk*> toAdd;   // chunks this worker is responsible for populating
    int x, z, seed;              // region coordinates and world seed
public:
    BlockTypeWorker(std::vector<Chunk*>* chonks, QMutex* mut,
                    std::vector<Chunk*> toAdd, int x, int z, int seed);
    // QRunnable entry point, executed on a worker thread.
    void run() override;
    // Populate one chunk's block-type data (implementation not visible here).
    Chunk* createBlockTypeData(Chunk *cPtr);
};
| [
"43522595+ayang015seas@users.noreply.github.com"
] | 43522595+ayang015seas@users.noreply.github.com |
64b3de919a5daf2642677e072cc5779a5652fef9 | 0cf886b9cc9b6af538cfbb08e0dc495ea342762c | /game/src/XDSprite.cpp | f9a7f2a9a0e60a5cb318b574eb01b640b1382203 | [] | no_license | xedixermawan/proximity-game-clone2-dx | 3bfd584e552188b773148276e5386f1cd9597ecb | a4149241e381b4fc4b22e183634da4575cacaba9 | refs/heads/master | 2021-01-18T18:35:01.321364 | 2014-05-13T07:17:40 | 2014-05-13T07:17:40 | 32,908,322 | 0 | 0 | null | null | null | null | UTF-8 | C++ | false | false | 4,804 | cpp | /*
* (c) 2013-2014 XediXermawan < edi.ermawan@gmail.com >
*/
#include "pch.h"
#include "XDSprite.h"
#include "XDFileSystem.h"
#include "XDFuncUtils.h"
// Load a sprite definition (modules, frames, animations) from the given
// XML file and derive the companion .dds texture path (same name, .dds
// extension). The texture itself is loaded later via LoadTexture()/SetSRV().
XDSprite::XDSprite(const std::string& filename,XDFileSystem* xdfs)
    : m_FlipH(1) ,
    m_Scale(1.0f) {
    m_SpriteName = filename;
    m_Loader = new SpriteLoaderXML();
    std::string corrected_path="";
    xdfs->ResolveToROFolder(corrected_path, filename);
    m_Loader->Load(corrected_path,&m_Modules,m_Nmodule, m_FrameDesc,m_AnimDesc );
    // get texture path
    m_TextureName="";
    char temp[512];
    GetFilenameNoExt(temp,m_SpriteName.c_str());
    strcat_s(temp,".dds");
    xdfs->ResolveToROFolder(m_TextureName, temp );
#ifndef NDEBUG
    // Debug-only sanity walk over the loaded tables: reading every cell
    // would crash/assert here (close to the load) if the data is malformed.
    // test module
    for(int i=0; i< m_Nmodule ; i++) {
        for(int n=0; n<SpriteConstant::N_MODULE_DEF; n++) {
            int vann = m_Modules[i][n];
        }
    }
    // test frames
    for(unsigned int i=0; i<m_FrameDesc.size(); i++) {
        unsigned int n = m_FrameDesc[i]->m_ModuleCount;
        for(unsigned int j=0; j<n; j++) {
            for(unsigned int k=0; k<SpriteConstant::N_FRAME_DEF; k++) {
                unsigned int vann = m_FrameDesc[i]->m_Modules[j][k];
            }
        }
    }
    // test anims
    for(unsigned int i=0; i<m_AnimDesc.size(); i++) {
        unsigned int n = m_AnimDesc[i]->m_FrameCount;
        for(unsigned int j=0; j<n; j++) {
            for(unsigned int k=0; k<SpriteConstant::N_ANIM_DEF; k++) {
                unsigned int vann = m_AnimDesc[i]->m_Frames[j][k];
            }
        }
    }
#endif
}
// Inject the DirectXTK SpriteBatch used for all Draw calls (not owned here).
void XDSprite::SetSpriteBatch(SpriteBatch* spritebatch) {
    m_SpriteBatch = spritebatch;
}
// Load the companion .dds texture (path derived in the constructor) into
// m_ShaderResView. Asserts that the file exists and the SRV was created.
void XDSprite::LoadTexture(ID3D11Device* dxdevice) {
    assert ( ( FileSystem::FileExist ( m_TextureName.c_str() ) == true ) );
    CreateDDSTextureFromFile(dxdevice, UTF8ToWChar (m_TextureName.c_str() ), nullptr, &m_ShaderResView, MAXSIZE_T);
    assert( m_ShaderResView );
}
// Use an externally created shader resource view instead of LoadTexture().
void XDSprite::SetSRV( ID3D11ShaderResourceView* srv) {
    m_ShaderResView = srv;
}
// Map a module UID (column 1 of the module table) to its row index,
// or -1 when no module carries that id.
int XDSprite::GetModuleIndexByID(int moduid) const {
    int found = -1;
    for (int row = 0; row < m_Nmodule && found < 0; ++row) {
        if (m_Modules[row][1] == moduid)
            found = row;
    }
    return found;
}
// Map a frame UID to its index in m_FrameDesc, or -1 when not found.
int XDSprite::GetFrameIndexByID(int frameuid) const {
    const unsigned int count = m_FrameDesc.size();
    for (unsigned int idx = 0; idx < count; ++idx) {
        if (m_FrameDesc[idx]->m_FrameID == frameuid)
            return idx;
    }
    return -1;
}
// Draw one sprite module (a sub-rectangle of the texture sheet) at screen
// position (offsetx, offsety), honoring the current flip and scale state.
void XDSprite::PaintModuleNth(unsigned int index,int offsetx,int offsety) {
    // module table columns 2..5 hold the source rect: x, y, width, height
    int _module_x = m_Modules[index][2];
    int _module_y = m_Modules[index][3];
    int _module_w = m_Modules[index][4];
    int _module_h = m_Modules[index][5];
    RECT rect1;
    rect1.top = _module_y;
    rect1.left = _module_x ;
    rect1.bottom = _module_y + _module_h;
    rect1.right = _module_x +_module_w;
    // mirror horizontally when FlipHorizontally() toggled m_FlipH to -1
    DirectX::SpriteEffects flipsprite = SpriteEffects_None;
    if( m_FlipH == -1)
        flipsprite = SpriteEffects_FlipHorizontally;
    m_SpriteBatch->Draw( m_ShaderResView,
        XMFLOAT2( (float) offsetx , (float) offsety),
        &rect1,
        Colors::White,
        0.0f,
        XMFLOAT2(0.0f, 0.0f),
        XMFLOAT2( m_Scale, m_Scale ) ,
        flipsprite,
        0.0f);
}
// Draw every module of frame `index` at (offsetx, offsety); each module
// record carries its own offset within the frame.
void XDSprite::PaintFrameNth(unsigned int index, int offsetx, int offsety) {
    int frame_n = index;
    int _module_count = m_FrameDesc[frame_n]->m_ModuleCount;
    for(int i=0; i<_module_count; i++) {
        // column 0: module UID; columns 1,2: module offset inside the frame
        int _module_index = GetModuleIndexByID( m_FrameDesc[frame_n]->m_Modules[i][0] ) ;
        int _ox = m_FrameDesc[frame_n]->m_Modules[i][1];
        int _oy = m_FrameDesc[frame_n]->m_Modules[i][2];
        PaintModuleNth(_module_index, _ox + offsetx , _oy + offsety );
    }
}
// Draw frame `frameIndex` of animation `animIndex`. The frame record holds
// (frame UID, x offset, y offset, duration); flip/scale are applied to the
// frame offset before delegating to PaintFrameNth.
void XDSprite::PaintFrameAnim(unsigned int animIndex, unsigned int frameIndex, int offsetx, int offsety ) {
    int _frame_uid = m_AnimDesc[animIndex]->m_Frames[frameIndex][0];
    int _frame_ox = m_AnimDesc[animIndex]->m_Frames[frameIndex][1];
    int _frame_oy = m_AnimDesc[animIndex]->m_Frames[frameIndex][2];
    // NOTE(review): the frame duration is read but unused here — timing is
    // presumably driven by the caller; confirm before removing.
    int _frame_time = m_AnimDesc[animIndex]->m_Frames[frameIndex][3];
    int _frame_index = GetFrameIndexByID( _frame_uid );
    // note: the scaled float offsets are truncated back to int parameters
    PaintFrameNth(_frame_index, m_FlipH *_frame_ox * m_Scale + offsetx, _frame_oy * m_Scale + offsety );
}
// Number of frames in animation `animIndex`.
int XDSprite::GetNFrameAnim(int animIndex) {
    return m_AnimDesc[animIndex]->m_FrameCount;
}
// NOTE(review): m_Loader is new'd in the constructor but never deleted here,
// and m_ShaderResView is not Release()d — verify who owns these before
// "fixing"; as written this looks like a leak.
XDSprite::~XDSprite() {
}
// Toggle horizontal mirroring: m_FlipH alternates between +1 and -1.
void XDSprite::FlipHorizontally() {
    m_FlipH = -m_FlipH;
}
// Set the uniform scale factor applied when painting modules and frames.
void XDSprite::SetScale(float scale) {
    m_Scale = scale;
}
"edi.ermawan@gmail.com@46f14409-37b9-1770-7b49-ba1cd2439d2f"
] | edi.ermawan@gmail.com@46f14409-37b9-1770-7b49-ba1cd2439d2f |
d004ec545f7b319ab0f6bcd0925015b2cb4986f4 | 1cc5d45273d008e97497dad9ec004505cc68c765 | /cheatsheet/ops_doc-master/Service/cfaq/example_c/class_inline_2/fn.h | 3764628b4dc7201d2f50b759d21c5c46b186f820 | [] | no_license | wangfuli217/ld_note | 6efb802989c3ea8acf031a10ccf8a8a27c679142 | ad65bc3b711ec00844da7493fc55e5445d58639f | refs/heads/main | 2023-08-26T19:26:45.861748 | 2023-03-25T08:13:19 | 2023-03-25T08:13:19 | 375,861,686 | 5 | 6 | null | null | null | null | UTF-8 | C++ | false | false | 125 | h | #ifndef __FN_H__
#define __FN_H__
// Small demo class for the class-inline example; version() is declared
// here and defined out of line (implementation not visible in this header).
class FunInline {
public:
    FunInline() {}   // trivial default constructor
    ~FunInline() {}  // trivial destructor
    void version();
};
#endif
| [
"wangfl217@126.com"
] | wangfl217@126.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.